diff --git a/sys/dev/null/null.c b/sys/dev/null/null.c index 0f1d118..3005c19 100644 --- a/sys/dev/null/null.c +++ b/sys/dev/null/null.c @@ -112,10 +112,10 @@ null_modevent(module_t mod __unused, int type, void *data __unused) if (bootverbose) printf("null: \n"); zbuf = (void *)malloc(PAGE_SIZE, M_TEMP, M_WAITOK | M_ZERO); - null_dev = make_dev(&null_cdevsw, 0, UID_ROOT, GID_WHEEL, - 0666, "null"); - zero_dev = make_dev(&zero_cdevsw, 0, UID_ROOT, GID_WHEEL, - 0666, "zero"); + null_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &null_cdevsw, 0, + NULL, UID_ROOT, GID_WHEEL, 0666, "null"); + zero_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &zero_cdevsw, 0, + NULL, UID_ROOT, GID_WHEEL, 0666, "zero"); break; case MOD_UNLOAD: diff --git a/sys/fs/devfs/devfs.h b/sys/fs/devfs/devfs.h index a50a7b0..ee49c8b 100644 --- a/sys/fs/devfs/devfs.h +++ b/sys/fs/devfs/devfs.h @@ -172,7 +172,7 @@ extern unsigned devfs_rule_depth; void devfs_rules_apply(struct devfs_mount *dm, struct devfs_dirent *de); void devfs_rules_cleanup (struct devfs_mount *dm); int devfs_rules_ioctl(struct devfs_mount *dm, u_long cmd, caddr_t data, struct thread *td); -int devfs_allocv (struct devfs_dirent *de, struct mount *mp, +int devfs_allocv (struct devfs_dirent *de, struct mount *mp, int lockmode, struct vnode **vpp); void devfs_delete(struct devfs_mount *dm, struct devfs_dirent *de, int vp_locked); void devfs_dirent_free(struct devfs_dirent *de); diff --git a/sys/fs/devfs/devfs_rule.c b/sys/fs/devfs/devfs_rule.c index bc1f32c..3d01f45 100644 --- a/sys/fs/devfs/devfs_rule.c +++ b/sys/fs/devfs/devfs_rule.c @@ -528,6 +528,7 @@ devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de) struct devfs_rule *dr = &dk->dk_rule; struct cdev *dev; struct cdevsw *dsw; + int ref; dev = devfs_rule_getdev(de); /* @@ -545,14 +546,14 @@ devfs_rule_match(struct devfs_krule *dk, struct devfs_dirent *de) if (dr->dr_icond & DRC_DSWFLAGS) { if (dev == NULL) return (0); - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (0); if ((dsw->d_flags & dr->dr_dswflags) == 0) { - dev_relthread(dev); + dev_relthread(dev, ref); return (0); } - dev_relthread(dev); + dev_relthread(dev, ref); } if (dr->dr_icond & DRC_PATHPTRN) if (!devfs_rule_matchpath(dk, de)) diff --git a/sys/fs/devfs/devfs_vfsops.c b/sys/fs/devfs/devfs_vfsops.c index ff86e36..87ec162 100644 --- a/sys/fs/devfs/devfs_vfsops.c +++ b/sys/fs/devfs/devfs_vfsops.c @@ -81,7 +81,8 @@ devfs_mount(struct mount *mp) MNT_ILOCK(mp); mp->mnt_flag |= MNT_LOCAL; - mp->mnt_kern_flag |= MNTK_MPSAFE; + mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED | + MNTK_EXTENDED_SHARED; #ifdef MAC mp->mnt_flag |= MNT_MULTILABEL; #endif @@ -155,7 +156,7 @@ devfs_root(struct mount *mp, int flags, struct vnode **vpp) dmp = VFSTODEVFS(mp); sx_xlock(&dmp->dm_lock); - error = devfs_allocv(dmp->dm_rootdir, mp, &vp); + error = devfs_allocv(dmp->dm_rootdir, mp, LK_EXCLUSIVE, &vp); if (error) return (error); vp->v_vflag |= VV_ROOT; diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c index c277355..450afc1 100644 --- a/sys/fs/devfs/devfs_vnops.c +++ b/sys/fs/devfs/devfs_vnops.c @@ -82,13 +82,14 @@ struct mtx cdevpriv_mtx; MTX_SYSINIT(cdevpriv_mtx, &cdevpriv_mtx, "cdevpriv lock", MTX_DEF); static int -devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp) +devfs_fp_check(struct file *fp, struct cdev **devp, struct cdevsw **dswp, + int *ref) { - *dswp = devvn_refthread(fp->f_vnode, devp); + *dswp = devvn_refthread(fp->f_vnode, devp, ref); if (*devp != fp->f_data) 
{ if (*dswp != NULL) - dev_relthread(*devp); + dev_relthread(*devp, *ref); return (ENXIO); } KASSERT((*devp)->si_refcount > 0, @@ -331,7 +332,8 @@ devfs_insmntque_dtr(struct vnode *vp, void *arg) * it on return. */ int -devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp) +devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode, + struct vnode **vpp) { int error; struct vnode *vp; @@ -351,7 +353,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp) VI_LOCK(vp); mtx_unlock(&devfs_de_interlock); sx_xunlock(&dmp->dm_lock); - error = vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, curthread); + error = vget(vp, lockmode | LK_INTERLOCK, curthread); sx_xlock(&dmp->dm_lock); if (devfs_allocv_drop_refs(0, dmp, de)) { if (error == 0) @@ -395,6 +397,8 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp) dev->si_usecount += vp->v_usecount; dev_unlock(); VI_UNLOCK(vp); + if ((dev->si_flags & SI_ETERNAL) != 0) + vp->v_vflag |= VV_ETERNALDEV; vp->v_op = &devfs_specops; } else if (de->de_dirent->d_type == DT_DIR) { vp->v_type = VDIR; @@ -403,6 +407,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, struct vnode **vpp) } else { vp->v_type = VBAD; } + VN_LOCK_ASHARE(vp); vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWITNESS); mtx_lock(&devfs_de_interlock); vp->v_data = de; @@ -458,7 +463,7 @@ devfs_close(struct vop_close_args *ap) struct thread *td = ap->a_td; struct cdev *dev = vp->v_rdev; struct cdevsw *dsw; - int vp_locked, error; + int vp_locked, error, ref; /* * XXX: Don't call d_close() if we were called because of @@ -501,7 +506,7 @@ devfs_close(struct vop_close_args *ap) * sum of the reference counts on all the aliased * vnodes descends to one, we are on last close. */ - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); VI_LOCK(vp); @@ -511,7 +516,7 @@ devfs_close(struct vop_close_args *ap) /* Keep device updated on status. 
*/ } else if (count_dev(dev) > 1) { VI_UNLOCK(vp); - dev_relthread(dev); + dev_relthread(dev, ref); return (0); } vholdl(vp); @@ -521,7 +526,7 @@ devfs_close(struct vop_close_args *ap) KASSERT(dev->si_refcount > 0, ("devfs_close() on un-referenced struct cdev *(%s)", devtoname(dev))); error = dsw->d_close(dev, ap->a_fflag, S_IFCHR, td); - dev_relthread(dev); + dev_relthread(dev, ref); vn_lock(vp, vp_locked | LK_RETRY); vdrop(vp); return (error); @@ -639,20 +644,20 @@ devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struc struct cdevsw *dsw; struct vnode *vp; struct vnode *vpold; - int error, i; + int error, i, ref; const char *p; struct fiodgname_arg *fgn; struct file *fpop; fpop = td->td_fpop; - error = devfs_fp_check(fp, &dev, &dsw); + error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error) return (error); if (com == FIODTYPE) { *(int *)data = dsw->d_flags & D_TYPEMASK; td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); return (0); } else if (com == FIODGNAME) { fgn = data; @@ -663,12 +668,12 @@ devfs_ioctl_f(struct file *fp, u_long com, void *data, struct ucred *cred, struc else error = copyout(p, fgn->buf, i); td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); return (error); } error = dsw->d_ioctl(dev, com, data, fp->f_flag, td); td->td_fpop = NULL; - dev_relthread(dev); + dev_relthread(dev, ref); if (error == ENOIOCTL) error = ENOTTY; if (error == 0 && com == TIOCSCTTY) { @@ -703,18 +708,18 @@ devfs_kqfilter_f(struct file *fp, struct knote *kn) { struct cdev *dev; struct cdevsw *dsw; - int error; + int error, ref; struct file *fpop; struct thread *td; td = curthread; fpop = td->td_fpop; - error = devfs_fp_check(fp, &dev, &dsw); + error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error) return (error); error = dsw->d_kqfilter(dev, kn); td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); return (error); } @@ -753,7 +758,7 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock) struct devfs_dirent **dde; struct devfs_mount *dmp; struct cdev *cdev; - int error, flags, nameiop; + int error, flags, nameiop, dvplocked; char specname[SPECNAMELEN + 1], *pname; cnp = ap->a_cnp; @@ -794,10 +799,12 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock) de = devfs_parent_dirent(dd); if (de == NULL) return (ENOENT); + dvplocked = VOP_ISLOCKED(dvp); VOP_UNLOCK(dvp, 0); - error = devfs_allocv(de, dvp->v_mount, vpp); + error = devfs_allocv(de, dvp->v_mount, + cnp->cn_lkflags & LK_TYPE_MASK, vpp); *dm_unlock = 0; - vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); + vn_lock(dvp, dvplocked | LK_RETRY); return (error); } @@ -881,7 +888,8 @@ devfs_lookupx(struct vop_lookup_args *ap, int *dm_unlock) return (0); } } - error = devfs_allocv(de, dvp->v_mount, vpp); + error = devfs_allocv(de, dvp->v_mount, cnp->cn_lkflags & LK_TYPE_MASK, + vpp); *dm_unlock = 0; return (error); } @@ -939,7 +947,7 @@ devfs_mknod(struct vop_mknod_args *ap) if (de == NULL) goto notfound; de->de_flags &= ~DE_WHITEOUT; - error = devfs_allocv(de, dvp->v_mount, vpp); + error = devfs_allocv(de, dvp->v_mount, LK_EXCLUSIVE, vpp); return (error); notfound: sx_xunlock(&dmp->dm_lock); @@ -954,7 +962,7 @@ devfs_open(struct vop_open_args *ap) struct vnode *vp = ap->a_vp; struct cdev *dev = vp->v_rdev; struct file *fp = ap->a_fp; - int error; + int error, ref, vlocked; struct cdevsw *dsw; struct file *fpop; @@ -968,14 +976,18 @@ devfs_open(struct vop_open_args *ap) if (dev->si_iosize_max == 0) dev->si_iosize_max = DFLTPHYS; - dsw = dev_refthread(dev); + dsw = 
dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); /* XXX: Special casing of ttys for deadfs. Probably redundant. */ - if (dsw->d_flags & D_TTY) + if (dsw->d_flags & D_TTY) { + VI_LOCK(vp); vp->v_vflag |= VV_ISTTY; + VI_UNLOCK(vp); + } + vlocked = VOP_ISLOCKED(vp); VOP_UNLOCK(vp, 0); fpop = td->td_fpop; @@ -990,10 +1002,8 @@ devfs_open(struct vop_open_args *ap) error = dsw->d_open(dev, ap->a_mode, S_IFCHR, td); td->td_fpop = fpop; - vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); - - dev_relthread(dev); - + vn_lock(vp, vlocked | LK_RETRY); + dev_relthread(dev, ref); if (error) return (error); @@ -1001,7 +1011,7 @@ devfs_open(struct vop_open_args *ap) KASSERT(fp != NULL, ("Could not vnode bypass device on NULL fp")); #else - if(fp == NULL) + if (fp == NULL) return (error); #endif if (fp->f_ops == &badfileops) @@ -1037,16 +1047,16 @@ devfs_poll_f(struct file *fp, int events, struct ucred *cred, struct thread *td) { struct cdev *dev; struct cdevsw *dsw; - int error; + int error, ref; struct file *fpop; fpop = td->td_fpop; - error = devfs_fp_check(fp, &dev, &dsw); + error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error) return (poll_no_poll(events)); error = dsw->d_poll(dev, events, td); td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); return(error); } @@ -1066,12 +1076,12 @@ static int devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td) { struct cdev *dev; - int ioflag, error, resid; + int ioflag, error, ref, resid; struct cdevsw *dsw; struct file *fpop; fpop = td->td_fpop; - error = devfs_fp_check(fp, &dev, &dsw); + error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error) return (error); resid = uio->uio_resid; @@ -1086,7 +1096,7 @@ devfs_read_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, st if (uio->uio_resid != resid || (error == 0 && resid != 0)) vfs_timestamp(&dev->si_atime); td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); if ((flags & FOF_OFFSET) == 0) fp->f_offset = uio->uio_offset; @@ -1494,7 +1504,7 @@ devfs_symlink(struct vop_symlink_args *ap) mac_devfs_create_symlink(ap->a_cnp->cn_cred, dmp->dm_mount, dd, de); #endif TAILQ_INSERT_TAIL(&dd->de_dlist, de, de_list); - return (devfs_allocv(de, ap->a_dvp->v_mount, ap->a_vpp)); + return (devfs_allocv(de, ap->a_dvp->v_mount, LK_EXCLUSIVE, ap->a_vpp)); } static int @@ -1509,12 +1519,12 @@ static int devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, struct thread *td) { struct cdev *dev; - int error, ioflag, resid; + int error, ioflag, ref, resid; struct cdevsw *dsw; struct file *fpop; fpop = td->td_fpop; - error = devfs_fp_check(fp, &dev, &dsw); + error = devfs_fp_check(fp, &dev, &dsw, &ref); if (error) return (error); KASSERT(uio->uio_td == td, ("uio_td %p is not td %p", uio->uio_td, td)); @@ -1532,7 +1542,7 @@ devfs_write_f(struct file *fp, struct uio *uio, struct ucred *cred, int flags, s dev->si_mtime = dev->si_ctime; } td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); if ((flags & FOF_OFFSET) == 0) fp->f_offset = uio->uio_offset; diff --git a/sys/kern/kern_conf.c b/sys/kern/kern_conf.c index 7e4e677..12d1a8d 100644 --- a/sys/kern/kern_conf.c +++ b/sys/kern/kern_conf.c @@ -177,12 +177,16 @@ dev_rel(struct cdev *dev) } struct cdevsw * -dev_refthread(struct cdev *dev) +dev_refthread(struct cdev *dev, int *ref) { struct cdevsw *csw; struct cdev_priv *cdp; mtx_assert(&devmtx, MA_NOTOWNED); + if ((dev->si_flags & SI_ETERNAL) != 0) { + *ref = 0; + return (dev->si_devsw); + } dev_lock(); 
csw = dev->si_devsw; if (csw != NULL) { @@ -193,36 +197,59 @@ dev_refthread(struct cdev *dev) csw = NULL; } dev_unlock(); + *ref = 1; return (csw); } struct cdevsw * -devvn_refthread(struct vnode *vp, struct cdev **devp) +devvn_refthread(struct vnode *vp, struct cdev **devp, int *ref) { struct cdevsw *csw; struct cdev_priv *cdp; + struct cdev *dev; mtx_assert(&devmtx, MA_NOTOWNED); + if ((vp->v_vflag & VV_ETERNALDEV) != 0) { + dev = vp->v_rdev; + if (dev == NULL) + return (NULL); + KASSERT((dev->si_flags & SI_ETERNAL) != 0, + ("Not eternal cdev")); + *ref = 0; + csw = dev->si_devsw; + KASSERT(csw != NULL, ("Eternal cdev is destroyed")); + *devp = dev; + return (csw); + } + csw = NULL; dev_lock(); - *devp = vp->v_rdev; - if (*devp != NULL) { - cdp = cdev2priv(*devp); - if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) { - csw = (*devp)->si_devsw; - if (csw != NULL) - (*devp)->si_threadcount++; - } + dev = vp->v_rdev; + if (dev == NULL) { + dev_unlock(); + return (NULL); + } + cdp = cdev2priv(dev); + if ((cdp->cdp_flags & CDP_SCHED_DTR) == 0) { + csw = dev->si_devsw; + if (csw != NULL) + dev->si_threadcount++; } dev_unlock(); + if (csw != NULL) { + *devp = dev; + *ref = 1; + } return (csw); } void -dev_relthread(struct cdev *dev) +dev_relthread(struct cdev *dev, int ref) { mtx_assert(&devmtx, MA_NOTOWNED); + if (!ref) + return; dev_lock(); KASSERT(dev->si_threadcount > 0, ("%s threadcount is wrong", dev->si_name)); @@ -325,15 +352,15 @@ static int giant_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_open(dev, oflags, devtype, td); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -341,15 +368,15 @@ static int giant_fdopen(struct cdev *dev, int oflags, struct thread *td, struct file *fp) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_fdopen(dev, oflags, td, fp); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -357,15 +384,15 @@ static int giant_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_close(dev, fflag, devtype, td); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -374,9 +401,10 @@ giant_strategy(struct bio *bp) { struct cdevsw *dsw; struct cdev *dev; + int ref; dev = bp->bio_dev; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) { biofinish(bp, NULL, ENXIO); return; @@ -384,22 +412,22 @@ giant_strategy(struct bio *bp) mtx_lock(&Giant); dsw->d_gianttrick->d_strategy(bp); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); } static int giant_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_ioctl(dev, cmd, data, fflag, td); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -407,15 
+435,15 @@ static int giant_read(struct cdev *dev, struct uio *uio, int ioflag) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_read(dev, uio, ioflag); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -423,15 +451,15 @@ static int giant_write(struct cdev *dev, struct uio *uio, int ioflag) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_write(dev, uio, ioflag); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -439,15 +467,15 @@ static int giant_poll(struct cdev *dev, int events, struct thread *td) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_poll(dev, events, td); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -455,15 +483,15 @@ static int giant_kqfilter(struct cdev *dev, struct knote *kn) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_kqfilter(dev, kn); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -472,16 +500,16 @@ giant_mmap(struct cdev *dev, vm_ooffset_t offset, vm_paddr_t *paddr, int nprot, vm_memattr_t *memattr) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_mmap(dev, offset, paddr, nprot, memattr); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -490,16 +518,16 @@ giant_mmap_single(struct cdev *dev, vm_ooffset_t *offset, vm_size_t size, vm_object_t *object, int nprot) { struct cdevsw *dsw; - int retval; + int ref, retval; - dsw = dev_refthread(dev); + dsw = dev_refthread(dev, &ref); if (dsw == NULL) return (ENXIO); mtx_lock(&Giant); retval = dsw->d_gianttrick->d_mmap_single(dev, offset, size, object, nprot); mtx_unlock(&Giant); - dev_relthread(dev); + dev_relthread(dev, ref); return (retval); } @@ -676,6 +704,8 @@ make_dev_credv(int flags, struct cdev **dres, struct cdevsw *devsw, int unit, dev = newdev(devsw, unit, dev); if (flags & MAKEDEV_REF) dev_refl(dev); + if (flags & MAKEDEV_ETERNAL) + dev->si_flags |= SI_ETERNAL; if (dev->si_flags & SI_CHEAPCLONE && dev->si_flags & SI_NAMED) { /* @@ -840,6 +870,9 @@ destroy_devl(struct cdev *dev) mtx_assert(&devmtx, MA_OWNED); KASSERT(dev->si_flags & SI_NAMED, ("WARNING: Driver mistake: destroy_dev on %d\n", dev2unit(dev))); + KASSERT((dev->si_flags & SI_ETERNAL) == 0, + ("WARNING: Driver mistake: destroy_dev on eternal %d\n", + dev2unit(dev))); devfs_destroy(dev); @@ -1196,3 +1229,71 @@ devdtr_init(void *dummy __unused) } SYSINIT(devdtr, SI_SUB_DEVFS, SI_ORDER_SECOND, devdtr_init, NULL); + +#include "opt_ddb.h" +#ifdef DDB +#include + +#include + +DB_SHOW_COMMAND(cdev, db_show_cdev) +{ + struct cdev_priv *cdp; + struct cdev *dev; + u_int flags; + char buf[512]; + + if (!have_addr) { + TAILQ_FOREACH(cdp, &cdevp_list, cdp_list) { + dev = &cdp->cdp_c; + db_printf("%s %p\n", dev->si_name, 
dev); + if (db_pager_quit) + break; + } + return; + } + + dev = (struct cdev *)addr; + cdp = cdev2priv(dev); + db_printf("dev %s ref %d use %ld thr %ld inuse %u fdpriv %p\n", + dev->si_name, dev->si_refcount, dev->si_usecount, + dev->si_threadcount, cdp->cdp_inuse, cdp->cdp_fdpriv.lh_first); + db_printf("devsw %p si_drv0 %d si_drv1 %p si_drv2 %p\n", + dev->si_devsw, dev->si_drv0, dev->si_drv1, dev->si_drv2); + flags = dev->si_flags; +#define SI_FLAG(flag) do { \ + if (flags & (flag)) { \ + if (buf[0] != '\0') \ + strlcat(buf, ", ", sizeof(buf)); \ + strlcat(buf, (#flag) + 3, sizeof(buf)); \ + flags &= ~(flag); \ + } \ +} while (0) + buf[0] = '\0'; + SI_FLAG(SI_ETERNAL); + SI_FLAG(SI_ALIAS); + SI_FLAG(SI_NAMED); + SI_FLAG(SI_CHEAPCLONE); + SI_FLAG(SI_CHILD); + SI_FLAG(SI_DEVOPEN); + SI_FLAG(SI_CONSOPEN); + SI_FLAG(SI_DUMPDEV); + SI_FLAG(SI_CANDELETE); + SI_FLAG(SI_CLONELIST); + db_printf("si_flags %s\n", buf); + + flags = cdp->cdp_flags; +#define CDP_FLAG(flag) do { \ + if (flags & (flag)) { \ + if (buf[0] != '\0') \ + strlcat(buf, ", ", sizeof(buf)); \ + strlcat(buf, (#flag) + 4, sizeof(buf)); \ + flags &= ~(flag); \ + } \ +} while (0) + buf[0] = '\0'; + CDP_FLAG(CDP_ACTIVE); + CDP_FLAG(CDP_SCHED_DTR); + db_printf("cdp_flags %s\n", buf); +} +#endif diff --git a/sys/kern/tty.c b/sys/kern/tty.c index 917fb7b..9021849 100644 --- a/sys/kern/tty.c +++ b/sys/kern/tty.c @@ -1797,7 +1797,7 @@ ttyhook_register(struct tty **rtp, struct proc *p, int fd, struct cdev *dev; struct cdevsw *cdp; struct filedesc *fdp; - int error; + int error, ref; /* Validate the file descriptor. */ if ((fdp = p->p_fd) == NULL) @@ -1823,7 +1823,7 @@ ttyhook_register(struct tty **rtp, struct proc *p, int fd, } /* Make sure it is a TTY. */ - cdp = devvn_refthread(fp->f_vnode, &dev); + cdp = devvn_refthread(fp->f_vnode, &dev, &ref); if (cdp == NULL) { error = ENXIO; goto done1; @@ -1859,7 +1859,7 @@ ttyhook_register(struct tty **rtp, struct proc *p, int fd, th->th_rint = ttyhook_defrint; done3: tty_unlock(tp); -done2: dev_relthread(dev); +done2: dev_relthread(dev, ref); done1: fdrop(fp, curthread); return (error); } diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c index 1bc259f..34f7acb 100644 --- a/sys/kern/vfs_bio.c +++ b/sys/kern/vfs_bio.c @@ -3203,6 +3203,7 @@ dev_strategy(struct cdev *dev, struct buf *bp) { struct cdevsw *csw; struct bio *bip; + int ref; if ((!bp->b_iocmd) || (bp->b_iocmd & (bp->b_iocmd - 1))) panic("b_iocmd botch"); @@ -3224,7 +3225,7 @@ dev_strategy(struct cdev *dev, struct buf *bp) KASSERT(dev->si_refcount > 0, ("dev_strategy on un-referenced struct cdev *(%s)", devtoname(dev))); - csw = dev_refthread(dev); + csw = dev_refthread(dev, &ref); if (csw == NULL) { g_destroy_bio(bip); bp->b_error = ENXIO; @@ -3233,7 +3234,7 @@ dev_strategy(struct cdev *dev, struct buf *bp) return; } (*csw->d_strategy)(bip); - dev_relthread(dev); + dev_relthread(dev, ref); } /* diff --git a/sys/sys/conf.h b/sys/sys/conf.h index 82ea088..6c60601 100644 --- a/sys/sys/conf.h +++ b/sys/sys/conf.h @@ -54,6 +54,7 @@ struct file; struct cdev { void *__si_reserved; u_int si_flags; +#define SI_ETERNAL 0x0001 /* never destroyed */ #define SI_ALIAS 0x0002 /* carrier of alias name */ #define SI_NAMED 0x0004 /* make_dev{_alias} has been called */ #define SI_CHEAPCLONE 0x0008 /* can be removed_dev'ed when vnode reclaims */ @@ -249,9 +250,9 @@ int destroy_dev_sched(struct cdev *dev); int destroy_dev_sched_cb(struct cdev *dev, void (*cb)(void *), void *arg); void destroy_dev_drain(struct cdevsw *csw); void drain_dev_clone_events(void); 
-struct cdevsw *dev_refthread(struct cdev *_dev); -struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp); -void dev_relthread(struct cdev *_dev); +struct cdevsw *dev_refthread(struct cdev *_dev, int *_ref); +struct cdevsw *devvn_refthread(struct vnode *vp, struct cdev **devp, int *_ref); +void dev_relthread(struct cdev *_dev, int _ref); void dev_depends(struct cdev *_pdev, struct cdev *_cdev); void dev_ref(struct cdev *dev); void dev_refl(struct cdev *dev); @@ -262,10 +263,11 @@ struct cdev *make_dev(struct cdevsw *_devsw, int _unit, uid_t _uid, gid_t _gid, struct cdev *make_dev_cred(struct cdevsw *_devsw, int _unit, struct ucred *_cr, uid_t _uid, gid_t _gid, int _perms, const char *_fmt, ...) __printflike(7, 8); -#define MAKEDEV_REF 0x1 -#define MAKEDEV_WHTOUT 0x2 -#define MAKEDEV_NOWAIT 0x4 -#define MAKEDEV_WAITOK 0x8 +#define MAKEDEV_REF 0x01 +#define MAKEDEV_WHTOUT 0x02 +#define MAKEDEV_NOWAIT 0x04 +#define MAKEDEV_WAITOK 0x08 +#define MAKEDEV_ETERNAL 0x10 struct cdev *make_dev_credf(int _flags, struct cdevsw *_devsw, int _unit, struct ucred *_cr, uid_t _uid, gid_t _gid, int _mode, @@ -279,6 +281,12 @@ void dev_lock(void); void dev_unlock(void); void setconf(void); +#ifdef KLD_MODULE +#define MAKEDEV_ETERNAL_KLD 0 +#else +#define MAKEDEV_ETERNAL_KLD MAKEDEV_ETERNAL +#endif + #define dev2unit(d) ((d)->si_drv0) typedef void (*cdevpriv_dtr_t)(void *data); diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h index 95f3d4e..c38d645 100644 --- a/sys/sys/vnode.h +++ b/sys/sys/vnode.h @@ -243,6 +243,7 @@ struct xvnode { #define VV_ROOT 0x0001 /* root of its filesystem */ #define VV_ISTTY 0x0002 /* vnode represents a tty */ #define VV_NOSYNC 0x0004 /* unlinked, stop syncing */ +#define VV_ETERNALDEV 0x0008 /* device that is never destroyed */ #define VV_CACHEDLABEL 0x0010 /* Vnode has valid cached MAC label */ #define VV_TEXT 0x0020 /* vnode is a pure text prototype */ #define VV_COPYONWRITE 0x0040 /* vnode is doing copy-on-write */ diff --git a/sys/vm/device_pager.c b/sys/vm/device_pager.c index 7d8d361..c7eab1d 100644 --- a/sys/vm/device_pager.c +++ b/sys/vm/device_pager.c @@ -108,6 +108,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t off; vm_memattr_t dummy; struct cdevsw *csw; + int ref; /* * Offset should be page aligned. @@ -122,7 +123,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, * Make sure this device can be mapped. 
*/ dev = handle; - csw = dev_refthread(dev); + csw = dev_refthread(dev, &ref); if (csw == NULL) return (NULL); @@ -135,7 +136,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, npages = OFF_TO_IDX(size); for (off = foff; npages--; off += PAGE_SIZE) if (csw->d_mmap(dev, off, &paddr, (int)prot, &dummy) != 0) { - dev_relthread(dev); + dev_relthread(dev, ref); return (NULL); } @@ -177,7 +178,7 @@ dev_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, object->size = pindex; } mtx_unlock(&dev_pager_mtx); - dev_relthread(dev); + dev_relthread(dev, ref); vm_object_deallocate(object1); return (object); } @@ -214,7 +215,7 @@ dev_pager_getpages(object, m, count, reqpage) vm_page_t m_paddr, page; vm_memattr_t memattr; struct cdev *dev; - int i, ret; + int i, ref, ret; struct cdevsw *csw; struct thread *td; struct file *fpop; @@ -225,7 +226,7 @@ dev_pager_getpages(object, m, count, reqpage) offset = page->pindex; memattr = object->memattr; VM_OBJECT_UNLOCK(object); - csw = dev_refthread(dev); + csw = dev_refthread(dev, &ref); if (csw == NULL) panic("dev_pager_getpage: no cdevsw"); td = curthread; @@ -235,7 +236,7 @@ dev_pager_getpages(object, m, count, reqpage) PROT_READ, &memattr); KASSERT(ret == 0, ("dev_pager_getpage: map function returns error")); td->td_fpop = fpop; - dev_relthread(dev); + dev_relthread(dev, ref); /* If "paddr" is a real page, perform a sanity check on "memattr". */ if ((m_paddr = vm_phys_paddr_to_vm_page(paddr)) != NULL && pmap_page_get_memattr(m_paddr) != memattr) { diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c index 999a481..bd9f98f 100644 --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -1292,15 +1292,15 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize, { vm_object_t obj; struct cdevsw *dsw; - int error, flags; + int error, flags, ref; flags = *flagsp; - dsw = dev_refthread(cdev); + dsw = dev_refthread(cdev, &ref); if (dsw == NULL) return (ENXIO); if (dsw->d_flags & D_MMAP_ANON) { - dev_relthread(cdev); + dev_relthread(cdev, ref); *maxprotp = VM_PROT_ALL; *flagsp |= MAP_ANON; return (0); @@ -1310,11 +1310,11 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize, */ if ((*maxprotp & VM_PROT_WRITE) == 0 && (prot & PROT_WRITE) != 0) { - dev_relthread(cdev); + dev_relthread(cdev, ref); return (EACCES); } if (flags & (MAP_PRIVATE|MAP_COPY)) { - dev_relthread(cdev); + dev_relthread(cdev, ref); return (EINVAL); } /* @@ -1324,7 +1324,7 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize, #ifdef MAC_XXX error = mac_cdev_check_mmap(td->td_ucred, cdev, prot); if (error != 0) { - dev_relthread(cdev); + dev_relthread(cdev, ref); return (error); } #endif @@ -1338,7 +1338,7 @@ vm_mmap_cdev(struct thread *td, vm_size_t objsize, * XXX assumes VM_PROT_* == PROT_* */ error = dsw->d_mmap_single(cdev, foff, objsize, objp, (int)prot); - dev_relthread(cdev); + dev_relthread(cdev, ref); if (error != ENODEV) return (error); obj = vm_pager_allocate(OBJT_DEVICE, cdev, objsize, prot, *foff,
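
Usage note (not part of the patch): the core API change above is that dev_refthread(), devvn_refthread() and dev_relthread() now carry a "ref" cookie recording whether a per-thread reference was actually taken; for SI_ETERNAL devices the lookup returns the cdevsw without touching dev_lock() and sets the cookie to 0, so the release becomes a no-op. The sketch below mirrors the giant_read() wrapper in the kern_conf.c hunks to show the expected calling convention; example_read() is a hypothetical name, not something introduced by this patch.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/uio.h>

/*
 * Illustrative sketch only -- modeled on the giant_read() wrapper in the
 * patch.  The added "ref" out-parameter records whether dev_refthread()
 * really took a per-thread reference on the cdev.
 */
static int
example_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	struct cdevsw *dsw;
	int error, ref;

	dsw = dev_refthread(dev, &ref);	/* reference-less for SI_ETERNAL devs */
	if (dsw == NULL)
		return (ENXIO);
	error = dsw->d_read(dev, uio, ioflag);
	dev_relthread(dev, ref);	/* drops the reference only if ref != 0 */
	return (error);
}

The same pattern applies to every converted caller in the patch (devfs_fp_check() consumers, dev_strategy(), the device pager, vm_mmap_cdev(), ttyhook_register()): the cookie obtained at lookup time is passed back verbatim at release time.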
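
Usage note (not part of the patch): the null.c hunk at the top shows the intended way to mark a device eternal. The sketch below restates that pattern for a hypothetical statically compiled driver; all "example" identifiers are invented for illustration. MAKEDEV_ETERNAL_KLD expands to MAKEDEV_ETERNAL only for a non-KLD build, so a module build keeps the old, destroyable behaviour.

#include <sys/param.h>
#include <sys/conf.h>

/* Illustrative sketch only -- hypothetical driver names throughout. */
static struct cdev *example_dev;

static struct cdevsw example_cdevsw = {
	.d_version =	D_VERSION,
	.d_name =	"example",
};

static void
example_attach(void)
{
	example_dev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &example_cdevsw,
	    0, NULL, UID_ROOT, GID_WHEEL, 0666, "example");
	/*
	 * Calling destroy_dev() on this cdev would now trip the new
	 * SI_ETERNAL KASSERT added to destroy_devl() in this patch.
	 */
}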