diff --git a/sys/fs/devfs/devfs.h b/sys/fs/devfs/devfs.h
index 5f64a267279..cf706f34178 100644
--- a/sys/fs/devfs/devfs.h
+++ b/sys/fs/devfs/devfs.h
@@ -153,6 +153,7 @@ struct devfs_dirent {
 	struct timespec de_ctime;
 	struct vnode *de_vnode;
 	char *de_symlink;
+	int de_usecount;
 };
 
 struct devfs_mount {
@@ -192,6 +193,7 @@ char *devfs_fqpn(char *, struct devfs_mount *, struct devfs_dirent *,
 	    struct componentname *);
 void devfs_delete(struct devfs_mount *, struct devfs_dirent *, int);
 void devfs_dirent_free(struct devfs_dirent *);
+bool devfs_populate_needed(struct devfs_mount *dm);
 void devfs_populate(struct devfs_mount *);
 void devfs_cleanup(struct devfs_mount *);
 void devfs_unmount_final(struct devfs_mount *);
@@ -202,6 +204,11 @@ struct devfs_dirent *devfs_vmkdir(struct devfs_mount *, char *, int,
 struct devfs_dirent *devfs_find(struct devfs_dirent *, const char *, int,
     int);
 
+void devfs_ctty_ref(struct vnode *);
+void devfs_ctty_unref(struct vnode *);
+int devfs_usecountl(struct vnode *);
+int devfs_usecount(struct vnode *);
+
 #endif /* _KERNEL */
 
 #endif /* !_FS_DEVFS_DEVFS_H_ */
diff --git a/sys/fs/devfs/devfs_devs.c b/sys/fs/devfs/devfs_devs.c
index 417e13e2757..5df21a3712d 100644
--- a/sys/fs/devfs/devfs_devs.c
+++ b/sys/fs/devfs/devfs_devs.c
@@ -659,6 +659,13 @@ devfs_populate_loop(struct devfs_mount *dm, int cleanup)
 	return (0);
 }
 
+bool
+devfs_populate_needed(struct devfs_mount *dm)
+{
+
+	return (dm->dm_generation != devfs_generation);
+}
+
 /*
  * The caller needs to hold the dm for the duration of the call.
  */
@@ -668,9 +675,9 @@ devfs_populate(struct devfs_mount *dm)
 	unsigned gen;
 
 	sx_assert(&dm->dm_lock, SX_XLOCKED);
-	gen = devfs_generation;
-	if (dm->dm_generation == gen)
+	if (!devfs_populate_needed(dm))
 		return;
+	gen = devfs_generation;
 	while (devfs_populate_loop(dm, 0))
 		continue;
 	dm->dm_generation = gen;
diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c
index 0605fad13cf..74ca00b785c 100644
--- a/sys/fs/devfs/devfs_vnops.c
+++ b/sys/fs/devfs/devfs_vnops.c
@@ -222,6 +222,115 @@ devfs_clear_cdevpriv(void)
 	devfs_fpdrop(fp);
 }
 
+static void
+devfs_usecount_add(struct vnode *vp)
+{
+	struct devfs_dirent *de;
+	struct cdev *dev;
+
+	mtx_lock(&devfs_de_interlock);
+	VI_LOCK(vp);
+	if (VN_IS_DOOMED(vp)) {
+		goto out_unlock;
+	}
+	VNPASS(vp->v_type == VCHR, vp);
+
+	de = vp->v_data;
+	dev = vp->v_rdev;
+	MPASS(de != NULL);
+	MPASS(dev != NULL);
+	dev->si_usecount++;
+	de->de_usecount++;
+out_unlock:
+	VI_UNLOCK(vp);
+	mtx_unlock(&devfs_de_interlock);
+}
+
+static void
+devfs_usecount_subl(struct vnode *vp)
+{
+	struct devfs_dirent *de;
+	struct cdev *dev;
+
+	mtx_assert(&devfs_de_interlock, MA_OWNED);
+	ASSERT_VI_LOCKED(vp, __func__);
+	VNPASS(vp->v_type == VCHR || vp->v_type == VBAD, vp);
+
+	de = vp->v_data;
+	dev = vp->v_rdev;
+	if (de == NULL)
+		return;
+	if (dev == NULL) {
+		MPASS(de->de_usecount == 0);
+		return;
+	}
+	if (dev->si_usecount < de->de_usecount)
+		panic("%s: si_usecount underflow for dev %p "
+		    "(has %ld, dirent has %d)\n",
+		    __func__, dev, dev->si_usecount, de->de_usecount);
+	if (VN_IS_DOOMED(vp)) {
+		dev->si_usecount -= de->de_usecount;
+		de->de_usecount = 0;
+	} else {
+		if (de->de_usecount == 0)
+			panic("%s: de_usecount underflow for dev %p\n",
+			    __func__, dev);
+		dev->si_usecount--;
+		de->de_usecount--;
+	}
+}
+
+static void
+devfs_usecount_sub(struct vnode *vp)
+{
+
+	mtx_lock(&devfs_de_interlock);
+	VI_LOCK(vp);
+	devfs_usecount_subl(vp);
+	VI_UNLOCK(vp);
+	mtx_unlock(&devfs_de_interlock);
+}
+
+int
+devfs_usecountl(struct vnode *vp)
+{
+
+	VNPASS(vp->v_type == VCHR, vp);
+	mtx_assert(&devfs_de_interlock, MA_OWNED);
+	ASSERT_VI_LOCKED(vp, __func__);
+	return (vp->v_rdev->si_usecount);
+}
+
+int
+devfs_usecount(struct vnode *vp)
+{
+	int count;
+
+	VNPASS(vp->v_type == VCHR, vp);
+	mtx_lock(&devfs_de_interlock);
+	VI_LOCK(vp);
+	count = devfs_usecountl(vp);
+	VI_UNLOCK(vp);
+	mtx_unlock(&devfs_de_interlock);
+	return (count);
+}
+
+void
+devfs_ctty_ref(struct vnode *vp)
+{
+
+	vrefact(vp);
+	devfs_usecount_add(vp);
+}
+
+void
+devfs_ctty_unref(struct vnode *vp)
+{
+
+	devfs_usecount_sub(vp);
+	vrele(vp);
+}
+
 /*
  * On success devfs_populate_vp() returns with dmp->dm_lock held.
  */
@@ -235,6 +344,11 @@ devfs_populate_vp(struct vnode *vp)
 	ASSERT_VOP_LOCKED(vp, "devfs_populate_vp");
 
 	dmp = VFSTODEVFS(vp->v_mount);
+	if (!devfs_populate_needed(dmp)) {
+		sx_xlock(&dmp->dm_lock);
+		goto out_nopopulate;
+	}
+
 	locked = VOP_ISLOCKED(vp);
 
 	sx_xlock(&dmp->dm_lock);
@@ -252,6 +366,7 @@ devfs_populate_vp(struct vnode *vp)
 		devfs_unmount_final(dmp);
 		return (ERESTART);
 	}
+out_nopopulate:
 	if (VN_IS_DOOMED(vp)) {
 		sx_xunlock(&dmp->dm_lock);
 		return (ERESTART);
@@ -420,6 +535,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
 	struct cdev *dev;
 	struct devfs_mount *dmp;
 	struct cdevsw *dsw;
+	enum vgetstate vs;
 
 	dmp = VFSTODEVFS(mp);
 	if (de->de_flags & DE_DOOMED) {
@@ -432,10 +548,10 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
 	mtx_lock(&devfs_de_interlock);
 	vp = de->de_vnode;
 	if (vp != NULL) {
-		VI_LOCK(vp);
+		vs = vget_prep(vp);
 		mtx_unlock(&devfs_de_interlock);
 		sx_xunlock(&dmp->dm_lock);
-		vget(vp, lockmode | LK_INTERLOCK | LK_RETRY, curthread);
+		vget_finish(vp, lockmode | LK_RETRY, vs);
 		sx_xlock(&dmp->dm_lock);
 		if (devfs_allocv_drop_refs(0, dmp, de)) {
 			vput(vp);
@@ -480,7 +596,6 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode,
 	/* XXX: v_rdev should be protect by vnode lock */
 	vp->v_rdev = dev;
 	VNPASS(vp->v_usecount == 1, vp);
-	dev->si_usecount++;
 	/* Special casing of ttys for deadfs.  Probably redundant. */
 	dsw = dev->si_devsw;
 	if (dsw != NULL && (dsw->d_flags & D_TTY) != 0)
@@ -562,6 +677,7 @@ devfs_close(struct vop_close_args *ap)
 	struct proc *p;
 	struct cdev *dev = vp->v_rdev;
 	struct cdevsw *dsw;
+	struct devfs_dirent *de = vp->v_data;
 	int dflags, error, ref, vp_locked;
 
 	/*
@@ -580,7 +696,7 @@ devfs_close(struct vop_close_args *ap)
 	 * if the reference count is 2 (this last descriptor
 	 * plus the session), release the reference from the session.
 	 */
-	if (vp->v_usecount == 2 && td != NULL) {
+	if (de->de_usecount == 2 && td != NULL) {
 		p = td->td_proc;
 		PROC_LOCK(p);
 		if (vp == p->p_session->s_ttyvp) {
@@ -589,19 +705,21 @@ devfs_close(struct vop_close_args *ap)
 			sx_xlock(&proctree_lock);
 			if (vp == p->p_session->s_ttyvp) {
 				SESS_LOCK(p->p_session);
+				mtx_lock(&devfs_de_interlock);
 				VI_LOCK(vp);
-				if (vp->v_usecount == 2 && vcount(vp) == 1 &&
+				if (de->de_usecount == 2 && devfs_usecountl(vp) == 2 &&
 				    !VN_IS_DOOMED(vp)) {
 					p->p_session->s_ttyvp = NULL;
 					p->p_session->s_ttydp = NULL;
 					oldvp = vp;
 				}
 				VI_UNLOCK(vp);
+				mtx_unlock(&devfs_de_interlock);
 				SESS_UNLOCK(p->p_session);
 			}
 			sx_xunlock(&proctree_lock);
 			if (oldvp != NULL)
-				vrele(oldvp);
+				devfs_ctty_unref(oldvp);
 		} else
 			PROC_UNLOCK(p);
 	}
@@ -618,9 +736,12 @@ devfs_close(struct vop_close_args *ap)
 	if (dsw == NULL)
 		return (ENXIO);
 	dflags = 0;
+	mtx_lock(&devfs_de_interlock);
 	VI_LOCK(vp);
-	if (vp->v_usecount == 1 && vcount(vp) == 1)
+	if (de->de_usecount == 1 && devfs_usecountl(vp) == 1)
 		dflags |= FLASTCLOSE;
+	devfs_usecount_subl(vp);
+	mtx_unlock(&devfs_de_interlock);
 	if (VN_IS_DOOMED(vp)) {
 		/* Forced close. */
 		dflags |= FREVOKE | FNONBLOCK;
@@ -843,7 +964,7 @@ devfs_ioctl(struct vop_ioctl_args *ap)
 		return (0);
 	}
 
-	vrefact(vp);
+	devfs_ctty_ref(vp);
 	SESS_LOCK(sess);
 	vpold = sess->s_ttyvp;
 	sess->s_ttyvp = vp;
@@ -1152,6 +1273,9 @@ devfs_open(struct vop_open_args *ap)
 		return (ENXIO);
 	}
 
+	if (vp->v_type == VCHR)
+		devfs_usecount_add(vp);
+
 	vlocked = VOP_ISLOCKED(vp);
 	VOP_UNLOCK(vp);
 
@@ -1171,6 +1295,9 @@ devfs_open(struct vop_open_args *ap)
 	td->td_fpop = fpop;
 
 	vn_lock(vp, vlocked | LK_RETRY);
+	if (error != 0 && vp->v_type == VCHR)
+		devfs_usecount_sub(vp);
+
 	dev_relthread(dev, ref);
 	if (error != 0) {
 		if (error == ERESTART)
@@ -1399,19 +1526,28 @@ devfs_readlink(struct vop_readlink_args *ap)
 	return (uiomove(de->de_symlink, strlen(de->de_symlink), ap->a_uio));
 }
 
-static int
-devfs_reclaim(struct vop_reclaim_args *ap)
+static void
+devfs_reclaiml(struct vnode *vp)
 {
-	struct vnode *vp;
 	struct devfs_dirent *de;
 
-	vp = ap->a_vp;
-	mtx_lock(&devfs_de_interlock);
+	mtx_assert(&devfs_de_interlock, MA_OWNED);
 	de = vp->v_data;
 	if (de != NULL) {
+		MPASS(de->de_usecount == 0);
 		de->de_vnode = NULL;
 		vp->v_data = NULL;
 	}
+}
+
+static int
+devfs_reclaim(struct vop_reclaim_args *ap)
+{
+	struct vnode *vp;
+
+	vp = ap->a_vp;
+	mtx_lock(&devfs_de_interlock);
+	devfs_reclaiml(vp);
 	mtx_unlock(&devfs_de_interlock);
 	return (0);
 }
@@ -1425,14 +1561,14 @@ devfs_reclaim_vchr(struct vop_reclaim_args *ap)
 
 	vp = ap->a_vp;
 	MPASS(vp->v_type == VCHR);
-	devfs_reclaim(ap);
-
+	mtx_lock(&devfs_de_interlock);
 	VI_LOCK(vp);
+	devfs_usecount_subl(vp);
+	devfs_reclaiml(vp);
+	mtx_unlock(&devfs_de_interlock);
 	dev_lock();
 	dev = vp->v_rdev;
 	vp->v_rdev = NULL;
-	if (dev != NULL)
-		dev->si_usecount -= (vp->v_usecount > 0);
 	dev_unlock();
 	VI_UNLOCK(vp);
 	if (dev != NULL)
@@ -1492,13 +1628,14 @@ devfs_revoke(struct vop_revoke_args *ap)
 	struct cdev *dev;
 	struct cdev_priv *cdp;
 	struct devfs_dirent *de;
+	enum vgetstate vs;
 	u_int i;
 
 	KASSERT((ap->a_flags & REVOKEALL) != 0, ("devfs_revoke !REVOKEALL"));
 
 	dev = vp->v_rdev;
 	cdp = cdev2priv(dev);
- 
+
 	dev_lock();
 	cdp->cdp_inuse++;
 	dev_unlock();
@@ -1521,17 +1658,16 @@ devfs_revoke(struct vop_revoke_args *ap)
 			vp2 = de->de_vnode;
 			if (vp2 != NULL) {
 				dev_unlock();
-				VI_LOCK(vp2);
+				vs = vget_prep(vp2);
 				mtx_unlock(&devfs_de_interlock);
-				if (vget(vp2, LK_EXCLUSIVE | LK_INTERLOCK,
-				    curthread))
+				if (vget_finish(vp2, LK_EXCLUSIVE, vs) != 0)
 					goto loop;
 				vhold(vp2);
 				vgone(vp2);
 				vdrop(vp2);
 				vput(vp2);
 				break;
-			} 
+			}
 		}
 		if (vp2 != NULL) {
 			continue;
 		}
@@ -1927,6 +2063,9 @@ static struct vop_vector devfs_vnodeops = {
 #endif
 	.vop_symlink =		devfs_symlink,
 	.vop_vptocnp =		devfs_vptocnp,
+	.vop_lock1 =		vop_lock,
+	.vop_unlock =		vop_unlock,
+	.vop_islocked =		vop_islocked,
 };
 VFS_VOP_VECTOR_REGISTER(devfs_vnodeops);
 
@@ -1965,6 +2104,9 @@ static struct vop_vector devfs_specops = {
 	.vop_symlink =		VOP_PANIC,
 	.vop_vptocnp =		devfs_vptocnp,
 	.vop_write =		dead_write,
+	.vop_lock1 =		vop_lock,
+	.vop_unlock =		vop_unlock,
+	.vop_islocked =		vop_islocked,
 };
 VFS_VOP_VECTOR_REGISTER(devfs_specops);
diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c
index 4dd555a18db..60fd2a2c366 100644
--- a/sys/fs/nullfs/null_vnops.c
+++ b/sys/fs/nullfs/null_vnops.c
@@ -182,6 +182,7 @@
 #include <sys/namei.h>
 #include <sys/sysctl.h>
 #include <sys/vnode.h>
+#include <sys/stat.h>
 
 #include <fs/nullfs/null.h>
 
@@ -484,8 +485,20 @@ null_setattr(struct vop_setattr_args *ap)
 }
 
 /*
- * We handle getattr only to change the fsid.
+ * We handle stat and getattr only to change the fsid.
  */
+static int
+null_stat(struct vop_stat_args *ap)
+{
+	int error;
+
+	if ((error = null_bypass((struct vop_generic_args *)ap)) != 0)
+		return (error);
+
+	ap->a_sb->st_dev = ap->a_vp->v_mount->mnt_stat.f_fsid.val[0];
+	return (0);
+}
+
 static int
 null_getattr(struct vop_getattr_args *ap)
 {
@@ -918,6 +931,7 @@ struct vop_vector null_vnodeops = {
 	.vop_accessx =		null_accessx,
 	.vop_advlockpurge =	vop_stdadvlockpurge,
 	.vop_bmap =		VOP_EOPNOTSUPP,
+	.vop_stat =		null_stat,
 	.vop_getattr =		null_getattr,
 	.vop_getwritemount =	null_getwritemount,
 	.vop_inactive =		null_inactive,
diff --git a/sys/kern/kern_proc.c b/sys/kern/kern_proc.c
index ce9554b6dfa..50449938c5c 100644
--- a/sys/kern/kern_proc.c
+++ b/sys/kern/kern_proc.c
@@ -88,6 +88,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_extern.h>
 #include <vm/uma.h>
 
+#include <fs/devfs/devfs.h>
+
 #ifdef COMPAT_FREEBSD32
 #include <compat/freebsd32/freebsd32.h>
 #include <compat/freebsd32/freebsd32_util.h>
@@ -858,7 +860,7 @@ killjobc(void)
 			VOP_REVOKE(ttyvp, REVOKEALL);
 			VOP_UNLOCK(ttyvp);
 		}
-		vrele(ttyvp);
+		devfs_ctty_unref(ttyvp);
 		sx_xlock(&proctree_lock);
 	}
 }
diff --git a/sys/kern/tty.c b/sys/kern/tty.c
index 4c11ff56000..a6ed98f8629 100644
--- a/sys/kern/tty.c
+++ b/sys/kern/tty.c
@@ -67,6 +67,8 @@ __FBSDID("$FreeBSD$");
 #include <sys/ucred.h>
 #include <sys/vnode.h>
 
+#include <fs/devfs/devfs.h>
+
 #include <machine/stdarg.h>
 
 static MALLOC_DEFINE(M_TTY, "tty", "tty device");
@@ -1256,7 +1258,7 @@ tty_drop_ctty(struct tty *tp, struct proc *p)
 	 * is either changed or released.
 	 */
 	if (vp != NULL)
-		vrele(vp);
+		devfs_ctty_unref(vp);
 
 	return (0);
 }
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 670262ab1fe..1c51c4907ab 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -3143,8 +3143,8 @@ cache_fpl_handled_impl(struct cache_fpl *fpl, int error, int line)
 #define cache_fpl_handled(x, e)	cache_fpl_handled_impl((x), (e), __LINE__)
 
 #define CACHE_FPL_SUPPORTED_CN_FLAGS \
-	(LOCKLEAF | LOCKPARENT | WANTPARENT | FOLLOW | LOCKSHARED | SAVENAME | \
-	 ISOPEN | NOMACCHECK | AUDITVNODE1 | AUDITVNODE2)
+	(LOCKLEAF | LOCKPARENT | WANTPARENT | NOCACHE | FOLLOW | LOCKSHARED | SAVENAME | \
+	 WILLBEDIR | ISOPEN | NOMACCHECK | AUDITVNODE1 | AUDITVNODE2)
 
 #define CACHE_FPL_INTERNAL_CN_FLAGS \
 	(ISDOTDOT | MAKEENTRY | ISLASTCN)
@@ -3194,10 +3194,6 @@ cache_can_fplookup(struct cache_fpl *fpl)
 		cache_fpl_aborted(fpl);
 		return (false);
 	}
-	if (cnp->cn_nameiop != LOOKUP) {
-		cache_fpl_aborted(fpl);
-		return (false);
-	}
 	if (ndp->ni_dirfd != AT_FDCWD) {
 		cache_fpl_aborted(fpl);
 		return (false);
@@ -3407,6 +3403,22 @@ cache_fplookup_final_child(struct cache_fpl *fpl, enum vgetstate tvs)
 	return (cache_fpl_handled(fpl, 0));
 }
 
+/*
+ * They want to possibly modify the state of the namecache.
+ *
+ * Don't try to match the API contract, just leave.
+ * TODO: this leaves scalability on the table
+ */
+static int
+cache_fplookup_final_modifying(struct cache_fpl *fpl)
+{
+	struct componentname *cnp;
+
+	cnp = fpl->cnp;
+	MPASS(cnp->cn_nameiop != LOOKUP);
+	return (cache_fpl_partial(fpl));
+}
+
 static int __noinline
 cache_fplookup_final_withparent(struct cache_fpl *fpl)
 {
@@ -3489,6 +3501,10 @@ cache_fplookup_final(struct cache_fpl *fpl)
 
 	VNPASS(cache_fplookup_vnode_supported(dvp), dvp);
 
+	if (cnp->cn_nameiop != LOOKUP) {
+		return (cache_fplookup_final_modifying(fpl));
+	}
+
 	if ((cnp->cn_flags & (LOCKPARENT|WANTPARENT)) != 0)
 		return (cache_fplookup_final_withparent(fpl));
 
@@ -3633,6 +3649,12 @@ cache_fplookup_next(struct cache_fpl *fpl)
 	tvp = atomic_load_ptr(&ncp->nc_vp);
 	nc_flag = atomic_load_char(&ncp->nc_flag);
 	if ((nc_flag & NCF_NEGATIVE) != 0) {
+		/*
+		 * If they want to create an entry we need to replace this one.
+		 */
+		if (__predict_false(fpl->cnp->cn_nameiop == CREATE)) {
+			return (cache_fpl_partial(fpl));
+		}
 		negstate = NCP2NEGSTATE(ncp);
 		neg_hot = ((negstate->neg_flag & NEG_HOT) != 0);
 		if (__predict_false(!cache_ncp_canuse(ncp))) {
@@ -3938,7 +3960,7 @@ cache_fplookup_impl(struct vnode *dvp, struct cache_fpl *fpl)
 
 		VNPASS(cache_fplookup_vnode_supported(fpl->dvp), fpl->dvp);
 
-		error = VOP_FPLOOKUP_VEXEC(fpl->dvp, cnp->cn_cred, cnp->cn_thread);
+		error = VOP_FPLOOKUP_VEXEC(fpl->dvp, cnp->cn_cred);
 		if (__predict_false(error != 0)) {
 			error = cache_fplookup_failed_vexec(fpl, error);
 			break;
diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c
index 54ae088f4b7..e78fc25ec34 100644
--- a/sys/kern/vfs_lookup.c
+++ b/sys/kern/vfs_lookup.c
@@ -484,6 +484,9 @@ namei(struct nameidata *ndp)
 	    ("namei: nameiop contaminated with flags"));
 	KASSERT((cnp->cn_flags & OPMASK) == 0,
 	    ("namei: flags contaminated with nameiops"));
+	if (cnp->cn_flags & NOCACHE)
+		KASSERT(cnp->cn_nameiop != LOOKUP,
+		    ("%s: NOCACHE passed with LOOKUP", __func__));
 	MPASS(ndp->ni_startdir == NULL || ndp->ni_startdir->v_type == VDIR ||
 	    ndp->ni_startdir->v_type == VBAD);
 
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index fb245cb6e3d..86b9c748740 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -108,8 +108,6 @@ static int flushbuflist(struct bufv *bufv, int flags, struct bufobj *bo,
 static void	syncer_shutdown(void *arg, int howto);
 static int	vtryrecycle(struct vnode *vp);
 static void	v_init_counters(struct vnode *);
-static void	v_incr_devcount(struct vnode *);
-static void	v_decr_devcount(struct vnode *);
 static void	vgonel(struct vnode *);
 static void	vfs_knllock(void *arg);
 static void	vfs_knlunlock(void *arg);
@@ -408,7 +406,7 @@ sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS)
 
 	buf[req->newlen] = '\0';
 
-	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | NOCACHE | SAVENAME;
+	ndflags = LOCKLEAF | NOFOLLOW | AUDITVNODE1 | SAVENAME;
 	NDINIT(&nd, LOOKUP, ndflags, UIO_SYSSPACE, buf, curthread);
 	if ((error = namei(&nd)) != 0)
 		goto out;
@@ -2793,59 +2791,6 @@ v_init_counters(struct vnode *vp)
 	refcount_init(&vp->v_usecount, 1);
 }
 
-/*
- * Increment si_usecount of the associated device, if any.
- */
-static void
-v_incr_devcount(struct vnode *vp)
-{
-
-	ASSERT_VI_LOCKED(vp, __FUNCTION__);
-	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
-		dev_lock();
-		vp->v_rdev->si_usecount++;
-		dev_unlock();
-	}
-}
-
-/*
- * Decrement si_usecount of the associated device, if any.
- *
- * The caller is required to hold the interlock when transitioning a VCHR use
- * count to zero. This prevents a race with devfs_reclaim_vchr() that would
- * leak a si_usecount reference. The vnode lock will also prevent this race
- * if it is held while dropping the last ref.
- *
- * The race is:
- *
- *	CPU1			CPU2
- *				devfs_reclaim_vchr
- * make v_usecount == 0
- *				  VI_LOCK
- *				  sees v_usecount == 0, no updates
- *				  vp->v_rdev = NULL;
- *				  ...
- *				  VI_UNLOCK
- * VI_LOCK
- * v_decr_devcount
- *   sees v_rdev == NULL, no updates
- *
- * In this scenario si_devcount decrement is not performed.
- */
-static void
-v_decr_devcount(struct vnode *vp)
-{
-
-	ASSERT_VOP_LOCKED(vp, __func__);
-	ASSERT_VI_LOCKED(vp, __FUNCTION__);
-	if (vp->v_type == VCHR && vp->v_rdev != NULL) {
-		dev_lock();
-		VNPASS(vp->v_rdev->si_usecount > 0, vp);
-		vp->v_rdev->si_usecount--;
-		dev_unlock();
-	}
-}
-
 /*
  * Grab a particular vnode from the free list, increment its
  * reference count and lock it.  VIRF_DOOMED is set if the vnode
@@ -2921,41 +2866,6 @@ vget(struct vnode *vp, int flags, struct thread *td)
 	return (vget_finish(vp, flags, vs));
 }
 
-static void __noinline
-vget_finish_vchr(struct vnode *vp)
-{
-
-	VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)"));
-
-	/*
-	 * See the comment in vget_finish before usecount bump.
-	 */
-	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-		VNASSERT(old > 0, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-		refcount_release(&vp->v_holdcnt);
-#endif
-		return;
-	}
-
-	VI_LOCK(vp);
-	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-#ifdef INVARIANTS
-		int old = atomic_fetchadd_int(&vp->v_holdcnt, -1);
-		VNASSERT(old > 1, vp, ("%s: wrong hold count %d", __func__, old));
-#else
-		refcount_release(&vp->v_holdcnt);
-#endif
-		VI_UNLOCK(vp);
-		return;
-	}
-	v_incr_devcount(vp);
-	refcount_acquire(&vp->v_usecount);
-	VI_UNLOCK(vp);
-}
-
 int
 vget_finish(struct vnode *vp, int flags, enum vgetstate vs)
 {
@@ -2993,11 +2903,6 @@ vget_finish_ref(struct vnode *vp, enum vgetstate vs)
 	if (vs == VGET_USECOUNT)
 		return;
 
-	if (__predict_false(vp->v_type == VCHR)) {
-		vget_finish_vchr(vp);
-		return;
-	}
-
 	/*
 	 * We hold the vnode. If the usecount is 0 it will be utilized to keep
 	 * the vnode around. Otherwise someone else lended their hold count and
@@ -3019,61 +2924,12 @@
  * Increase the reference (use) and hold count of a vnode.
  * This will also remove the vnode from the free list if it is presently free.
  */
-static void __noinline
-vref_vchr(struct vnode *vp, bool interlock)
-{
-
-	/*
-	 * See the comment in vget_finish before usecount bump.
-	 */
-	if (!interlock) {
-		if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-			VNODE_REFCOUNT_FENCE_ACQ();
-			VNASSERT(vp->v_holdcnt > 0, vp,
-			    ("%s: active vnode not held", __func__));
-			return;
-		}
-		VI_LOCK(vp);
-		/*
-		 * By the time we get here the vnode might have been doomed, at
-		 * which point the 0->1 use count transition is no longer
-		 * protected by the interlock. Since it can't bounce back to
-		 * VCHR and requires vref semantics, punt it back
-		 */
-		if (__predict_false(vp->v_type == VBAD)) {
-			VI_UNLOCK(vp);
-			vref(vp);
-			return;
-		}
-	}
-	VNASSERT(vp->v_type == VCHR, vp, ("type != VCHR)"));
-	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
-		VNODE_REFCOUNT_FENCE_ACQ();
-		VNASSERT(vp->v_holdcnt > 0, vp,
-		    ("%s: active vnode not held", __func__));
-		if (!interlock)
-			VI_UNLOCK(vp);
-		return;
-	}
-	vhold(vp);
-	v_incr_devcount(vp);
-	refcount_acquire(&vp->v_usecount);
-	if (!interlock)
-		VI_UNLOCK(vp);
-	return;
-}
-
 void
 vref(struct vnode *vp)
 {
 	int old;
 
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false(vp->v_type == VCHR)) {
-		vref_vchr(vp, false);
-		return;
-	}
-
 	if (refcount_acquire_if_not_zero(&vp->v_usecount)) {
 		VNODE_REFCOUNT_FENCE_ACQ();
 		VNASSERT(vp->v_holdcnt > 0, vp,
@@ -3102,10 +2958,6 @@ vrefl(struct vnode *vp)
 
 	ASSERT_VI_LOCKED(vp, __func__);
 	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	if (__predict_false(vp->v_type == VCHR)) {
-		vref_vchr(vp, true);
-		return;
-	}
 	vref(vp);
 }
 
@@ -3246,9 +3098,6 @@ enum vput_op { VRELE, VPUT, VUNREF };
  * By releasing the last usecount we take ownership of the hold count which
  * provides liveness of the vnode, meaning we have to vdrop.
  *
- * If the vnode is of type VCHR we may need to decrement si_usecount, see
- * v_decr_devcount for details.
- *
 * For all vnodes we may need to perform inactive processing.  It requires an
 * exclusive lock on the vnode, while it is legal to call here with only a
 * shared lock (or no locks).  If locking the vnode in an expected manner fails,
@@ -3269,8 +3118,6 @@ vput_final(struct vnode *vp, enum vput_op func)
 	VNPASS(vp->v_holdcnt > 0, vp);
 
 	VI_LOCK(vp);
-	if (__predict_false(vp->v_type == VCHR && func != VRELE))
-		v_decr_devcount(vp);
 
 	/*
 	 * By the time we got here someone else might have transitioned
@@ -3358,28 +3205,9 @@
 * Releasing the last use count requires additional processing, see vput_final
 * above for details.
 *
- * Note that releasing use count without the vnode lock requires special casing
- * for VCHR, see v_decr_devcount for details.
- *
 * Comment above each variant denotes lock state on entry and exit.
 */
-static void __noinline
-vrele_vchr(struct vnode *vp)
-{
-
-	if (refcount_release_if_not_last(&vp->v_usecount))
-		return;
-	VI_LOCK(vp);
-	if (!refcount_release(&vp->v_usecount)) {
-		VI_UNLOCK(vp);
-		return;
-	}
-	v_decr_devcount(vp);
-	VI_UNLOCK(vp);
-	vput_final(vp, VRELE);
-}
-
 /*
 * in: any
 * out: same as passed in
@@ -3389,10 +3217,6 @@ vrele(struct vnode *vp)
 {
 
 	ASSERT_VI_UNLOCKED(vp, __func__);
-	if (__predict_false(vp->v_type == VCHR)) {
-		vrele_vchr(vp);
-		return;
-	}
 	if (!refcount_release(&vp->v_usecount))
 		return;
 	vput_final(vp, VRELE);
@@ -4143,20 +3967,6 @@ vgonel(struct vnode *vp)
 	vp->v_type = VBAD;
 }
 
-/*
- * Calculate the total number of references to a special device.
- */
-int
-vcount(struct vnode *vp)
-{
-	int count;
-
-	dev_lock();
-	count = vp->v_rdev->si_usecount;
-	dev_unlock();
-	return (count);
-}
-
 /*
  * Print out a description of a vnode.
  */
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 69a6be79820..19618a3d765 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -87,6 +87,8 @@ __FBSDID("$FreeBSD$");
 #include <vm/vm_page.h>
 #include <vm/uma.h>
 
+#include <fs/devfs/devfs.h>
+
 #include <ufs/ufs/quota.h>
 
 MALLOC_DEFINE(M_FADVISE, "fadvise", "posix_fadvise(2) information");
@@ -4220,7 +4222,7 @@ sys_revoke(struct thread *td, struct revoke_args *uap)
 		if (error != 0)
 			goto out;
 	}
-	if (vp->v_usecount > 1 || vcount(vp) > 1)
+	if (devfs_usecount(vp) > 0)
 		VOP_REVOKE(vp, REVOKEALL);
 out:
 	vput(vp);
diff --git a/sys/kern/vnode_if.src b/sys/kern/vnode_if.src
index 10bca613606..07e20e6f81c 100644
--- a/sys/kern/vnode_if.src
+++ b/sys/kern/vnode_if.src
@@ -153,7 +153,6 @@ vop_close {
 
 vop_fplookup_vexec {
 	IN struct vnode *vp;
 	IN struct ucred *cred;
-	IN struct thread *td;
 };
 
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 87c01a96206..2e21833c035 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -676,7 +676,6 @@ int	vaccess_acl_posix1e(enum vtype type, uid_t file_uid,
 	    gid_t file_gid, struct acl *acl, accmode_t accmode,
 	    struct ucred *cred);
 void	vattr_null(struct vattr *vap);
-int	vcount(struct vnode *vp);
 void	vlazy(struct vnode *);
 void	vdrop(struct vnode *);
 void	vdropl(struct vnode *);
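
Note on the vget_prep()/vget_finish() conversions in devfs_allocv() and devfs_revoke() above: the pair replaces the old VI_LOCK + vget(..., LK_INTERLOCK) sequence by securing a hold count while the discovering data structure's lock is still held, then finishing the reference acquisition after that lock is dropped. A minimal sketch of the pattern, not part of the patch; example_ref_and_lock() and its list mutex are hypothetical stand-ins for code that finds a vnode under a private lock:

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Sketch: safely reference and lock a vnode discovered under a private
 * mutex.  vget_prep() only bumps the hold count, which keeps the vnode
 * from being freed once the mutex is dropped; vget_finish() upgrades
 * the hold to a use count and acquires the vnode lock (and may sleep,
 * which is why the mutex must be dropped first).
 */
static int
example_ref_and_lock(struct mtx *listmtx, struct vnode *vp)
{
	enum vgetstate vs;

	mtx_assert(listmtx, MA_OWNED);
	vs = vget_prep(vp);
	mtx_unlock(listmtx);
	/* Fails (non-zero) if the vnode was doomed, absent LK_RETRY. */
	return (vget_finish(vp, LK_EXCLUSIVE, vs));
}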
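Similarly, the devfs_ctty_ref()/devfs_ctty_unref() helpers exist because the session's controlling-terminal vnode reference is long-lived and must now be reflected in the de_usecount/si_usecount accounting, or the FLASTCLOSE test in devfs_close() would be off by one. A simplified sketch of the intended pairing, modeled on the devfs_ioctl() TIOCSCTTY path above; example_set_ctty() is illustrative only and omits the s_ttydp and tty-state handling the real call sites perform:

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/vnode.h>

#include <fs/devfs/devfs.h>

/*
 * Sketch: install vp as a session's controlling terminal.  The ref
 * helper pairs vrefact() with devfs_usecount_add(); the unref helper
 * pairs devfs_usecount_sub() with vrele().  A plain vrele() on a
 * reference taken with devfs_ctty_ref() would leak a device use count.
 */
static void
example_set_ctty(struct session *sess, struct vnode *vp)
{
	struct vnode *vpold;

	devfs_ctty_ref(vp);
	SESS_LOCK(sess);
	vpold = sess->s_ttyvp;
	sess->s_ttyvp = vp;
	SESS_UNLOCK(sess);
	if (vpold != NULL)
		devfs_ctty_unref(vpold);
}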
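Finally, null_stat() mirrors null_getattr(): the operation is bypassed to the lower filesystem and only st_dev is rewritten to the nullfs mount's fsid, so a file is attributed to the mount it was reached through. A userland illustration of the observable effect; the mount point and paths are made-up examples:

#include <sys/stat.h>

#include <stdint.h>
#include <stdio.h>

/*
 * With e.g. "mount -t nullfs /tank /mnt/null", the same file reports a
 * different st_dev through each mount while st_ino stays the same.
 */
int
main(void)
{
	struct stat lower, upper;

	if (stat("/tank/file", &lower) != 0 ||
	    stat("/mnt/null/file", &upper) != 0)
		return (1);
	printf("st_dev %#jx vs %#jx, same inode: %d\n",
	    (uintmax_t)lower.st_dev, (uintmax_t)upper.st_dev,
	    lower.st_ino == upper.st_ino);
	return (0);
}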