diff --git a/share/man/man9/vgone.9 b/share/man/man9/vgone.9 index dc10cda76e76..e915a703dd61 100644 --- a/share/man/man9/vgone.9 +++ b/share/man/man9/vgone.9 @@ -47,7 +47,7 @@ the removal from its mount point vnode list. If the vnode has a .Va v_usecount of zero, and its -.Dv VI_DOOMED +.Dv VIRF_DOOMED flag is not set, it is moved to the head of the free list as in most cases the vnode is about to be reused, or its file system is being unmounted. diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c index cbe25c07f14c..eeddf1735bce 100644 --- a/sys/cam/ctl/ctl_backend_block.c +++ b/sys/cam/ctl/ctl_backend_block.c @@ -843,7 +843,7 @@ ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) val = vattr.va_bytes / be_lun->cbe_lun.blocksize; } if (strcmp(attrname, "blocksavail") == 0 && - (be_lun->vn->v_iflag & VI_DOOMED) == 0) { + !VN_IS_DOOMED(be_lun->vn)) { error = VFS_STATFS(be_lun->vn->v_mount, &statfs); if (error == 0) val = statfs.f_bavail * statfs.f_bsize / diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c index dd1b868cac7a..42d299157dcc 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c @@ -605,7 +605,7 @@ zfsctl_relock_dot(vnode_t *dvp, int ltype) vn_lock(dvp, LK_DOWNGRADE | LK_RETRY); /* Relock for the "." case may left us with reclaimed vnode. */ - if ((dvp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dvp)) { vrele(dvp); return (SET_ERROR(ENOENT)); } diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c index 8eeeaffcc378..018945e0b8c7 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c @@ -1413,7 +1413,7 @@ zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags) * Relock for the "." case could leave us with * reclaimed vnode. 
*/ - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { vrele(dvp); return (SET_ERROR(ENOENT)); } @@ -5913,7 +5913,7 @@ zfs_vptocnp(struct vop_vptocnp_args *ap) vput(covered_vp); } vn_lock(vp, ltype | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) error = SET_ERROR(ENOENT); return (error); } @@ -5932,11 +5932,11 @@ zfs_lock(ap) znode_t *zp; int err; - err = vop_stdlock(ap); + err = vop_lock(ap); if (err == 0 && (ap->a_flags & LK_NOWAIT) == 0) { vp = ap->a_vp; zp = vp->v_data; - if (vp->v_mount != NULL && (vp->v_iflag & VI_DOOMED) == 0 && + if (vp->v_mount != NULL && !VN_IS_DOOMED(vp) && zp != NULL && (zp->z_pflags & ZFS_XATTR) == 0) VERIFY(!RRM_LOCK_HELD(&zp->z_zfsvfs->z_teardown_lock)); } @@ -5989,7 +5989,11 @@ struct vop_vector zfs_vnodeops = { .vop_vptocnp = zfs_vptocnp, #ifdef DIAGNOSTIC .vop_lock1 = zfs_lock, +#else + .vop_lock1 = vop_lock, #endif + .vop_unlock = vop_unlock, + .vop_islocked = vop_islocked, }; struct vop_vector zfs_fifoops = { diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c index c9e88d345974..c3dcd3d13d58 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c @@ -1216,8 +1216,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) locked = VOP_ISLOCKED(vp); VI_LOCK(vp); - if ((vp->v_iflag & VI_DOOMED) != 0 && - locked != LK_EXCLUSIVE) { + if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) { /* * The vnode is doomed and this thread doesn't * hold the exclusive lock on it, so the vnode diff --git a/sys/dev/beri/virtio/virtio_block.c b/sys/dev/beri/virtio/virtio_block.c index 7c8a03966a39..11dc4a82439d 100644 --- a/sys/dev/beri/virtio/virtio_block.c +++ b/sys/dev/beri/virtio/virtio_block.c @@ -258,7 +258,7 @@ open_file(struct beri_vtblk_softc *sc, struct thread *td) if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); - if (nd.ni_vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(nd.ni_vp)) { return (1); } } diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c index 2a7a8395761b..fa122a5cd8e6 100644 --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -1453,7 +1453,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td) goto bad; if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); - if (nd.ni_vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(nd.ni_vp)) { /* Forced unmount. 
*/ error = EBADF; goto bad; diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c index ffbb4a25262f..49677862d930 100644 --- a/sys/dev/xen/blkback/blkback.c +++ b/sys/dev/xen/blkback/blkback.c @@ -2627,7 +2627,7 @@ xbb_open_file(struct xbb_softc *xbb) */ if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); - if (xbb->vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(xbb->vn)) { error = EBADF; xenbus_dev_fatal(xbb->dev, error, "error locking file %s", diff --git a/sys/fs/autofs/autofs_vnops.c b/sys/fs/autofs/autofs_vnops.c index 09d77b9fccf8..be9f559ce1fd 100644 --- a/sys/fs/autofs/autofs_vnops.c +++ b/sys/fs/autofs/autofs_vnops.c @@ -169,8 +169,8 @@ autofs_trigger_vn(struct vnode *vp, const char *path, int pathlen, sx_xunlock(&autofs_softc->sc_lock); vn_lock(vp, lock_flags | LK_RETRY); vunref(vp); - if ((vp->v_iflag & VI_DOOMED) != 0) { - AUTOFS_DEBUG("VI_DOOMED"); + if (VN_IS_DOOMED(vp)) { + AUTOFS_DEBUG("VIRF_DOOMED"); return (ENOENT); } @@ -661,7 +661,7 @@ autofs_node_vn(struct autofs_node *anp, struct mount *mp, int flags, sx_xunlock(&anp->an_vnode_lock); return (error); } - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { /* * We got forcibly unmounted. */ diff --git a/sys/fs/cd9660/cd9660_vnops.c b/sys/fs/cd9660/cd9660_vnops.c index a923bd2da551..b1bf2ce42344 100644 --- a/sys/fs/cd9660/cd9660_vnops.c +++ b/sys/fs/cd9660/cd9660_vnops.c @@ -260,7 +260,7 @@ cd9660_ioctl(ap) vp = ap->a_vp; vn_lock(vp, LK_SHARED | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); return (EBADF); } diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c index eaff5f9a7df1..bb2832682b7c 100644 --- a/sys/fs/devfs/devfs_vnops.c +++ b/sys/fs/devfs/devfs_vnops.c @@ -252,7 +252,7 @@ devfs_populate_vp(struct vnode *vp) devfs_unmount_final(dmp); return (ERESTART); } - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { sx_xunlock(&dmp->dm_lock); return (ERESTART); } @@ -441,7 +441,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode, vput(vp); return (ENOENT); } - else if ((vp->v_iflag & VI_DOOMED) != 0) { + else if (VN_IS_DOOMED(vp)) { mtx_lock(&devfs_de_interlock); if (de->de_vnode == vp) { de->de_vnode = NULL; @@ -592,7 +592,7 @@ devfs_close(struct vop_close_args *ap) SESS_LOCK(p->p_session); VI_LOCK(vp); if (vp->v_usecount == 2 && vcount(vp) == 1 && - (vp->v_iflag & VI_DOOMED) == 0) { + !VN_IS_DOOMED(vp)) { p->p_session->s_ttyvp = NULL; p->p_session->s_ttydp = NULL; oldvp = vp; @@ -622,7 +622,7 @@ devfs_close(struct vop_close_args *ap) VI_LOCK(vp); if (vp->v_usecount == 1 && vcount(vp) == 1) dflags |= FLASTCLOSE; - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { /* Forced close. 
*/ dflags |= FREVOKE | FNONBLOCK; } else if (dsw->d_flags & D_TRACKCLOSE) { @@ -1562,7 +1562,7 @@ devfs_rioctl(struct vop_ioctl_args *ap) vp = ap->a_vp; vn_lock(vp, LK_SHARED | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); return (EBADF); } diff --git a/sys/fs/ext2fs/ext2_lookup.c b/sys/fs/ext2fs/ext2_lookup.c index c41c70e9020e..bcd9bb37d22f 100644 --- a/sys/fs/ext2fs/ext2_lookup.c +++ b/sys/fs/ext2fs/ext2_lookup.c @@ -665,7 +665,7 @@ ext2_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp pdp = vdp; if (flags & ISDOTDOT) { error = vn_vget_ino(pdp, ino, cnp->cn_lkflags, &tdp); - if (pdp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(pdp)) { if (error == 0) vput(tdp); error = ENOENT; diff --git a/sys/fs/fdescfs/fdesc_vnops.c b/sys/fs/fdescfs/fdesc_vnops.c index 127bdccd8c40..9b319fbc8f17 100644 --- a/sys/fs/fdescfs/fdesc_vnops.c +++ b/sys/fs/fdescfs/fdesc_vnops.c @@ -347,7 +347,7 @@ fdesc_lookup(struct vop_lookup_args *ap) vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE); vdrop(dvp); fvp = dvp; - if ((dvp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(dvp)) error = ENOENT; } else { /* diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c index f1b1e0c66ffc..6f4644af280e 100644 --- a/sys/fs/fuse/fuse_io.c +++ b/sys/fs/fuse/fuse_io.c @@ -1116,7 +1116,7 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td) struct fuse_vnode_data *fvdat = VTOFUD(vp); int error = 0; - if (vp->v_iflag & VI_DOOMED) + if (VN_IS_DOOMED(vp)) return 0; ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf"); diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c index 04bb167b1bff..9e97540612af 100644 --- a/sys/fs/nfsclient/nfs_clport.c +++ b/sys/fs/nfsclient/nfs_clport.c @@ -149,13 +149,13 @@ nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp, * get called on this vnode between when NFSVOPLOCK() drops * the VI_LOCK() and vget() acquires it again, so that it * hasn't yet had v_usecount incremented. If this were to - * happen, the VI_DOOMED flag would be set, so check for + * happen, the VIRF_DOOMED flag would be set, so check for * that here. Since we now have the v_usecount incremented, - * we should be ok until we vrele() it, if the VI_DOOMED + * we should be ok until we vrele() it, if the VIRF_DOOMED * flag isn't set now. 
*/ VI_LOCK(nvp); - if ((nvp->v_iflag & VI_DOOMED)) { + if (VN_IS_DOOMED(nvp)) { VI_UNLOCK(nvp); vrele(nvp); error = ENOENT; @@ -350,7 +350,7 @@ nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize, vfs_hash_ref(mntp, hash, td, &nvp, newnfs_vncmpf, nfhp); if (nvp == NULL) { error = ENOENT; - } else if ((nvp->v_iflag & VI_DOOMED) != 0) { + } else if (VN_IS_DOOMED(nvp)) { error = ENOENT; vrele(nvp); } else { diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c index f79a25fa3eec..7d721ae8da3e 100644 --- a/sys/fs/nfsclient/nfs_clvnops.c +++ b/sys/fs/nfsclient/nfs_clvnops.c @@ -1235,7 +1235,7 @@ nfs_lookup(struct vop_lookup_args *ap) vrele(newvp); *vpp = NULLVP; } else if (error == ENOENT) { - if (dvp->v_iflag & VI_DOOMED) + if (VN_IS_DOOMED(dvp)) return (ENOENT); /* * We only accept a negative hit in the cache if the @@ -1340,7 +1340,7 @@ nfs_lookup(struct vop_lookup_args *ap) error = vfs_busy(mp, 0); NFSVOPLOCK(dvp, ltype | LK_RETRY); vfs_rel(mp); - if (error == 0 && (dvp->v_iflag & VI_DOOMED)) { + if (error == 0 && VN_IS_DOOMED(dvp)) { vfs_unbusy(mp); error = ENOENT; } @@ -1355,7 +1355,7 @@ nfs_lookup(struct vop_lookup_args *ap) vfs_unbusy(mp); if (newvp != dvp) NFSVOPLOCK(dvp, ltype | LK_RETRY); - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { if (error == 0) { if (newvp == dvp) vrele(newvp); @@ -3139,7 +3139,7 @@ nfs_advlock(struct vop_advlock_args *ap) else cred = td->td_ucred; NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { error = EBADF; goto out; } @@ -3169,7 +3169,7 @@ nfs_advlock(struct vop_advlock_args *ap) if (error) return (EINTR); NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { error = EBADF; goto out; } diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c index 9dc7da1a74c8..48513080f224 100644 --- a/sys/fs/nfsserver/nfs_nfsdport.c +++ b/sys/fs/nfsserver/nfs_nfsdport.c @@ -1457,7 +1457,7 @@ nfsvno_link(struct nameidata *ndp, struct vnode *vp, struct ucred *cred, } if (!error) { NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) == 0) + if (!VN_IS_DOOMED(vp)) error = VOP_LINK(ndp->ni_dvp, vp, &ndp->ni_cnd); else error = EPERM; @@ -1738,7 +1738,7 @@ nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp, * Updates the file rev and sets the mtime and ctime * to the current clock time, returning the va_filerev and va_Xtime * values. - * Return ESTALE to indicate the vnode is VI_DOOMED. + * Return ESTALE to indicate the vnode is VIRF_DOOMED. 
*/ int nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap, @@ -1750,7 +1750,7 @@ nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap, vfs_timestamp(&va.va_mtime); if (NFSVOPISLOCKED(vp) != LK_EXCLUSIVE) { NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) return (ESTALE); } (void) VOP_SETATTR(vp, &va, nd->nd_cred); diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c index 3bfe8f6444fd..830fd23d6f1c 100644 --- a/sys/fs/nfsserver/nfs_nfsdserv.c +++ b/sys/fs/nfsserver/nfs_nfsdserv.c @@ -3024,7 +3024,7 @@ nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram, } vp = dp; NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) == 0) + if (!VN_IS_DOOMED(vp)) nd->nd_repstat = nfsrv_opencheck(clientid, &stateid, stp, vp, nd, p, nd->nd_repstat); else diff --git a/sys/fs/nfsserver/nfs_nfsdstate.c b/sys/fs/nfsserver/nfs_nfsdstate.c index a509eb2bdb91..ce24aa62d636 100644 --- a/sys/fs/nfsserver/nfs_nfsdstate.c +++ b/sys/fs/nfsserver/nfs_nfsdstate.c @@ -2159,7 +2159,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, NFSUNLOCKSTATE(); NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); vnode_unlocked = 0; - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) ret = NFSERR_SERVERFAULT; NFSLOCKSTATE(); } @@ -2257,7 +2257,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, NFSUNLOCKSTATE(); NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); vnode_unlocked = 0; - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { error = NFSERR_SERVERFAULT; goto out; } @@ -2379,7 +2379,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, } if (vnode_unlocked != 0) { NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) + if (error == 0 && VN_IS_DOOMED(vp)) error = NFSERR_SERVERFAULT; } if (other_lop) @@ -5133,7 +5133,7 @@ nfsrv_checkstable(struct nfsclient *clp) * Return 0 to indicate the conflict can't be revoked and 1 to indicate * the revocation worked and the conflicting client is "bye, bye", so it * can be tried again. - * Return 2 to indicate that the vnode is VI_DOOMED after NFSVOPLOCK(). + * Return 2 to indicate that the vnode is VIRF_DOOMED after NFSVOPLOCK(). * Unlocks State before a non-zero value is returned. */ static int @@ -5164,7 +5164,7 @@ nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, vnode_t vp, *haslockp = 1; if (vp != NULL) { NFSVOPLOCK(vp, lktype | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) return (2); } return (1); @@ -5339,7 +5339,7 @@ nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, NFSPROC_T *p, *haslockp = 1; if (vp != NULL) { NFSVOPLOCK(vp, lktype | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { *haslockp = 0; NFSLOCKV4ROOTMUTEX(); nfsv4_unlock(&nfsv4rootfs_lock, 1); @@ -8313,7 +8313,7 @@ nfsrv_copymr(vnode_t vp, vnode_t fvp, vnode_t dvp, struct nfsdevice *ds, * changed until the copy is complete. 
*/ NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if (ret == 0 && (vp->v_iflag & VI_DOOMED) != 0) { + if (ret == 0 && VN_IS_DOOMED(vp)) { NFSD_DEBUG(4, "nfsrv_copymr: lk_exclusive doomed\n"); ret = ESTALE; } diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c index cf7aed525845..a6f31b976d92 100644 --- a/sys/fs/nullfs/null_subr.c +++ b/sys/fs/nullfs/null_subr.c @@ -225,7 +225,7 @@ null_nodeget(mp, lowervp, vpp) */ if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) { vn_lock(lowervp, LK_UPGRADE | LK_RETRY); - if ((lowervp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(lowervp)) { vput(lowervp); return (ENOENT); } diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c index e2ef7e874d9f..352fa84c35b4 100644 --- a/sys/fs/nullfs/null_vfsops.c +++ b/sys/fs/nullfs/null_vfsops.c @@ -442,7 +442,7 @@ nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp) * extra unlock before allowing the final vdrop() to * free the vnode. */ - KASSERT((vp->v_iflag & VI_DOOMED) != 0, + KASSERT(VN_IS_DOOMED(vp), ("not reclaimed nullfs vnode %p", vp)); VOP_UNLOCK(vp, 0); } else { @@ -453,7 +453,7 @@ nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp) * relevant for future reclamations. */ ASSERT_VOP_ELOCKED(vp, "unlink_lowervp"); - KASSERT((vp->v_iflag & VI_DOOMED) == 0, + KASSERT(!VN_IS_DOOMED(vp), ("reclaimed nullfs vnode %p", vp)); xp->null_flags &= ~NULLV_NOUNLOCK; } diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c index 7b01da41b15e..03b16ccc17b0 100644 --- a/sys/fs/nullfs/null_vnops.c +++ b/sys/fs/nullfs/null_vnops.c @@ -396,7 +396,7 @@ null_lookup(struct vop_lookup_args *ap) * doomed state and return error. */ if ((error == 0 || error == EJUSTRETURN) && - (dvp->v_iflag & VI_DOOMED) != 0) { + VN_IS_DOOMED(dvp)) { error = ENOENT; if (lvp != NULL) vput(lvp); diff --git a/sys/fs/pseudofs/pseudofs_vnops.c b/sys/fs/pseudofs/pseudofs_vnops.c index ce15547e44f5..0a3c9e967706 100644 --- a/sys/fs/pseudofs/pseudofs_vnops.c +++ b/sys/fs/pseudofs/pseudofs_vnops.c @@ -290,7 +290,7 @@ pfs_ioctl(struct vop_ioctl_args *va) vn = va->a_vp; vn_lock(vn, LK_SHARED | LK_RETRY); - if (vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vn)) { VOP_UNLOCK(vn, 0); return (EBADF); } @@ -512,7 +512,7 @@ pfs_lookup(struct vop_cachedlookup_args *va) vfs_rel(mp); if (error != 0) PFS_RETURN(ENOENT); - if (vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vn)) { vfs_unbusy(mp); PFS_RETURN(ENOENT); } @@ -581,13 +581,13 @@ pfs_lookup(struct vop_cachedlookup_args *va) if (cnp->cn_flags & ISDOTDOT) { vfs_unbusy(mp); vn_lock(vn, LK_EXCLUSIVE | LK_RETRY); - if (vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vn)) { vput(*vpp); *vpp = NULL; PFS_RETURN(ENOENT); } } - if (cnp->cn_flags & MAKEENTRY && !(vn->v_iflag & VI_DOOMED)) + if (cnp->cn_flags & MAKEENTRY && !VN_IS_DOOMED(vn)) cache_enter(vn, *vpp, cnp); PFS_RETURN (0); failed: diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c index ab1eb0574c1b..4e84e571e93a 100644 --- a/sys/fs/smbfs/smbfs_io.c +++ b/sys/fs/smbfs/smbfs_io.c @@ -637,7 +637,7 @@ smbfs_vinvalbuf(struct vnode *vp, struct thread *td) struct smbnode *np = VTOSMB(vp); int error = 0; - if (vp->v_iflag & VI_DOOMED) + if (VN_IS_DOOMED(vp)) return 0; while (np->n_flag & NFLUSHINPROG) { diff --git a/sys/fs/smbfs/smbfs_vnops.c b/sys/fs/smbfs/smbfs_vnops.c index 456d501d225e..9d94c14bef6b 100644 --- a/sys/fs/smbfs/smbfs_vnops.c +++ b/sys/fs/smbfs/smbfs_vnops.c @@ -1345,7 +1345,7 @@ smbfs_lookup(ap) error = ENOENT; goto out; } - if ((dvp->v_iflag & VI_DOOMED) != 0) { + if 
(VN_IS_DOOMED(dvp)) { vfs_unbusy(mp); error = ENOENT; goto out; @@ -1355,7 +1355,7 @@ smbfs_lookup(ap) error = smbfs_nget(mp, dvp, name, nmlen, NULL, &vp); vfs_unbusy(mp); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); - if ((dvp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dvp)) { if (error == 0) vput(vp); error = ENOENT; diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c index 94b518166898..75fec2d088e5 100644 --- a/sys/fs/tmpfs/tmpfs_subr.c +++ b/sys/fs/tmpfs/tmpfs_subr.c @@ -598,15 +598,15 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag, MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0); VI_LOCK(vp); if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) || - ((vp->v_iflag & VI_DOOMED) != 0 && - (lkflag & LK_NOWAIT) != 0)) { + (VN_IS_DOOMED(vp) && + (lkflag & LK_NOWAIT) != 0)) { VI_UNLOCK(vp); TMPFS_NODE_UNLOCK(node); error = ENOENT; vp = NULL; goto out; } - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); node->tn_vpstate |= TMPFS_VNODE_WRECLAIM; while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) { @@ -671,7 +671,7 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag, MPASS(vp != NULL); /* lkflag is ignored, the lock is exclusive */ - (void) vn_lock(vp, lkflag | LK_RETRY); + (void) vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); vp->v_data = node; vp->v_type = node->tn_type; diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c index b1dbdf02bab2..637b2f6bf178 100644 --- a/sys/fs/tmpfs/tmpfs_vnops.c +++ b/sys/fs/tmpfs/tmpfs_vnops.c @@ -1578,7 +1578,7 @@ tmpfs_vptocnp(struct vop_vptocnp_args *ap) tmpfs_free_node(tm, tnp); return (0); } - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { tmpfs_free_node(tm, tnp); return (ENOENT); } @@ -1632,6 +1632,9 @@ struct vop_vector tmpfs_vnodeop_entries = { .vop_whiteout = tmpfs_whiteout, .vop_bmap = VOP_EOPNOTSUPP, .vop_vptocnp = tmpfs_vptocnp, + .vop_lock1 = vop_lock, + .vop_unlock = vop_unlock, + .vop_islocked = vop_islocked, }; /* diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c index faa95ea49041..85b8db5709ac 100644 --- a/sys/fs/unionfs/union_subr.c +++ b/sys/fs/unionfs/union_subr.c @@ -127,7 +127,8 @@ unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp, VI_LOCK_FLAGS(vp, MTX_DUPOK); VI_UNLOCK(dvp); vp->v_iflag &= ~VI_OWEINACT; - if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) { + if (VN_IS_DOOMED(vp) || + ((vp->v_iflag & VI_DOINGINACT) != 0)) { VI_UNLOCK(vp); vp = NULLVP; } else @@ -163,7 +164,8 @@ unionfs_ins_cached_vnode(struct unionfs_node *uncp, vp = UNIONFSTOV(unp); VI_LOCK_FLAGS(vp, MTX_DUPOK); vp->v_iflag &= ~VI_OWEINACT; - if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) { + if (VN_IS_DOOMED(vp) || + ((vp->v_iflag & VI_DOINGINACT) != 0)) { LIST_INSERT_HEAD(hd, uncp, un_hash); VI_UNLOCK(vp); vp = NULLVP; diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c index 55962841e6ea..2285569988d0 100644 --- a/sys/kern/kern_lock.c +++ b/sys/kern/kern_lock.c @@ -1156,6 +1156,86 @@ lockmgr_unlock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk) return (0); } +/* + * Leightweight entry points for common operations. + * + * Functionality is similar to sx locks, that is none of the additional lockmgr + * features are supported (including shared locking disablement, returning with + * an error after sleep or unlocking the interlock). + * + * If in doubt, use lockmgr_*_fast_path. 
+ */ +int +lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line) +{ + uintptr_t x; + + MPASS((flags & LK_TYPE_MASK) == LK_SHARED); + MPASS((flags & LK_INTERLOCK) == 0); + MPASS((lk->lock_object.lo_flags & LK_NOSHARE) == 0); + + if (LK_CAN_WITNESS(flags)) + WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER, + file, line, NULL); + if (__predict_true(lockmgr_slock_try(lk, &x, flags, true))) { + lockmgr_note_shared_acquire(lk, 0, 0, file, line, flags); + return (0); + } + + return (lockmgr_slock_hard(lk, flags, NULL, file, line, NULL)); +} + +int +lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line) +{ + uintptr_t tid; + + MPASS((flags & LK_TYPE_MASK) == LK_EXCLUSIVE); + MPASS((flags & LK_INTERLOCK) == 0); + + if (LK_CAN_WITNESS(flags)) + WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER | + LOP_EXCLUSIVE, file, line, NULL); + tid = (uintptr_t)curthread; + if (atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { + lockmgr_note_exclusive_acquire(lk, 0, 0, file, line, + flags); + return (0); + } + + return (lockmgr_xlock_hard(lk, flags, NULL, file, line, NULL)); +} + +int +lockmgr_unlock(struct lock *lk) +{ + uintptr_t x, tid; + const char *file; + int line; + + file = __FILE__; + line = __LINE__; + + _lockmgr_assert(lk, KA_LOCKED, file, line); + x = lk->lk_lock; + if (__predict_true(x & LK_SHARE) != 0) { + if (lockmgr_sunlock_try(lk, &x)) { + lockmgr_note_shared_release(lk, file, line); + } else { + return (lockmgr_sunlock_hard(lk, x, LK_RELEASE, NULL, file, line)); + } + } else { + tid = (uintptr_t)curthread; + if (!lockmgr_recursed(lk) && + atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED)) { + lockmgr_note_exclusive_release(lk, file, line); + } else { + return (lockmgr_xunlock_hard(lk, x, LK_RELEASE, NULL, file, line)); + } + } + return (0); +} + int __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk, const char *wmesg, int pri, int timo, const char *file, int line) diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c index 6fb8692d4861..57c816346b24 100644 --- a/sys/kern/kern_lockf.c +++ b/sys/kern/kern_lockf.c @@ -592,7 +592,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, * the vnode interlock. */ VI_LOCK(vp); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); lf_free_lock(lock); return (ENOENT); @@ -622,7 +622,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, * trying to allocate memory. */ VI_LOCK(vp); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); sx_xlock(&lf_lock_states_lock); LIST_REMOVE(ls, ls_link); @@ -655,10 +655,10 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, /* * Recheck the doomed vnode after state->ls_lock is * locked. lf_purgelocks() requires that no new threads add - * pending locks when vnode is marked by VI_DOOMED flag. + * pending locks when vnode is marked by VIRF_DOOMED flag. */ VI_LOCK(vp); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { state->ls_threads--; wakeup(state); VI_UNLOCK(vp); @@ -771,12 +771,12 @@ lf_purgelocks(struct vnode *vp, struct lockf **statep) /* * For this to work correctly, the caller must ensure that no * other threads enter the locking system for this vnode, - * e.g. by checking VI_DOOMED. We wake up any threads that are + * e.g. by checking VIRF_DOOMED. We wake up any threads that are * sleeping waiting for locks on this vnode and then free all * the remaining locks. 
*/ VI_LOCK(vp); - KASSERT(vp->v_iflag & VI_DOOMED, + KASSERT(VN_IS_DOOMED(vp), ("lf_purgelocks: vp %p has not vgone yet", vp)); state = *statep; if (state == NULL) { diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c index e7139a6dab50..420dd1255e98 100644 --- a/sys/kern/kern_shutdown.c +++ b/sys/kern/kern_shutdown.c @@ -518,7 +518,7 @@ kern_reroot(void) VOP_UNLOCK(vp, 0); return (ENOENT); } - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); vfs_unbusy(mp); return (ENOENT); diff --git a/sys/kern/tty.c b/sys/kern/tty.c index 4ab510fc7bb1..d8e893db4b29 100644 --- a/sys/kern/tty.c +++ b/sys/kern/tty.c @@ -1250,9 +1250,9 @@ tty_drop_ctty(struct tty *tp, struct proc *p) * If we did have a vnode, release our reference. Ordinarily we manage * these at the devfs layer, but we can't necessarily know that we were * invoked on the vnode referenced in the session (i.e. the vnode we - * hold a reference to). We explicitly don't check VBAD/VI_DOOMED here + * hold a reference to). We explicitly don't check VBAD/VIRF_DOOMED here * to avoid a vnode leak -- in circumstances elsewhere where we'd hit a - * VI_DOOMED vnode, release has been deferred until the controlling TTY + * VIRF_DOOMED vnode, release has been deferred until the controlling TTY * is either changed or released. */ if (vp != NULL) diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c index 6787d4d4af81..f5335fb14651 100644 --- a/sys/kern/vfs_cache.c +++ b/sys/kern/vfs_cache.c @@ -1158,7 +1158,7 @@ cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cn if (ltype != VOP_ISLOCKED(*vpp)) { if (ltype == LK_EXCLUSIVE) { vn_lock(*vpp, LK_UPGRADE | LK_RETRY); - if ((*vpp)->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED((*vpp))) { /* forced unmount */ vrele(*vpp); *vpp = NULL; @@ -1401,7 +1401,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, error = vget_finish(*vpp, cnp->cn_lkflags, vs); if (cnp->cn_flags & ISDOTDOT) { vn_lock(dvp, ltype | LK_RETRY); - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { if (error == 0) vput(*vpp); *vpp = NULL; @@ -1706,9 +1706,9 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, u_long lnumcache; CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); - VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, + VNASSERT(vp == NULL || !VN_IS_DOOMED(vp), vp, ("cache_enter: Adding a doomed vnode")); - VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, + VNASSERT(dvp == NULL || !VN_IS_DOOMED(dvp), dvp, ("cache_enter: Doomed vnode used as src")); #ifdef DEBUG_CACHE @@ -2365,7 +2365,7 @@ vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) } *vp = dvp; - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { /* forced unmount */ vrele(dvp); error = ENOENT; @@ -2429,7 +2429,7 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, * mnt_vnodecovered can be NULL only for the * case of unmount. */ - if ((vp->v_iflag & VI_DOOMED) != 0 || + if (VN_IS_DOOMED(vp) || (vp1 = vp->v_mount->mnt_vnodecovered) == NULL || vp1->v_mountedhere != vp->v_mount) { vput(vp); diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c index 8aee8abfbe7e..e98f0847a253 100644 --- a/sys/kern/vfs_default.c +++ b/sys/kern/vfs_default.c @@ -544,6 +544,71 @@ vop_stdislocked(ap) return (lockstatus(ap->a_vp->v_vnlock)); } +/* + * Variants of the above set. 
+ * + * Differences are: + * - shared locking disablement is not supported + * - v_vnlock pointer is not honored + */ +int +vop_lock(ap) + struct vop_lock1_args /* { + struct vnode *a_vp; + int a_flags; + char *file; + int line; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + int flags = ap->a_flags; + struct mtx *ilk; + + MPASS(vp->v_vnlock == &vp->v_lock); + + if (__predict_false((flags & ~(LK_TYPE_MASK | LK_NODDLKTREAT | LK_RETRY)) != 0)) + goto other; + + switch (flags & LK_TYPE_MASK) { + case LK_SHARED: + return (lockmgr_slock(&vp->v_lock, flags, ap->a_file, ap->a_line)); + case LK_EXCLUSIVE: + return (lockmgr_xlock(&vp->v_lock, flags, ap->a_file, ap->a_line)); + } +other: + ilk = VI_MTX(vp); + return (lockmgr_lock_fast_path(&vp->v_lock, flags, + &ilk->lock_object, ap->a_file, ap->a_line)); +} + +int +vop_unlock(ap) + struct vop_unlock_args /* { + struct vnode *a_vp; + int a_flags; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + MPASS(vp->v_vnlock == &vp->v_lock); + MPASS(ap->a_flags == 0); + + return (lockmgr_unlock(&vp->v_lock)); +} + +int +vop_islocked(ap) + struct vop_islocked_args /* { + struct vnode *a_vp; + } */ *ap; +{ + struct vnode *vp = ap->a_vp; + + MPASS(vp->v_vnlock == &vp->v_lock); + + return (lockstatus(&vp->v_lock)); +} + /* * Return true for select/poll. */ @@ -594,7 +659,7 @@ vop_stdgetwritemount(ap) * Note that having a reference does not prevent forced unmount from * setting ->v_mount to NULL after the lock gets released. This is of * no consequence for typical consumers (most notably vn_start_write) - * since in this case the vnode is VI_DOOMED. Unmount might have + * since in this case the vnode is VIRF_DOOMED. Unmount might have * progressed far enough that its completion is only delayed by the * reference obtained here. The consumer only needs to concern itself * with releasing it. @@ -1019,7 +1084,7 @@ vop_stdadvise(struct vop_advise_args *ap) case POSIX_FADV_DONTNEED: error = 0; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); break; } diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c index b127c9daccd3..10941014e031 100644 --- a/sys/kern/vfs_lookup.c +++ b/sys/kern/vfs_lookup.c @@ -867,7 +867,7 @@ lookup(struct nameidata *ndp) } if ((dp->v_vflag & VV_ROOT) == 0) break; - if (dp->v_iflag & VI_DOOMED) { /* forced unmount */ + if (VN_IS_DOOMED(dp)) { /* forced unmount */ error = ENOENT; goto bad; } @@ -911,7 +911,7 @@ lookup(struct nameidata *ndp) if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) && dp != vp_crossmp && VOP_ISLOCKED(dp) == LK_SHARED) vn_lock(dp, LK_UPGRADE|LK_RETRY); - if ((dp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dp)) { error = ENOENT; goto bad; } @@ -1028,7 +1028,7 @@ lookup(struct nameidata *ndp) ((cnp->cn_flags & FOLLOW) || (cnp->cn_flags & TRAILINGSLASH) || *ndp->ni_next == '/')) { cnp->cn_flags |= ISSYMLINK; - if (dp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dp)) { /* * We can't know whether the directory was mounted with * NOSYMFOLLOW, so we can't follow safely. 
@@ -1135,7 +1135,7 @@ lookup(struct nameidata *ndp) if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) { vn_lock(dp, LK_UPGRADE | LK_RETRY); - if (dp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dp)) { error = ENOENT; goto bad2; } diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index dedbff6bbecd..88818019514b 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -137,7 +137,7 @@ static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, /* * Number of vnodes in existence. Increased whenever getnewvnode() - * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. + * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. */ static unsigned long numvnodes; @@ -367,7 +367,7 @@ sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) goto out; vp = nd.ni_vp; - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { /* * This vnode is being recycled. Return != 0 to let the caller * know that the sysctl had no effect. Return EAGAIN because a @@ -1033,7 +1033,7 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger) if (vp->v_usecount || (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || ((vp->v_iflag & VI_FREE) != 0) || - (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && + VN_IS_DOOMED(vp) || (vp->v_object != NULL && vp->v_object->resident_page_count > trigger)) { VI_UNLOCK(vp); goto next_iter; @@ -1049,7 +1049,7 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger) * v_usecount may have been bumped after VOP_LOCK() dropped * the vnode interlock and before it was locked again. * - * It is not necessary to recheck VI_DOOMED because it can + * It is not necessary to recheck VIRF_DOOMED because it can * only be set by another thread that holds both the vnode * lock and vnode interlock. If another thread has the * vnode lock before we get to VOP_LOCK() and obtains the @@ -1066,8 +1066,8 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger) vdropl(vp); goto next_iter_mntunlocked; } - KASSERT((vp->v_iflag & VI_DOOMED) == 0, - ("VI_DOOMED unexpectedly detected in vlrureclaim()")); + KASSERT(!VN_IS_DOOMED(vp), + ("VIRF_DOOMED unexpectedly detected in vlrureclaim()")); counter_u64_add(recycles_count, 1); vgonel(vp); VOP_UNLOCK(vp, 0); @@ -1436,7 +1436,7 @@ vtryrecycle(struct vnode *vp) __func__, vp); return (EBUSY); } - if ((vp->v_iflag & VI_DOOMED) == 0) { + if (!VN_IS_DOOMED(vp)) { counter_u64_add(recycles_count, 1); vgonel(vp); } @@ -1663,6 +1663,65 @@ getnewvnode(const char *tag, struct mount *mp, struct vop_vector *vops, return (0); } +static void +freevnode(struct vnode *vp) +{ + struct bufobj *bo; + + /* + * The vnode has been marked for destruction, so free it. + * + * The vnode will be returned to the zone where it will + * normally remain until it is needed for another vnode. We + * need to cleanup (or verify that the cleanup has already + * been done) any residual data left from its current use + * so as not to contaminate the freshly allocated vnode. 
+ */ + CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); + atomic_subtract_long(&numvnodes, 1); + bo = &vp->v_bufobj; + VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, + ("cleaned vnode still on the free list.")); + VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); + VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); + VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); + VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); + VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); + VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); + VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, + ("clean blk trie not empty")); + VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); + VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, + ("dirty blk trie not empty")); + VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); + VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); + VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); + VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, + ("Dangling rangelock waiters")); + VI_UNLOCK(vp); +#ifdef MAC + mac_vnode_destroy(vp); +#endif + if (vp->v_pollinfo != NULL) { + destroy_vpollinfo(vp->v_pollinfo); + vp->v_pollinfo = NULL; + } +#ifdef INVARIANTS + /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ + vp->v_op = NULL; +#endif + vp->v_mountedhere = NULL; + vp->v_unpcb = NULL; + vp->v_rdev = NULL; + vp->v_fifoinfo = NULL; + vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; + vp->v_irflag = 0; + vp->v_iflag = 0; + vp->v_vflag = 0; + bo->bo_flag = 0; + uma_zfree(vnode_zone, vp); +} + /* * Delete from old mount point vnode list, if on one. */ @@ -2694,7 +2753,7 @@ v_decr_devcount(struct vnode *vp) /* * Grab a particular vnode from the free list, increment its - * reference count and lock it. VI_DOOMED is set if the vnode + * reference count and lock it. VIRF_DOOMED is set if the vnode * is being destroyed. Only callers who specify LK_RETRY will * see doomed vnodes. If inactive processing was delayed in * vput try to do it here. @@ -2707,7 +2766,7 @@ v_decr_devcount(struct vnode *vp) * usecount is permitted to transition 1->0 without the interlock because * vnode is kept live by holdcnt. */ -static enum vgetstate +static enum vgetstate __always_inline _vget_prep(struct vnode *vp, bool interlock) { enum vgetstate vs; @@ -2715,7 +2774,10 @@ _vget_prep(struct vnode *vp, bool interlock) if (refcount_acquire_if_not_zero(&vp->v_usecount)) { vs = VGET_USECOUNT; } else { - _vhold(vp, interlock); + if (interlock) + vholdl(vp); + else + vhold(vp); vs = VGET_HOLDCNT; } return (vs); @@ -2910,9 +2972,7 @@ vrefcnt(struct vnode *vp) return (vp->v_usecount); } -#define VPUTX_VRELE 1 -#define VPUTX_VPUT 2 -#define VPUTX_VUNREF 3 +enum vputx_variant { VPUTX_VRELE, VPUTX_VPUT, VPUTX_VUNREF }; /* * Decrement the use and hold counts for a vnode. @@ -2920,36 +2980,19 @@ vrefcnt(struct vnode *vp) * See an explanation near vget() as to why atomic operation is safe. 
*/ static void -vputx(struct vnode *vp, int func) +vputx(struct vnode *vp, enum vputx_variant func) { int error; KASSERT(vp != NULL, ("vputx: null vp")); if (func == VPUTX_VUNREF) ASSERT_VOP_LOCKED(vp, "vunref"); - else if (func == VPUTX_VPUT) - ASSERT_VOP_LOCKED(vp, "vput"); - else - KASSERT(func == VPUTX_VRELE, ("vputx: wrong func")); ASSERT_VI_UNLOCKED(vp, __func__); VNASSERT(vp->v_holdcnt > 0 && vp->v_usecount > 0, vp, ("%s: wrong ref counts", __func__)); CTR2(KTR_VFS, "%s: vp %p", __func__, vp); - /* - * It is an invariant that all VOP_* calls operate on a held vnode. - * We may be only having an implicit hold stemming from our usecount, - * which we are about to release. If we unlock the vnode afterwards we - * open a time window where someone else dropped the last usecount and - * proceeded to free the vnode before our unlock finished. For this - * reason we unlock the vnode early. This is a little bit wasteful as - * it may be the vnode is exclusively locked and inactive processing is - * needed, in which case we are adding work. - */ - if (func == VPUTX_VPUT) - VOP_UNLOCK(vp, 0); - /* * We want to hold the vnode until the inactive finishes to * prevent vgone() races. We drop the use count here and the @@ -2976,15 +3019,6 @@ vputx(struct vnode *vp, int func) return; } - error = 0; - - if (vp->v_usecount != 0) { - vn_printf(vp, "vputx: usecount not zero for vnode "); - panic("vputx: usecount not zero"); - } - - CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp); - /* * Check if the fs wants to perform inactive processing. Note we * may be only holding the interlock, in which case it is possible @@ -2992,7 +3026,7 @@ vputx(struct vnode *vp, int func) * Since vgone performs inactive on its own there is nothing to do * here but to drop our hold count. */ - if (__predict_false(vp->v_iflag & VI_DOOMED) || + if (__predict_false(VN_IS_DOOMED(vp)) || VOP_NEED_INACTIVE(vp) == 0) { vdropl(vp); return; @@ -3013,6 +3047,7 @@ vputx(struct vnode *vp, int func) VI_LOCK(vp); break; case VPUTX_VUNREF: + error = 0; if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) { error = VOP_LOCK(vp, LK_TRYUPGRADE | LK_INTERLOCK); VI_LOCK(vp); @@ -3045,11 +3080,21 @@ vrele(struct vnode *vp) * Release an already locked vnode. This give the same effects as * unlock+vrele(), but takes less time and avoids releasing and * re-aquiring the lock (as vrele() acquires the lock internally.) + * + * It is an invariant that all VOP_* calls operate on a held vnode. + * We may be only having an implicit hold stemming from our usecount, + * which we are about to release. If we unlock the vnode afterwards we + * open a time window where someone else dropped the last usecount and + * proceeded to free the vnode before our unlock finished. For this + * reason we unlock the vnode early. This is a little bit wasteful as + * it may be the vnode is exclusively locked and inactive processing is + * needed, in which case we are adding work. */ void vput(struct vnode *vp) { + VOP_UNLOCK(vp, 0); vputx(vp, VPUTX_VPUT); } @@ -3066,31 +3111,12 @@ vunref(struct vnode *vp) /* * Increase the hold count and activate if this is the first reference. 
*/ -void -_vhold(struct vnode *vp, bool locked) +static void +vhold_activate(struct vnode *vp) { struct mount *mp; - if (locked) - ASSERT_VI_LOCKED(vp, __func__); - else - ASSERT_VI_UNLOCKED(vp, __func__); - CTR2(KTR_VFS, "%s: vp %p", __func__, vp); - if (!locked) { - if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { - VNODE_REFCOUNT_FENCE_ACQ(); - VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, - ("_vhold: vnode with holdcnt is free")); - return; - } - VI_LOCK(vp); - } - if ((vp->v_iflag & VI_FREE) == 0) { - refcount_acquire(&vp->v_holdcnt); - if (!locked) - VI_UNLOCK(vp); - return; - } + ASSERT_VI_LOCKED(vp, __func__); VNASSERT(vp->v_holdcnt == 0, vp, ("%s: wrong hold count", __func__)); VNASSERT(vp->v_op != NULL, vp, @@ -3100,7 +3126,7 @@ _vhold(struct vnode *vp, bool locked) * and put it on the active list. */ VNASSERT(vp->v_mount != NULL, vp, - ("_vhold: vnode not on per mount vnode list")); + ("vhold: vnode not on per mount vnode list")); mp = vp->v_mount; mtx_lock(&mp->mnt_listmtx); if ((vp->v_mflag & VMP_TMPMNTFREELIST) != 0) { @@ -3121,8 +3147,36 @@ _vhold(struct vnode *vp, bool locked) mp->mnt_activevnodelistsize++; mtx_unlock(&mp->mnt_listmtx); refcount_acquire(&vp->v_holdcnt); - if (!locked) - VI_UNLOCK(vp); +} + +void +vhold(struct vnode *vp) +{ + + ASSERT_VI_UNLOCKED(vp, __func__); + CTR2(KTR_VFS, "%s: vp %p", __func__, vp); + if (refcount_acquire_if_not_zero(&vp->v_holdcnt)) { + VNODE_REFCOUNT_FENCE_ACQ(); + VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, + ("_vhold: vnode with holdcnt is free")); + return; + } + VI_LOCK(vp); + vholdl(vp); + VI_UNLOCK(vp); +} + +void +vholdl(struct vnode *vp) +{ + + ASSERT_VI_LOCKED(vp, __func__); + CTR2(KTR_VFS, "%s: vp %p", __func__, vp); + if ((vp->v_iflag & VI_FREE) == 0) { + refcount_acquire(&vp->v_holdcnt); + return; + } + vhold_activate(vp); } void @@ -3141,136 +3195,78 @@ vholdnz(struct vnode *vp) /* * Drop the hold count of the vnode. If this is the last reference to * the vnode we place it on the free list unless it has been vgone'd - * (marked VI_DOOMED) in which case we will free it. + * (marked VIRF_DOOMED) in which case we will free it. * * Because the vnode vm object keeps a hold reference on the vnode if * there is at least one resident non-cached page, the vnode cannot * leave the active list without the page cleanup done. */ -void -_vdrop(struct vnode *vp, bool locked) +static void +vdrop_deactivate(struct vnode *vp) { - struct bufobj *bo; struct mount *mp; - if (locked) - ASSERT_VI_LOCKED(vp, __func__); - else - ASSERT_VI_UNLOCKED(vp, __func__); - CTR2(KTR_VFS, "%s: vp %p", __func__, vp); - if (__predict_false((int)vp->v_holdcnt <= 0)) { - vn_printf(vp, "vdrop: holdcnt %d", vp->v_holdcnt); - panic("vdrop: wrong holdcnt"); - } - if (!locked) { - if (refcount_release_if_not_last(&vp->v_holdcnt)) - return; - VI_LOCK(vp); + /* + * Mark a vnode as free: remove it from its active list + * and put it up for recycling on the freelist. 
+ */ + VNASSERT(vp->v_op != NULL, vp, + ("vdrop: vnode already reclaimed.")); + VNASSERT(!VN_IS_DOOMED(vp), vp, + ("vdrop: returning doomed vnode")); + VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, + ("vdrop: vnode already free")); + VNASSERT(vp->v_holdcnt == 0, vp, + ("vdrop: freeing when we shouldn't")); + if ((vp->v_iflag & VI_OWEINACT) == 0) { + mp = vp->v_mount; + mtx_lock(&mp->mnt_listmtx); + vp->v_iflag &= ~VI_ACTIVE; + TAILQ_REMOVE(&mp->mnt_activevnodelist, vp, v_actfreelist); + mp->mnt_activevnodelistsize--; + TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, vp, v_actfreelist); + mp->mnt_tmpfreevnodelistsize++; + vp->v_iflag |= VI_FREE; + vp->v_mflag |= VMP_TMPMNTFREELIST; + VI_UNLOCK(vp); + if (mp->mnt_tmpfreevnodelistsize >= mnt_free_list_batch) + vnlru_return_batch_locked(mp); + mtx_unlock(&mp->mnt_listmtx); + } else { + VI_UNLOCK(vp); + counter_u64_add(free_owe_inact, 1); } - if (refcount_release(&vp->v_holdcnt) == 0) { +} + +void +vdrop(struct vnode *vp) +{ + + ASSERT_VI_UNLOCKED(vp, __func__); + CTR2(KTR_VFS, "%s: vp %p", __func__, vp); + if (refcount_release_if_not_last(&vp->v_holdcnt)) + return; + VI_LOCK(vp); + vdropl(vp); +} + +void +vdropl(struct vnode *vp) +{ + + ASSERT_VI_LOCKED(vp, __func__); + CTR2(KTR_VFS, "%s: vp %p", __func__, vp); + VNASSERT(VN_IS_DOOMED(vp) || (vp->v_iflag & VI_ACTIVE) != 0, vp, + ("vdrop: if vnode is not doomed it must be active")); + if (!refcount_release(&vp->v_holdcnt)) { VI_UNLOCK(vp); return; } - if ((vp->v_iflag & VI_DOOMED) == 0) { - /* - * Mark a vnode as free: remove it from its active list - * and put it up for recycling on the freelist. - */ - VNASSERT(vp->v_op != NULL, vp, - ("vdropl: vnode already reclaimed.")); - VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, - ("vnode already free")); - VNASSERT(vp->v_holdcnt == 0, vp, - ("vdropl: freeing when we shouldn't")); - if ((vp->v_iflag & VI_OWEINACT) == 0) { - mp = vp->v_mount; - if (mp != NULL) { - mtx_lock(&mp->mnt_listmtx); - if (vp->v_iflag & VI_ACTIVE) { - vp->v_iflag &= ~VI_ACTIVE; - TAILQ_REMOVE(&mp->mnt_activevnodelist, - vp, v_actfreelist); - mp->mnt_activevnodelistsize--; - } - TAILQ_INSERT_TAIL(&mp->mnt_tmpfreevnodelist, - vp, v_actfreelist); - mp->mnt_tmpfreevnodelistsize++; - vp->v_iflag |= VI_FREE; - vp->v_mflag |= VMP_TMPMNTFREELIST; - VI_UNLOCK(vp); - if (mp->mnt_tmpfreevnodelistsize >= - mnt_free_list_batch) - vnlru_return_batch_locked(mp); - mtx_unlock(&mp->mnt_listmtx); - } else { - VNASSERT((vp->v_iflag & VI_ACTIVE) == 0, vp, - ("vdropl: active vnode not on per mount " - "vnode list")); - mtx_lock(&vnode_free_list_mtx); - TAILQ_INSERT_TAIL(&vnode_free_list, vp, - v_actfreelist); - freevnodes++; - vp->v_iflag |= VI_FREE; - VI_UNLOCK(vp); - mtx_unlock(&vnode_free_list_mtx); - } - } else { - VI_UNLOCK(vp); - counter_u64_add(free_owe_inact, 1); - } + if (VN_IS_DOOMED(vp)) { + freevnode(vp); return; } - /* - * The vnode has been marked for destruction, so free it. - * - * The vnode will be returned to the zone where it will - * normally remain until it is needed for another vnode. We - * need to cleanup (or verify that the cleanup has already - * been done) any residual data left from its current use - * so as not to contaminate the freshly allocated vnode. 
- */ - CTR2(KTR_VFS, "%s: destroying the vnode %p", __func__, vp); - atomic_subtract_long(&numvnodes, 1); - bo = &vp->v_bufobj; - VNASSERT((vp->v_iflag & VI_FREE) == 0, vp, - ("cleaned vnode still on the free list.")); - VNASSERT(vp->v_data == NULL, vp, ("cleaned vnode isn't")); - VNASSERT(vp->v_holdcnt == 0, vp, ("Non-zero hold count")); - VNASSERT(vp->v_usecount == 0, vp, ("Non-zero use count")); - VNASSERT(vp->v_writecount == 0, vp, ("Non-zero write count")); - VNASSERT(bo->bo_numoutput == 0, vp, ("Clean vnode has pending I/O's")); - VNASSERT(bo->bo_clean.bv_cnt == 0, vp, ("cleanbufcnt not 0")); - VNASSERT(pctrie_is_empty(&bo->bo_clean.bv_root), vp, - ("clean blk trie not empty")); - VNASSERT(bo->bo_dirty.bv_cnt == 0, vp, ("dirtybufcnt not 0")); - VNASSERT(pctrie_is_empty(&bo->bo_dirty.bv_root), vp, - ("dirty blk trie not empty")); - VNASSERT(TAILQ_EMPTY(&vp->v_cache_dst), vp, ("vp has namecache dst")); - VNASSERT(LIST_EMPTY(&vp->v_cache_src), vp, ("vp has namecache src")); - VNASSERT(vp->v_cache_dd == NULL, vp, ("vp has namecache for ..")); - VNASSERT(TAILQ_EMPTY(&vp->v_rl.rl_waiters), vp, - ("Dangling rangelock waiters")); - VI_UNLOCK(vp); -#ifdef MAC - mac_vnode_destroy(vp); -#endif - if (vp->v_pollinfo != NULL) { - destroy_vpollinfo(vp->v_pollinfo); - vp->v_pollinfo = NULL; - } -#ifdef INVARIANTS - /* XXX Elsewhere we detect an already freed vnode via NULL v_op. */ - vp->v_op = NULL; -#endif - vp->v_mountedhere = NULL; - vp->v_unpcb = NULL; - vp->v_rdev = NULL; - vp->v_fifoinfo = NULL; - vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0; - vp->v_iflag = 0; - vp->v_vflag = 0; - bo->bo_flag = 0; - uma_zfree(vnode_zone, vp); + vdrop_deactivate(vp); } /* @@ -3582,9 +3578,9 @@ vgonel(struct vnode *vp) /* * Don't vgonel if we're already doomed. */ - if (vp->v_iflag & VI_DOOMED) + if (vp->v_irflag & VIRF_DOOMED) return; - vp->v_iflag |= VI_DOOMED; + vp->v_irflag |= VIRF_DOOMED; /* * Check to see if the vnode is in use. If so, we have to call @@ -3733,6 +3729,13 @@ vn_printf(struct vnode *vp, const char *fmt, ...) } buf[0] = '\0'; buf[1] = '\0'; + if (vp->v_irflag & VIRF_DOOMED) + strlcat(buf, "|VIRF_DOOMED", sizeof(buf)); + flags = vp->v_irflag & ~(VIRF_DOOMED); + if (flags != 0) { + snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags); + strlcat(buf, buf2, sizeof(buf)); + } if (vp->v_vflag & VV_ROOT) strlcat(buf, "|VV_ROOT", sizeof(buf)); if (vp->v_vflag & VV_ISTTY) @@ -3772,8 +3775,6 @@ vn_printf(struct vnode *vp, const char *fmt, ...) strlcat(buf, "|VI_TEXT_REF", sizeof(buf)); if (vp->v_iflag & VI_MOUNT) strlcat(buf, "|VI_MOUNT", sizeof(buf)); - if (vp->v_iflag & VI_DOOMED) - strlcat(buf, "|VI_DOOMED", sizeof(buf)); if (vp->v_iflag & VI_FREE) strlcat(buf, "|VI_FREE", sizeof(buf)); if (vp->v_iflag & VI_ACTIVE) @@ -3782,8 +3783,8 @@ vn_printf(struct vnode *vp, const char *fmt, ...) 
strlcat(buf, "|VI_DOINGINACT", sizeof(buf)); if (vp->v_iflag & VI_OWEINACT) strlcat(buf, "|VI_OWEINACT", sizeof(buf)); - flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_DOOMED | VI_FREE | - VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT); + flags = vp->v_iflag & ~(VI_TEXT_REF | VI_MOUNT | VI_FREE | VI_ACTIVE | + VI_DOINGINACT | VI_OWEINACT); if (flags != 0) { snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags); strlcat(buf, buf2, sizeof(buf)); @@ -5208,7 +5209,7 @@ vop_close_post(void *ap, int rc) struct vop_close_args *a = ap; if (!rc && (a->a_cred != NOCRED || /* filter out revokes */ - (a->a_vp->v_iflag & VI_DOOMED) == 0)) { + !VN_IS_DOOMED(a->a_vp))) { VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ? NOTE_CLOSE_WRITE : NOTE_CLOSE); } @@ -5677,7 +5678,7 @@ vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) MNT_ILOCK(mp); vp = mp->mnt_rootvnode; if (vp != NULL) { - if ((vp->v_iflag & VI_DOOMED) == 0) { + if (!VN_IS_DOOMED(vp)) { vrefact(vp); MNT_IUNLOCK(mp); error = vn_lock(vp, flags); @@ -5717,7 +5718,7 @@ vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp) mp->mnt_rootvnode = *vpp; } else { if (mp->mnt_rootvnode != *vpp) { - if ((mp->mnt_rootvnode->v_iflag & VI_DOOMED) == 0) { + if (!VN_IS_DOOMED(mp->mnt_rootvnode)) { panic("%s: mismatch between vnode returned " " by VFS_CACHEDROOT and the one cached " " (%p != %p)", @@ -5739,7 +5740,7 @@ vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp) if (!vfs_op_thread_enter(mp)) return (vfs_cache_root_fallback(mp, flags, vpp)); vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode); - if (vp == NULL || (vp->v_iflag & VI_DOOMED)) { + if (vp == NULL || VN_IS_DOOMED(vp)) { vfs_op_thread_exit(mp); return (vfs_cache_root_fallback(mp, flags, vpp)); } @@ -5797,11 +5798,11 @@ __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp) KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch")); for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL; vp = TAILQ_NEXT(vp, v_nmntvnodes)) { - /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ - if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) + /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */ + if (vp->v_type == VMARKER || VN_IS_DOOMED(vp)) continue; VI_LOCK(vp); - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); continue; } @@ -5831,11 +5832,11 @@ __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp) (*mvp)->v_type = VMARKER; TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) { - /* Allow a racy peek at VI_DOOMED to save a lock acquisition. */ - if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0) + /* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. 
 */
+		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
 			continue;
 		VI_LOCK(vp);
-		if ((vp->v_iflag & VI_DOOMED) != 0) {
+		if (VN_IS_DOOMED(vp)) {
 			VI_UNLOCK(vp);
 			continue;
 		}
@@ -5992,7 +5993,7 @@ mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
 		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
 		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
 		    ("alien vnode on the active list %p %p", vp, mp));
-		if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
+		if (vp->v_mount == mp && !VN_IS_DOOMED(vp))
 			break;
 		nvp = TAILQ_NEXT(vp, v_actfreelist);
 		VI_UNLOCK(vp);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 64aef684c665..3f8d04fce74d 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -1820,7 +1820,7 @@ kern_funlinkat(struct thread *td, int dfd, const char *path, int fd,
 	    sb.st_ino != oldinum) {
 		error = EIDRM;	/* Identifier removed */
 	} else if (fp != NULL && fp->f_vnode != vp) {
-		if ((fp->f_vnode->v_iflag & VI_DOOMED) != 0)
+		if (VN_IS_DOOMED(fp->f_vnode))
 			error = EBADF;
 		else
 			error = EDEADLK;
@@ -3779,7 +3779,7 @@ kern_frmdirat(struct thread *td, int dfd, const char *path, int fd,
 	}
 
 	if (fp != NULL && fp->f_vnode != vp) {
-		if ((fp->f_vnode->v_iflag & VI_DOOMED) != 0)
+		if (VN_IS_DOOMED(fp->f_vnode))
 			error = EBADF;
 		else
 			error = EDEADLK;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index c69010dd9995..73308bd26fea 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -328,7 +328,7 @@ vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
 		fp->f_flag |= FHASLOCK;
 
 	vn_lock(vp, lock_flags | LK_RETRY);
-	if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
+	if (error == 0 && VN_IS_DOOMED(vp))
 		error = ENOENT;
 	return (error);
 }
@@ -1579,7 +1579,7 @@ _vn_lock(struct vnode *vp, int flags, char *file, int line)
 	    ("vn_lock: error %d incompatible with flags %#x", error, flags));
 
 	if ((flags & LK_RETRY) == 0) {
-		if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
+		if (error == 0 && VN_IS_DOOMED(vp)) {
 			VOP_UNLOCK(vp, 0);
 			error = ENOENT;
 		}
@@ -2132,7 +2132,7 @@ vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
 	vfs_rel(mp);
 	if (error != 0)
 		return (ENOENT);
-	if (vp->v_iflag & VI_DOOMED) {
+	if (VN_IS_DOOMED(vp)) {
 		vfs_unbusy(mp);
 		return (ENOENT);
 	}
@@ -2142,7 +2142,7 @@ vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
 	vfs_unbusy(mp);
 	if (error != 0 || *rvp != vp)
 		vn_lock(vp, ltype | LK_RETRY);
-	if (vp->v_iflag & VI_DOOMED) {
+	if (VN_IS_DOOMED(vp)) {
 		if (error == 0) {
 			if (*rvp == vp)
 				vunref(vp);
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 8ed97a196fce..803e749e176c 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -74,6 +74,10 @@ int	 lockmgr_lock_fast_path(struct lock *lk, u_int flags,
 	    struct lock_object *ilk, const char *file, int line);
 int	 lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
 	    struct lock_object *ilk);
+int	 lockmgr_slock(struct lock *lk, u_int flags, const char *file, int line);
+int	 lockmgr_xlock(struct lock *lk, u_int flags, const char *file, int line);
+int	 lockmgr_unlock(struct lock *lk);
+
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void	 _lockmgr_assert(const struct lock *lk, int what, const char *file, int line);
 #endif
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index b9a8d5458a0c..f1b12bfa5d8f 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -103,7 +103,8 @@ struct vnode {
 	 * Fields which define the identity of the vnode.  These fields are
 	 * owned by the filesystem (XXX: and vgone() ?)
 	 */
-	enum	vtype v_type;			/* u vnode type */
+	enum	vtype v_type:8;			/* u vnode type */
+	short	v_irflag;			/* i frequently read flags */
 	struct	vop_vector *v_op;		/* u vnode operations vector */
 	void	*v_data;			/* u private data for fs */
 
@@ -231,12 +232,13 @@ struct xvnode {
  * VI flags are protected by interlock and live in v_iflag
  * VV flags are protected by the vnode lock and live in v_vflag
  *
- * VI_DOOMED is doubly protected by the interlock and vnode lock.  Both
+ * VIRF_DOOMED is doubly protected by the interlock and vnode lock.  Both
  * are required for writing but the status may be checked with either.
  */
+#define	VIRF_DOOMED	0x0001	/* This vnode is being recycled */
+
 #define	VI_TEXT_REF	0x0001	/* Text ref grabbed use ref */
 #define	VI_MOUNT	0x0020	/* Mount in progress */
-#define	VI_DOOMED	0x0080	/* This vnode is being recycled */
 #define	VI_FREE		0x0100	/* This vnode is on the freelist */
 #define	VI_ACTIVE	0x0200	/* This vnode is on the active list */
 #define	VI_DOINGINACT	0x0800	/* VOP_INACTIVE is in progress */
@@ -651,17 +653,15 @@ int	vaccess_acl_posix1e(enum vtype type, uid_t file_uid,
 	    struct ucred *cred, int *privused);
 void	vattr_null(struct vattr *vap);
 int	vcount(struct vnode *vp);
-#define	vdrop(vp)	_vdrop((vp), 0)
-#define	vdropl(vp)	_vdrop((vp), 1)
-void	_vdrop(struct vnode *, bool);
+void	vdrop(struct vnode *);
+void	vdropl(struct vnode *);
 int	vflush(struct mount *mp, int rootrefs, int flags, struct thread *td);
 int	vget(struct vnode *vp, int flags, struct thread *td);
 enum vgetstate	vget_prep(struct vnode *vp);
 int	vget_finish(struct vnode *vp, int flags, enum vgetstate vs);
 void	vgone(struct vnode *vp);
-#define	vhold(vp)	_vhold((vp), 0)
-#define	vholdl(vp)	_vhold((vp), 1)
-void	_vhold(struct vnode *, bool);
+void	vhold(struct vnode *);
+void	vholdl(struct vnode *);
 void	vholdnz(struct vnode *);
 void	vinactive(struct vnode *, struct thread *);
 int	vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
@@ -761,11 +761,14 @@ int	vop_stdgetwritemount(struct vop_getwritemount_args *);
 int	vop_stdgetpages(struct vop_getpages_args *);
 int	vop_stdinactive(struct vop_inactive_args *);
 int	vop_stdneed_inactive(struct vop_need_inactive_args *);
-int	vop_stdislocked(struct vop_islocked_args *);
 int	vop_stdkqfilter(struct vop_kqfilter_args *);
 int	vop_stdlock(struct vop_lock1_args *);
-int	vop_stdputpages(struct vop_putpages_args *);
 int	vop_stdunlock(struct vop_unlock_args *);
+int	vop_stdislocked(struct vop_islocked_args *);
+int	vop_lock(struct vop_lock1_args *);
+int	vop_unlock(struct vop_unlock_args *);
+int	vop_islocked(struct vop_islocked_args *);
+int	vop_stdputpages(struct vop_putpages_args *);
 int	vop_nopoll(struct vop_poll_args *);
 int	vop_stdaccess(struct vop_access_args *ap);
 int	vop_stdaccessx(struct vop_accessx_args *ap);
@@ -889,6 +892,8 @@ do {								\
 #define	VOP_UNSET_TEXT_CHECKED(vp)	VOP_UNSET_TEXT((vp))
 #endif
 
+#define	VN_IS_DOOMED(vp)	((vp)->v_irflag & VIRF_DOOMED)
+
 void	vput(struct vnode *vp);
 void	vrele(struct vnode *vp);
 void	vref(struct vnode *vp);
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index c504b263536d..55130de7b3c7 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -124,7 +124,7 @@ ffs_update(vp, waitfor)
 	 *
 	 * Hold a reference to the vnode to protect against
 	 * ffs_snapgone().  Since we hold a reference, it can only
-	 * get reclaimed (VI_DOOMED flag) in a forcible downgrade
+	 * get reclaimed (VIRF_DOOMED flag) in a forcible downgrade
 	 * or unmount.  For an unmount, the entire filesystem will be
 	 * gone, so we cannot attempt to touch anything associated
 	 * with it while the vnode is unlocked; all we can do is
@@ -137,7 +137,7 @@ ffs_update(vp, waitfor)
 		pause("ffsupd", 1);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 		vrele(vp);
-		if ((vp->v_iflag & VI_DOOMED) != 0)
+		if (VN_IS_DOOMED(vp))
 			return (ENOENT);
 		goto loop;
 	}
diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c
index 200fe1a9b4e5..0ecb38726559 100644
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -131,7 +131,7 @@ ffs_rawread_sync(struct vnode *vp)
 
 		VI_LOCK(vp);
 		/* Check if vnode was reclaimed while unlocked. */
-		if ((vp->v_iflag & VI_DOOMED) != 0) {
+		if (VN_IS_DOOMED(vp)) {
 			VI_UNLOCK(vp);
 			if (upgraded != 0)
 				VOP_LOCK(vp, LK_DOWNGRADE);
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 6c0f4bae5750..6854aa69b5d8 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -12511,7 +12511,7 @@ softdep_fsync(vp)
 		 * not now, but then the user was not asking to have it
 		 * written, so we are not breaking any promises.
 		 */
-		if (vp->v_iflag & VI_DOOMED)
+		if (VN_IS_DOOMED(vp))
 			break;
 		/*
 		 * We prevent deadlock by always fetching inodes from the
@@ -12532,7 +12532,7 @@ softdep_fsync(vp)
 		error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, &pvp,
 		    FFSV_FORCEINSMQ);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-		if (vp->v_iflag & VI_DOOMED) {
+		if (VN_IS_DOOMED(vp)) {
 			if (error == 0)
 				vput(pvp);
 			error = ENOENT;
diff --git a/sys/ufs/ufs/ufs_inode.c b/sys/ufs/ufs/ufs_inode.c
index 8fd54b384d3b..53c259f07f40 100644
--- a/sys/ufs/ufs/ufs_inode.c
+++ b/sys/ufs/ufs/ufs_inode.c
@@ -102,7 +102,7 @@ ufs_inactive(ap)
 loop:
 	if (vn_start_secondary_write(vp, &mp, V_NOWAIT) != 0) {
 		/* Cannot delete file while file system is suspended */
-		if ((vp->v_iflag & VI_DOOMED) != 0) {
+		if (VN_IS_DOOMED(vp)) {
 			/* Cannot return before file is deleted */
 			(void) vn_start_secondary_write(vp, &mp,
 						       V_WAIT);
diff --git a/sys/ufs/ufs/ufs_lookup.c b/sys/ufs/ufs/ufs_lookup.c
index ec0a328988fe..d18b5f3a891c 100644
--- a/sys/ufs/ufs/ufs_lookup.c
+++ b/sys/ufs/ufs/ufs_lookup.c
@@ -728,7 +728,7 @@ ufs_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp,
 			 * Relock for the "." case may left us with
 			 * reclaimed vnode.
 			 */
-			if (vdp->v_iflag & VI_DOOMED) {
+			if (VN_IS_DOOMED(vdp)) {
 				vrele(vdp);
 				return (ENOENT);
 			}
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 98b8bf5e62ac..d8b4ee49e55d 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -2917,7 +2917,7 @@ swapongeom(struct vnode *vp)
 	int error;
 
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-	if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
+	if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) {
 		error = ENOENT;
 	} else {
 		g_topology_lock();
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index d8c7e2f5eea8..c57aa8d72e09 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -374,7 +374,7 @@ vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 	 * If no vp or vp is doomed or marked transparent to VM, we do not
 	 * have the page.
 	 */
-	if (vp == NULL || vp->v_iflag & VI_DOOMED)
+	if (vp == NULL || VN_IS_DOOMED(vp))
 		return FALSE;
 	/*
 	 * If the offset is beyond end of file we do
@@ -553,7 +553,7 @@ vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
 	if (address < 0)
 		return -1;
 
-	if (vp->v_iflag & VI_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return -1;
 
 	bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -591,7 +591,7 @@ vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
 	error = 0;
 
 	vp = object->handle;
-	if (vp->v_iflag & VI_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return VM_PAGER_BAD;
 
 	bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -815,7 +815,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 
 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 	    ("%s does not support devices", __func__));
-	if (vp->v_iflag & VI_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return (VM_PAGER_BAD);
 
 	object = vp->v_object;
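
For reviewers, the sketch below (illustrative only, not part of the patch) shows the relock-and-revalidate idiom that this conversion rewrites throughout the tree, expressed with the new macro. The helper name example_relock() is hypothetical; VN_IS_DOOMED(), VIRF_DOOMED, v_irflag, vn_lock(), and VOP_UNLOCK() are the interfaces added or used by the diff above. Per the vnode.h comment, VIRF_DOOMED requires both the interlock and the vnode lock for writing, but may be checked with either held.

/*
 * Illustrative sketch only -- not part of the patch.  example_relock()
 * is a hypothetical helper showing how callers are expected to
 * revalidate a vnode after dropping and reacquiring its lock.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/vnode.h>

static int
example_relock(struct vnode *vp)
{

	/* While the lock is dropped, a forced unmount may doom the vnode. */
	VOP_UNLOCK(vp, 0);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * VN_IS_DOOMED(vp) replaces the old (vp->v_iflag & VI_DOOMED)
	 * test; the flag now lives in v_irflag as VIRF_DOOMED and may be
	 * checked with either the interlock or the vnode lock held.
	 */
	if (VN_IS_DOOMED(vp))
		return (ENOENT);	/* Vnode was reclaimed; caller bails out. */
	return (0);
}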