diff --git a/share/man/man9/vgone.9 b/share/man/man9/vgone.9 index dc10cda76e76..e915a703dd61 100644 --- a/share/man/man9/vgone.9 +++ b/share/man/man9/vgone.9 @@ -47,7 +47,7 @@ the removal from its mount point vnode list. If the vnode has a .Va v_usecount of zero, and its -.Dv VI_DOOMED +.Dv VIRF_DOOMED flag is not set, it is moved to the head of the free list as in most cases the vnode is about to be reused, or its file system is being unmounted. diff --git a/sys/cam/ctl/ctl_backend_block.c b/sys/cam/ctl/ctl_backend_block.c index cbe25c07f14c..eeddf1735bce 100644 --- a/sys/cam/ctl/ctl_backend_block.c +++ b/sys/cam/ctl/ctl_backend_block.c @@ -843,7 +843,7 @@ ctl_be_block_getattr_file(struct ctl_be_block_lun *be_lun, const char *attrname) val = vattr.va_bytes / be_lun->cbe_lun.blocksize; } if (strcmp(attrname, "blocksavail") == 0 && - (be_lun->vn->v_iflag & VI_DOOMED) == 0) { + !VN_IS_DOOMED(be_lun->vn)) { error = VFS_STATFS(be_lun->vn->v_mount, &statfs); if (error == 0) val = statfs.f_bavail * statfs.f_bsize / diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c index dd1b868cac7a..42d299157dcc 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_ctldir.c @@ -605,7 +605,7 @@ zfsctl_relock_dot(vnode_t *dvp, int ltype) vn_lock(dvp, LK_DOWNGRADE | LK_RETRY); /* Relock for the "." case may left us with reclaimed vnode. 
*/ - if ((dvp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dvp)) { vrele(dvp); return (SET_ERROR(ENOENT)); } diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c index ae37dd1fba12..814d1576455b 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c @@ -1413,7 +1413,7 @@ zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags) * Relock for the "." case could leave us with * reclaimed vnode. */ - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { vrele(dvp); return (SET_ERROR(ENOENT)); } @@ -5913,7 +5913,7 @@ zfs_vptocnp(struct vop_vptocnp_args *ap) vput(covered_vp); } vn_lock(vp, ltype | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) error = SET_ERROR(ENOENT); return (error); } @@ -5936,7 +5936,7 @@ zfs_lock(ap) if (err == 0 && (ap->a_flags & LK_NOWAIT) == 0) { vp = ap->a_vp; zp = vp->v_data; - if (vp->v_mount != NULL && (vp->v_iflag & VI_DOOMED) == 0 && + if (vp->v_mount != NULL && !VN_IS_DOOMED(vp) && zp != NULL && (zp->z_pflags & ZFS_XATTR) == 0) VERIFY(!RRM_LOCK_HELD(&zp->z_zfsvfs->z_teardown_lock)); } diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c index c9e88d345974..c3dcd3d13d58 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_znode.c @@ -1216,8 +1216,7 @@ zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp) locked = VOP_ISLOCKED(vp); VI_LOCK(vp); - if ((vp->v_iflag & VI_DOOMED) != 0 && - locked != LK_EXCLUSIVE) { + if (VN_IS_DOOMED(vp) && locked != LK_EXCLUSIVE) { /* * The vnode is doomed and this thread doesn't * hold the exclusive lock on it, so the vnode diff --git a/sys/dev/beri/virtio/virtio_block.c b/sys/dev/beri/virtio/virtio_block.c index 7c8a03966a39..11dc4a82439d 
100644 --- a/sys/dev/beri/virtio/virtio_block.c +++ b/sys/dev/beri/virtio/virtio_block.c @@ -258,7 +258,7 @@ open_file(struct beri_vtblk_softc *sc, struct thread *td) if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); - if (nd.ni_vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(nd.ni_vp)) { return (1); } } diff --git a/sys/dev/md/md.c b/sys/dev/md/md.c index 2a7a8395761b..fa122a5cd8e6 100644 --- a/sys/dev/md/md.c +++ b/sys/dev/md/md.c @@ -1453,7 +1453,7 @@ mdcreate_vnode(struct md_s *sc, struct md_req *mdr, struct thread *td) goto bad; if (VOP_ISLOCKED(nd.ni_vp) != LK_EXCLUSIVE) { vn_lock(nd.ni_vp, LK_UPGRADE | LK_RETRY); - if (nd.ni_vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(nd.ni_vp)) { /* Forced unmount. */ error = EBADF; goto bad; diff --git a/sys/dev/xen/blkback/blkback.c b/sys/dev/xen/blkback/blkback.c index ffbb4a25262f..49677862d930 100644 --- a/sys/dev/xen/blkback/blkback.c +++ b/sys/dev/xen/blkback/blkback.c @@ -2627,7 +2627,7 @@ xbb_open_file(struct xbb_softc *xbb) */ if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); - if (xbb->vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(xbb->vn)) { error = EBADF; xenbus_dev_fatal(xbb->dev, error, "error locking file %s", diff --git a/sys/fs/autofs/autofs_vnops.c b/sys/fs/autofs/autofs_vnops.c index 09d77b9fccf8..be9f559ce1fd 100644 --- a/sys/fs/autofs/autofs_vnops.c +++ b/sys/fs/autofs/autofs_vnops.c @@ -169,8 +169,8 @@ autofs_trigger_vn(struct vnode *vp, const char *path, int pathlen, sx_xunlock(&autofs_softc->sc_lock); vn_lock(vp, lock_flags | LK_RETRY); vunref(vp); - if ((vp->v_iflag & VI_DOOMED) != 0) { - AUTOFS_DEBUG("VI_DOOMED"); + if (VN_IS_DOOMED(vp)) { + AUTOFS_DEBUG("VIRF_DOOMED"); return (ENOENT); } @@ -661,7 +661,7 @@ autofs_node_vn(struct autofs_node *anp, struct mount *mp, int flags, sx_xunlock(&anp->an_vnode_lock); return (error); } - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { /* * We got forcibly unmounted. 
*/ diff --git a/sys/fs/cd9660/cd9660_vnops.c b/sys/fs/cd9660/cd9660_vnops.c index a923bd2da551..b1bf2ce42344 100644 --- a/sys/fs/cd9660/cd9660_vnops.c +++ b/sys/fs/cd9660/cd9660_vnops.c @@ -260,7 +260,7 @@ cd9660_ioctl(ap) vp = ap->a_vp; vn_lock(vp, LK_SHARED | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); return (EBADF); } diff --git a/sys/fs/devfs/devfs_vnops.c b/sys/fs/devfs/devfs_vnops.c index eaff5f9a7df1..bb2832682b7c 100644 --- a/sys/fs/devfs/devfs_vnops.c +++ b/sys/fs/devfs/devfs_vnops.c @@ -252,7 +252,7 @@ devfs_populate_vp(struct vnode *vp) devfs_unmount_final(dmp); return (ERESTART); } - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { sx_xunlock(&dmp->dm_lock); return (ERESTART); } @@ -441,7 +441,7 @@ devfs_allocv(struct devfs_dirent *de, struct mount *mp, int lockmode, vput(vp); return (ENOENT); } - else if ((vp->v_iflag & VI_DOOMED) != 0) { + else if (VN_IS_DOOMED(vp)) { mtx_lock(&devfs_de_interlock); if (de->de_vnode == vp) { de->de_vnode = NULL; @@ -592,7 +592,7 @@ devfs_close(struct vop_close_args *ap) SESS_LOCK(p->p_session); VI_LOCK(vp); if (vp->v_usecount == 2 && vcount(vp) == 1 && - (vp->v_iflag & VI_DOOMED) == 0) { + !VN_IS_DOOMED(vp)) { p->p_session->s_ttyvp = NULL; p->p_session->s_ttydp = NULL; oldvp = vp; @@ -622,7 +622,7 @@ devfs_close(struct vop_close_args *ap) VI_LOCK(vp); if (vp->v_usecount == 1 && vcount(vp) == 1) dflags |= FLASTCLOSE; - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { /* Forced close. 
*/ dflags |= FREVOKE | FNONBLOCK; } else if (dsw->d_flags & D_TRACKCLOSE) { @@ -1562,7 +1562,7 @@ devfs_rioctl(struct vop_ioctl_args *ap) vp = ap->a_vp; vn_lock(vp, LK_SHARED | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); return (EBADF); } diff --git a/sys/fs/ext2fs/ext2_lookup.c b/sys/fs/ext2fs/ext2_lookup.c index c41c70e9020e..bcd9bb37d22f 100644 --- a/sys/fs/ext2fs/ext2_lookup.c +++ b/sys/fs/ext2fs/ext2_lookup.c @@ -665,7 +665,7 @@ ext2_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp pdp = vdp; if (flags & ISDOTDOT) { error = vn_vget_ino(pdp, ino, cnp->cn_lkflags, &tdp); - if (pdp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(pdp)) { if (error == 0) vput(tdp); error = ENOENT; diff --git a/sys/fs/fdescfs/fdesc_vnops.c b/sys/fs/fdescfs/fdesc_vnops.c index 127bdccd8c40..9b319fbc8f17 100644 --- a/sys/fs/fdescfs/fdesc_vnops.c +++ b/sys/fs/fdescfs/fdesc_vnops.c @@ -347,7 +347,7 @@ fdesc_lookup(struct vop_lookup_args *ap) vn_lock(dvp, LK_RETRY | LK_EXCLUSIVE); vdrop(dvp); fvp = dvp; - if ((dvp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(dvp)) error = ENOENT; } else { /* diff --git a/sys/fs/fuse/fuse_io.c b/sys/fs/fuse/fuse_io.c index f1b1e0c66ffc..6f4644af280e 100644 --- a/sys/fs/fuse/fuse_io.c +++ b/sys/fs/fuse/fuse_io.c @@ -1116,7 +1116,7 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td) struct fuse_vnode_data *fvdat = VTOFUD(vp); int error = 0; - if (vp->v_iflag & VI_DOOMED) + if (VN_IS_DOOMED(vp)) return 0; ASSERT_VOP_ELOCKED(vp, "fuse_io_invalbuf"); diff --git a/sys/fs/nfsclient/nfs_clport.c b/sys/fs/nfsclient/nfs_clport.c index 04bb167b1bff..9e97540612af 100644 --- a/sys/fs/nfsclient/nfs_clport.c +++ b/sys/fs/nfsclient/nfs_clport.c @@ -149,13 +149,13 @@ nfscl_nget(struct mount *mntp, struct vnode *dvp, struct nfsfh *nfhp, * get called on this vnode between when NFSVOPLOCK() drops * the VI_LOCK() and vget() acquires it again, so that it * hasn't yet had v_usecount incremented. 
If this were to - * happen, the VI_DOOMED flag would be set, so check for + * happen, the VIRF_DOOMED flag would be set, so check for * that here. Since we now have the v_usecount incremented, - * we should be ok until we vrele() it, if the VI_DOOMED + * we should be ok until we vrele() it, if the VIRF_DOOMED * flag isn't set now. */ VI_LOCK(nvp); - if ((nvp->v_iflag & VI_DOOMED)) { + if (VN_IS_DOOMED(nvp)) { VI_UNLOCK(nvp); vrele(nvp); error = ENOENT; @@ -350,7 +350,7 @@ nfscl_ngetreopen(struct mount *mntp, u_int8_t *fhp, int fhsize, vfs_hash_ref(mntp, hash, td, &nvp, newnfs_vncmpf, nfhp); if (nvp == NULL) { error = ENOENT; - } else if ((nvp->v_iflag & VI_DOOMED) != 0) { + } else if (VN_IS_DOOMED(nvp)) { error = ENOENT; vrele(nvp); } else { diff --git a/sys/fs/nfsclient/nfs_clvnops.c b/sys/fs/nfsclient/nfs_clvnops.c index f79a25fa3eec..7d721ae8da3e 100644 --- a/sys/fs/nfsclient/nfs_clvnops.c +++ b/sys/fs/nfsclient/nfs_clvnops.c @@ -1235,7 +1235,7 @@ nfs_lookup(struct vop_lookup_args *ap) vrele(newvp); *vpp = NULLVP; } else if (error == ENOENT) { - if (dvp->v_iflag & VI_DOOMED) + if (VN_IS_DOOMED(dvp)) return (ENOENT); /* * We only accept a negative hit in the cache if the @@ -1340,7 +1340,7 @@ nfs_lookup(struct vop_lookup_args *ap) error = vfs_busy(mp, 0); NFSVOPLOCK(dvp, ltype | LK_RETRY); vfs_rel(mp); - if (error == 0 && (dvp->v_iflag & VI_DOOMED)) { + if (error == 0 && VN_IS_DOOMED(dvp)) { vfs_unbusy(mp); error = ENOENT; } @@ -1355,7 +1355,7 @@ nfs_lookup(struct vop_lookup_args *ap) vfs_unbusy(mp); if (newvp != dvp) NFSVOPLOCK(dvp, ltype | LK_RETRY); - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { if (error == 0) { if (newvp == dvp) vrele(newvp); @@ -3139,7 +3139,7 @@ nfs_advlock(struct vop_advlock_args *ap) else cred = td->td_ucred; NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { error = EBADF; goto out; } @@ -3169,7 +3169,7 @@ nfs_advlock(struct vop_advlock_args *ap) if (error) return (EINTR); 
NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { error = EBADF; goto out; } diff --git a/sys/fs/nfsserver/nfs_nfsdport.c b/sys/fs/nfsserver/nfs_nfsdport.c index 9dc7da1a74c8..48513080f224 100644 --- a/sys/fs/nfsserver/nfs_nfsdport.c +++ b/sys/fs/nfsserver/nfs_nfsdport.c @@ -1457,7 +1457,7 @@ nfsvno_link(struct nameidata *ndp, struct vnode *vp, struct ucred *cred, } if (!error) { NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) == 0) + if (!VN_IS_DOOMED(vp)) error = VOP_LINK(ndp->ni_dvp, vp, &ndp->ni_cnd); else error = EPERM; @@ -1738,7 +1738,7 @@ nfsvno_open(struct nfsrv_descript *nd, struct nameidata *ndp, * Updates the file rev and sets the mtime and ctime * to the current clock time, returning the va_filerev and va_Xtime * values. - * Return ESTALE to indicate the vnode is VI_DOOMED. + * Return ESTALE to indicate the vnode is VIRF_DOOMED. */ int nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap, @@ -1750,7 +1750,7 @@ nfsvno_updfilerev(struct vnode *vp, struct nfsvattr *nvap, vfs_timestamp(&va.va_mtime); if (NFSVOPISLOCKED(vp) != LK_EXCLUSIVE) { NFSVOPLOCK(vp, LK_UPGRADE | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) return (ESTALE); } (void) VOP_SETATTR(vp, &va, nd->nd_cred); diff --git a/sys/fs/nfsserver/nfs_nfsdserv.c b/sys/fs/nfsserver/nfs_nfsdserv.c index 3bfe8f6444fd..830fd23d6f1c 100644 --- a/sys/fs/nfsserver/nfs_nfsdserv.c +++ b/sys/fs/nfsserver/nfs_nfsdserv.c @@ -3024,7 +3024,7 @@ nfsrvd_open(struct nfsrv_descript *nd, __unused int isdgram, } vp = dp; NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) == 0) + if (!VN_IS_DOOMED(vp)) nd->nd_repstat = nfsrv_opencheck(clientid, &stateid, stp, vp, nd, p, nd->nd_repstat); else diff --git a/sys/fs/nfsserver/nfs_nfsdstate.c b/sys/fs/nfsserver/nfs_nfsdstate.c index a509eb2bdb91..ce24aa62d636 100644 --- a/sys/fs/nfsserver/nfs_nfsdstate.c +++ b/sys/fs/nfsserver/nfs_nfsdstate.c @@ 
-2159,7 +2159,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, NFSUNLOCKSTATE(); NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); vnode_unlocked = 0; - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) ret = NFSERR_SERVERFAULT; NFSLOCKSTATE(); } @@ -2257,7 +2257,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, NFSUNLOCKSTATE(); NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); vnode_unlocked = 0; - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { error = NFSERR_SERVERFAULT; goto out; } @@ -2379,7 +2379,7 @@ nfsrv_lockctrl(vnode_t vp, struct nfsstate **new_stpp, } if (vnode_unlocked != 0) { NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) + if (error == 0 && VN_IS_DOOMED(vp)) error = NFSERR_SERVERFAULT; } if (other_lop) @@ -5133,7 +5133,7 @@ nfsrv_checkstable(struct nfsclient *clp) * Return 0 to indicate the conflict can't be revoked and 1 to indicate * the revocation worked and the conflicting client is "bye, bye", so it * can be tried again. - * Return 2 to indicate that the vnode is VI_DOOMED after NFSVOPLOCK(). + * Return 2 to indicate that the vnode is VIRF_DOOMED after NFSVOPLOCK(). * Unlocks State before a non-zero value is returned. */ static int @@ -5164,7 +5164,7 @@ nfsrv_clientconflict(struct nfsclient *clp, int *haslockp, vnode_t vp, *haslockp = 1; if (vp != NULL) { NFSVOPLOCK(vp, lktype | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) + if (VN_IS_DOOMED(vp)) return (2); } return (1); @@ -5339,7 +5339,7 @@ nfsrv_delegconflict(struct nfsstate *stp, int *haslockp, NFSPROC_T *p, *haslockp = 1; if (vp != NULL) { NFSVOPLOCK(vp, lktype | LK_RETRY); - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { *haslockp = 0; NFSLOCKV4ROOTMUTEX(); nfsv4_unlock(&nfsv4rootfs_lock, 1); @@ -8313,7 +8313,7 @@ nfsrv_copymr(vnode_t vp, vnode_t fvp, vnode_t dvp, struct nfsdevice *ds, * changed until the copy is complete. 
*/ NFSVOPLOCK(vp, LK_EXCLUSIVE | LK_RETRY); - if (ret == 0 && (vp->v_iflag & VI_DOOMED) != 0) { + if (ret == 0 && VN_IS_DOOMED(vp)) { NFSD_DEBUG(4, "nfsrv_copymr: lk_exclusive doomed\n"); ret = ESTALE; } diff --git a/sys/fs/nullfs/null_subr.c b/sys/fs/nullfs/null_subr.c index cf7aed525845..a6f31b976d92 100644 --- a/sys/fs/nullfs/null_subr.c +++ b/sys/fs/nullfs/null_subr.c @@ -225,7 +225,7 @@ null_nodeget(mp, lowervp, vpp) */ if (VOP_ISLOCKED(lowervp) != LK_EXCLUSIVE) { vn_lock(lowervp, LK_UPGRADE | LK_RETRY); - if ((lowervp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(lowervp)) { vput(lowervp); return (ENOENT); } diff --git a/sys/fs/nullfs/null_vfsops.c b/sys/fs/nullfs/null_vfsops.c index e2ef7e874d9f..352fa84c35b4 100644 --- a/sys/fs/nullfs/null_vfsops.c +++ b/sys/fs/nullfs/null_vfsops.c @@ -442,7 +442,7 @@ nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp) * extra unlock before allowing the final vdrop() to * free the vnode. */ - KASSERT((vp->v_iflag & VI_DOOMED) != 0, + KASSERT(VN_IS_DOOMED(vp), ("not reclaimed nullfs vnode %p", vp)); VOP_UNLOCK(vp, 0); } else { @@ -453,7 +453,7 @@ nullfs_unlink_lowervp(struct mount *mp, struct vnode *lowervp) * relevant for future reclamations. */ ASSERT_VOP_ELOCKED(vp, "unlink_lowervp"); - KASSERT((vp->v_iflag & VI_DOOMED) == 0, + KASSERT(!VN_IS_DOOMED(vp), ("reclaimed nullfs vnode %p", vp)); xp->null_flags &= ~NULLV_NOUNLOCK; } diff --git a/sys/fs/nullfs/null_vnops.c b/sys/fs/nullfs/null_vnops.c index 7b01da41b15e..03b16ccc17b0 100644 --- a/sys/fs/nullfs/null_vnops.c +++ b/sys/fs/nullfs/null_vnops.c @@ -396,7 +396,7 @@ null_lookup(struct vop_lookup_args *ap) * doomed state and return error. 
*/ if ((error == 0 || error == EJUSTRETURN) && - (dvp->v_iflag & VI_DOOMED) != 0) { + VN_IS_DOOMED(dvp)) { error = ENOENT; if (lvp != NULL) vput(lvp); diff --git a/sys/fs/pseudofs/pseudofs_vnops.c b/sys/fs/pseudofs/pseudofs_vnops.c index ce15547e44f5..0a3c9e967706 100644 --- a/sys/fs/pseudofs/pseudofs_vnops.c +++ b/sys/fs/pseudofs/pseudofs_vnops.c @@ -290,7 +290,7 @@ pfs_ioctl(struct vop_ioctl_args *va) vn = va->a_vp; vn_lock(vn, LK_SHARED | LK_RETRY); - if (vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vn)) { VOP_UNLOCK(vn, 0); return (EBADF); } @@ -512,7 +512,7 @@ pfs_lookup(struct vop_cachedlookup_args *va) vfs_rel(mp); if (error != 0) PFS_RETURN(ENOENT); - if (vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vn)) { vfs_unbusy(mp); PFS_RETURN(ENOENT); } @@ -581,13 +581,13 @@ pfs_lookup(struct vop_cachedlookup_args *va) if (cnp->cn_flags & ISDOTDOT) { vfs_unbusy(mp); vn_lock(vn, LK_EXCLUSIVE | LK_RETRY); - if (vn->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vn)) { vput(*vpp); *vpp = NULL; PFS_RETURN(ENOENT); } } - if (cnp->cn_flags & MAKEENTRY && !(vn->v_iflag & VI_DOOMED)) + if (cnp->cn_flags & MAKEENTRY && !VN_IS_DOOMED(vn)) cache_enter(vn, *vpp, cnp); PFS_RETURN (0); failed: diff --git a/sys/fs/smbfs/smbfs_io.c b/sys/fs/smbfs/smbfs_io.c index ab1eb0574c1b..4e84e571e93a 100644 --- a/sys/fs/smbfs/smbfs_io.c +++ b/sys/fs/smbfs/smbfs_io.c @@ -637,7 +637,7 @@ smbfs_vinvalbuf(struct vnode *vp, struct thread *td) struct smbnode *np = VTOSMB(vp); int error = 0; - if (vp->v_iflag & VI_DOOMED) + if (VN_IS_DOOMED(vp)) return 0; while (np->n_flag & NFLUSHINPROG) { diff --git a/sys/fs/smbfs/smbfs_vnops.c b/sys/fs/smbfs/smbfs_vnops.c index 456d501d225e..9d94c14bef6b 100644 --- a/sys/fs/smbfs/smbfs_vnops.c +++ b/sys/fs/smbfs/smbfs_vnops.c @@ -1345,7 +1345,7 @@ smbfs_lookup(ap) error = ENOENT; goto out; } - if ((dvp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dvp)) { vfs_unbusy(mp); error = ENOENT; goto out; @@ -1355,7 +1355,7 @@ smbfs_lookup(ap) error = smbfs_nget(mp, dvp, 
name, nmlen, NULL, &vp); vfs_unbusy(mp); vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY); - if ((dvp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dvp)) { if (error == 0) vput(vp); error = ENOENT; diff --git a/sys/fs/tmpfs/tmpfs_subr.c b/sys/fs/tmpfs/tmpfs_subr.c index 94b518166898..1ba043ab5063 100644 --- a/sys/fs/tmpfs/tmpfs_subr.c +++ b/sys/fs/tmpfs/tmpfs_subr.c @@ -598,15 +598,15 @@ tmpfs_alloc_vp(struct mount *mp, struct tmpfs_node *node, int lkflag, MPASS((node->tn_vpstate & TMPFS_VNODE_DOOMED) == 0); VI_LOCK(vp); if ((node->tn_type == VDIR && node->tn_dir.tn_parent == NULL) || - ((vp->v_iflag & VI_DOOMED) != 0 && - (lkflag & LK_NOWAIT) != 0)) { + (VN_IS_DOOMED(vp) && + (lkflag & LK_NOWAIT) != 0)) { VI_UNLOCK(vp); TMPFS_NODE_UNLOCK(node); error = ENOENT; vp = NULL; goto out; } - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); node->tn_vpstate |= TMPFS_VNODE_WRECLAIM; while ((node->tn_vpstate & TMPFS_VNODE_WRECLAIM) != 0) { diff --git a/sys/fs/tmpfs/tmpfs_vnops.c b/sys/fs/tmpfs/tmpfs_vnops.c index b1dbdf02bab2..5df3c07b69cc 100644 --- a/sys/fs/tmpfs/tmpfs_vnops.c +++ b/sys/fs/tmpfs/tmpfs_vnops.c @@ -1578,7 +1578,7 @@ tmpfs_vptocnp(struct vop_vptocnp_args *ap) tmpfs_free_node(tm, tnp); return (0); } - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { tmpfs_free_node(tm, tnp); return (ENOENT); } diff --git a/sys/fs/unionfs/union_subr.c b/sys/fs/unionfs/union_subr.c index faa95ea49041..85b8db5709ac 100644 --- a/sys/fs/unionfs/union_subr.c +++ b/sys/fs/unionfs/union_subr.c @@ -127,7 +127,8 @@ unionfs_get_cached_vnode(struct vnode *uvp, struct vnode *lvp, VI_LOCK_FLAGS(vp, MTX_DUPOK); VI_UNLOCK(dvp); vp->v_iflag &= ~VI_OWEINACT; - if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) { + if (VN_IS_DOOMED(vp) || + ((vp->v_iflag & VI_DOINGINACT) != 0)) { VI_UNLOCK(vp); vp = NULLVP; } else @@ -163,7 +164,8 @@ unionfs_ins_cached_vnode(struct unionfs_node *uncp, vp = UNIONFSTOV(unp); VI_LOCK_FLAGS(vp, MTX_DUPOK); vp->v_iflag &= 
~VI_OWEINACT; - if ((vp->v_iflag & (VI_DOOMED | VI_DOINGINACT)) != 0) { + if (VN_IS_DOOMED(vp) || + ((vp->v_iflag & VI_DOINGINACT) != 0)) { LIST_INSERT_HEAD(hd, uncp, un_hash); VI_UNLOCK(vp); vp = NULLVP; diff --git a/sys/kern/kern_lockf.c b/sys/kern/kern_lockf.c index 6fb8692d4861..57c816346b24 100644 --- a/sys/kern/kern_lockf.c +++ b/sys/kern/kern_lockf.c @@ -592,7 +592,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, * the vnode interlock. */ VI_LOCK(vp); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); lf_free_lock(lock); return (ENOENT); @@ -622,7 +622,7 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, * trying to allocate memory. */ VI_LOCK(vp); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VI_UNLOCK(vp); sx_xlock(&lf_lock_states_lock); LIST_REMOVE(ls, ls_link); @@ -655,10 +655,10 @@ lf_advlockasync(struct vop_advlockasync_args *ap, struct lockf **statep, /* * Recheck the doomed vnode after state->ls_lock is * locked. lf_purgelocks() requires that no new threads add - * pending locks when vnode is marked by VI_DOOMED flag. + * pending locks when vnode is marked by VIRF_DOOMED flag. */ VI_LOCK(vp); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { state->ls_threads--; wakeup(state); VI_UNLOCK(vp); @@ -771,12 +771,12 @@ lf_purgelocks(struct vnode *vp, struct lockf **statep) /* * For this to work correctly, the caller must ensure that no * other threads enter the locking system for this vnode, - * e.g. by checking VI_DOOMED. We wake up any threads that are + * e.g. by checking VIRF_DOOMED. We wake up any threads that are * sleeping waiting for locks on this vnode and then free all * the remaining locks. 
*/ VI_LOCK(vp); - KASSERT(vp->v_iflag & VI_DOOMED, + KASSERT(VN_IS_DOOMED(vp), ("lf_purgelocks: vp %p has not vgone yet", vp)); state = *statep; if (state == NULL) { diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c index e7139a6dab50..420dd1255e98 100644 --- a/sys/kern/kern_shutdown.c +++ b/sys/kern/kern_shutdown.c @@ -518,7 +518,7 @@ kern_reroot(void) VOP_UNLOCK(vp, 0); return (ENOENT); } - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); vfs_unbusy(mp); return (ENOENT); diff --git a/sys/kern/tty.c b/sys/kern/tty.c index 4ab510fc7bb1..d8e893db4b29 100644 --- a/sys/kern/tty.c +++ b/sys/kern/tty.c @@ -1250,9 +1250,9 @@ tty_drop_ctty(struct tty *tp, struct proc *p) * If we did have a vnode, release our reference. Ordinarily we manage * these at the devfs layer, but we can't necessarily know that we were * invoked on the vnode referenced in the session (i.e. the vnode we - * hold a reference to). We explicitly don't check VBAD/VI_DOOMED here + * hold a reference to). We explicitly don't check VBAD/VIRF_DOOMED here * to avoid a vnode leak -- in circumstances elsewhere where we'd hit a - * VI_DOOMED vnode, release has been deferred until the controlling TTY + * VIRF_DOOMED vnode, release has been deferred until the controlling TTY * is either changed or released. 
*/ if (vp != NULL) diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c index 6787d4d4af81..f5335fb14651 100644 --- a/sys/kern/vfs_cache.c +++ b/sys/kern/vfs_cache.c @@ -1158,7 +1158,7 @@ cache_lookup_dot(struct vnode *dvp, struct vnode **vpp, struct componentname *cn if (ltype != VOP_ISLOCKED(*vpp)) { if (ltype == LK_EXCLUSIVE) { vn_lock(*vpp, LK_UPGRADE | LK_RETRY); - if ((*vpp)->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED((*vpp))) { /* forced unmount */ vrele(*vpp); *vpp = NULL; @@ -1401,7 +1401,7 @@ cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp, error = vget_finish(*vpp, cnp->cn_lkflags, vs); if (cnp->cn_flags & ISDOTDOT) { vn_lock(dvp, ltype | LK_RETRY); - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { if (error == 0) vput(*vpp); *vpp = NULL; @@ -1706,9 +1706,9 @@ cache_enter_time(struct vnode *dvp, struct vnode *vp, struct componentname *cnp, u_long lnumcache; CTR3(KTR_VFS, "cache_enter(%p, %p, %s)", dvp, vp, cnp->cn_nameptr); - VNASSERT(vp == NULL || (vp->v_iflag & VI_DOOMED) == 0, vp, + VNASSERT(vp == NULL || !VN_IS_DOOMED(vp), vp, ("cache_enter: Adding a doomed vnode")); - VNASSERT(dvp == NULL || (dvp->v_iflag & VI_DOOMED) == 0, dvp, + VNASSERT(dvp == NULL || !VN_IS_DOOMED(dvp), dvp, ("cache_enter: Doomed vnode used as src")); #ifdef DEBUG_CACHE @@ -2365,7 +2365,7 @@ vn_vptocnp(struct vnode **vp, struct ucred *cred, char *buf, u_int *buflen) } *vp = dvp; - if (dvp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dvp)) { /* forced unmount */ vrele(dvp); error = ENOENT; @@ -2429,7 +2429,7 @@ vn_fullpath1(struct thread *td, struct vnode *vp, struct vnode *rdir, * mnt_vnodecovered can be NULL only for the * case of unmount. 
*/ - if ((vp->v_iflag & VI_DOOMED) != 0 || + if (VN_IS_DOOMED(vp) || (vp1 = vp->v_mount->mnt_vnodecovered) == NULL || vp1->v_mountedhere != vp->v_mount) { vput(vp); diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c index 8aee8abfbe7e..b12fe280bd09 100644 --- a/sys/kern/vfs_default.c +++ b/sys/kern/vfs_default.c @@ -594,7 +594,7 @@ vop_stdgetwritemount(ap) * Note that having a reference does not prevent forced unmount from * setting ->v_mount to NULL after the lock gets released. This is of * no consequence for typical consumers (most notably vn_start_write) - * since in this case the vnode is VI_DOOMED. Unmount might have + * since in this case the vnode is VIRF_DOOMED. Unmount might have * progressed far enough that its completion is only delayed by the * reference obtained here. The consumer only needs to concern itself * with releasing it. @@ -1019,7 +1019,7 @@ vop_stdadvise(struct vop_advise_args *ap) case POSIX_FADV_DONTNEED: error = 0; vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); - if (vp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(vp)) { VOP_UNLOCK(vp, 0); break; } diff --git a/sys/kern/vfs_lookup.c b/sys/kern/vfs_lookup.c index b127c9daccd3..10941014e031 100644 --- a/sys/kern/vfs_lookup.c +++ b/sys/kern/vfs_lookup.c @@ -867,7 +867,7 @@ lookup(struct nameidata *ndp) } if ((dp->v_vflag & VV_ROOT) == 0) break; - if (dp->v_iflag & VI_DOOMED) { /* forced unmount */ + if (VN_IS_DOOMED(dp)) { /* forced unmount */ error = ENOENT; goto bad; } @@ -911,7 +911,7 @@ lookup(struct nameidata *ndp) if ((cnp->cn_flags & LOCKPARENT) && (cnp->cn_flags & ISLASTCN) && dp != vp_crossmp && VOP_ISLOCKED(dp) == LK_SHARED) vn_lock(dp, LK_UPGRADE|LK_RETRY); - if ((dp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(dp)) { error = ENOENT; goto bad; } @@ -1028,7 +1028,7 @@ lookup(struct nameidata *ndp) ((cnp->cn_flags & FOLLOW) || (cnp->cn_flags & TRAILINGSLASH) || *ndp->ni_next == '/')) { cnp->cn_flags |= ISSYMLINK; - if (dp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dp)) { /* * We 
can't know whether the directory was mounted with * NOSYMFOLLOW, so we can't follow safely. @@ -1135,7 +1135,7 @@ lookup(struct nameidata *ndp) if (needs_exclusive_leaf(dp->v_mount, cnp->cn_flags) && VOP_ISLOCKED(dp) != LK_EXCLUSIVE) { vn_lock(dp, LK_UPGRADE | LK_RETRY); - if (dp->v_iflag & VI_DOOMED) { + if (VN_IS_DOOMED(dp)) { error = ENOENT; goto bad2; } diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index 1977c3b2bba9..444d22d16959 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -137,7 +137,7 @@ static int v_inval_buf_range_locked(struct vnode *vp, struct bufobj *bo, /* * Number of vnodes in existence. Increased whenever getnewvnode() - * allocates a new vnode, decreased in vdropl() for VI_DOOMED vnode. + * allocates a new vnode, decreased in vdropl() for VIRF_DOOMED vnode. */ static unsigned long numvnodes; @@ -367,7 +367,7 @@ sysctl_try_reclaim_vnode(SYSCTL_HANDLER_ARGS) goto out; vp = nd.ni_vp; - if ((vp->v_iflag & VI_DOOMED) != 0) { + if (VN_IS_DOOMED(vp)) { /* * This vnode is being recycled. Return != 0 to let the caller * know that the sysctl had no effect. Return EAGAIN because a @@ -1033,7 +1033,7 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger) if (vp->v_usecount || (!reclaim_nc_src && !LIST_EMPTY(&vp->v_cache_src)) || ((vp->v_iflag & VI_FREE) != 0) || - (vp->v_iflag & VI_DOOMED) != 0 || (vp->v_object != NULL && + VN_IS_DOOMED(vp) || (vp->v_object != NULL && vp->v_object->resident_page_count > trigger)) { VI_UNLOCK(vp); goto next_iter; @@ -1049,7 +1049,7 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger) * v_usecount may have been bumped after VOP_LOCK() dropped * the vnode interlock and before it was locked again. * - * It is not necessary to recheck VI_DOOMED because it can + * It is not necessary to recheck VIRF_DOOMED because it can * only be set by another thread that holds both the vnode * lock and vnode interlock. 
	 * If another thread has the
	 * vnode lock before we get to VOP_LOCK() and obtains the
@@ -1066,8 +1066,8 @@ vlrureclaim(struct mount *mp, bool reclaim_nc_src, int trigger)
 			vdropl(vp);
 			goto next_iter_mntunlocked;
 		}
-		KASSERT((vp->v_iflag & VI_DOOMED) == 0,
-		    ("VI_DOOMED unexpectedly detected in vlrureclaim()"));
+		KASSERT(!VN_IS_DOOMED(vp),
+		    ("VIRF_DOOMED unexpectedly detected in vlrureclaim()"));
 		counter_u64_add(recycles_count, 1);
 		vgonel(vp);
 		VOP_UNLOCK(vp, 0);
@@ -1436,7 +1436,7 @@ vtryrecycle(struct vnode *vp)
 		    __func__, vp);
 		return (EBUSY);
 	}
-	if ((vp->v_iflag & VI_DOOMED) == 0) {
+	if (!VN_IS_DOOMED(vp)) {
 		counter_u64_add(recycles_count, 1);
 		vgonel(vp);
 	}
@@ -2696,7 +2696,7 @@ v_decr_devcount(struct vnode *vp)
 
 /*
  * Grab a particular vnode from the free list, increment its
- * reference count and lock it. VI_DOOMED is set if the vnode
+ * reference count and lock it. VIRF_DOOMED is set if the vnode
  * is being destroyed. Only callers who specify LK_RETRY will
  * see doomed vnodes. If inactive processing was delayed in
  * vput try to do it here.
@@ -2994,7 +2994,7 @@ vputx(struct vnode *vp, int func)
 	 * Since vgone performs inactive on its own there is nothing to do
 	 * here but to drop our hold count.
 	 */
-	if (__predict_false(vp->v_iflag & VI_DOOMED) ||
+	if (__predict_false(VN_IS_DOOMED(vp)) ||
 	    VOP_NEED_INACTIVE(vp) == 0) {
 		vdropl(vp);
 		return;
@@ -3143,7 +3143,7 @@ vholdnz(struct vnode *vp)
 /*
  * Drop the hold count of the vnode. If this is the last reference to
  * the vnode we place it on the free list unless it has been vgone'd
- * (marked VI_DOOMED) in which case we will free it.
+ * (marked VIRF_DOOMED) in which case we will free it.
  *
  * Because the vnode vm object keeps a hold reference on the vnode if
  * there is at least one resident non-cached page, the vnode cannot
@@ -3173,7 +3173,7 @@ _vdrop(struct vnode *vp, bool locked)
 		VI_UNLOCK(vp);
 		return;
 	}
-	if ((vp->v_iflag & VI_DOOMED) == 0) {
+	if (!VN_IS_DOOMED(vp)) {
 		/*
 		 * Mark a vnode as free: remove it from its active list
 		 * and put it up for recycling on the freelist.
@@ -3269,6 +3269,7 @@ _vdrop(struct vnode *vp, bool locked)
 	vp->v_rdev = NULL;
 	vp->v_fifoinfo = NULL;
 	vp->v_lasta = vp->v_clen = vp->v_cstart = vp->v_lastw = 0;
+	vp->v_irflag = 0;
 	vp->v_iflag = 0;
 	vp->v_vflag = 0;
 	bo->bo_flag = 0;
@@ -3584,9 +3585,9 @@ vgonel(struct vnode *vp)
 	/*
	 * Don't vgonel if we're already doomed.
	 */
-	if (vp->v_iflag & VI_DOOMED)
+	if (vp->v_irflag & VIRF_DOOMED)
 		return;
-	vp->v_iflag |= VI_DOOMED;
+	vp->v_irflag |= VIRF_DOOMED;
 
 	/*
	 * Check to see if the vnode is in use. If so, we have to call
@@ -3768,8 +3769,6 @@ vn_printf(struct vnode *vp, const char *fmt, ...)
 	}
 	if (vp->v_iflag & VI_MOUNT)
 		strlcat(buf, "|VI_MOUNT", sizeof(buf));
-	if (vp->v_iflag & VI_DOOMED)
-		strlcat(buf, "|VI_DOOMED", sizeof(buf));
 	if (vp->v_iflag & VI_FREE)
 		strlcat(buf, "|VI_FREE", sizeof(buf));
 	if (vp->v_iflag & VI_ACTIVE)
@@ -3780,12 +3779,19 @@ vn_printf(struct vnode *vp, const char *fmt, ...)
 		strlcat(buf, "|VI_OWEINACT", sizeof(buf));
 	if (vp->v_iflag & VI_TEXT_REF)
 		strlcat(buf, "|VI_TEXT_REF", sizeof(buf));
-	flags = vp->v_iflag & ~(VI_MOUNT | VI_DOOMED | VI_FREE |
-	    VI_ACTIVE | VI_DOINGINACT | VI_OWEINACT | VI_TEXT_REF);
+	flags = vp->v_iflag & ~(VI_MOUNT | VI_FREE | VI_ACTIVE | VI_DOINGINACT |
+	    VI_OWEINACT | VI_TEXT_REF);
 	if (flags != 0) {
 		snprintf(buf2, sizeof(buf2), "|VI(0x%lx)", flags);
 		strlcat(buf, buf2, sizeof(buf));
 	}
+	if (vp->v_irflag & VIRF_DOOMED)
+		strlcat(buf, "|VIRF_DOOMED", sizeof(buf));
+	flags = vp->v_irflag & ~(VIRF_DOOMED);
+	if (flags != 0) {
+		snprintf(buf2, sizeof(buf2), "|VIRF(0x%lx)", flags);
+		strlcat(buf, buf2, sizeof(buf));
+	}
 	printf(" flags (%s)\n", buf + 1);
 	if (mtx_owned(VI_MTX(vp)))
 		printf(" VI_LOCKed");
@@ -5199,7 +5205,7 @@ vop_close_post(void *ap, int rc)
 	struct vop_close_args *a = ap;
 
 	if (!rc && (a->a_cred != NOCRED || /* filter out revokes */
-	    (a->a_vp->v_iflag & VI_DOOMED) == 0)) {
+	    !VN_IS_DOOMED(a->a_vp))) {
 		VFS_KNOTE_LOCKED(a->a_vp, (a->a_fflag & FWRITE) != 0 ?
 		    NOTE_CLOSE_WRITE : NOTE_CLOSE);
 	}
@@ -5668,7 +5674,7 @@ vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
 	MNT_ILOCK(mp);
 	vp = mp->mnt_rootvnode;
 	if (vp != NULL) {
-		if ((vp->v_iflag & VI_DOOMED) == 0) {
+		if (!VN_IS_DOOMED(vp)) {
 			vrefact(vp);
 			MNT_IUNLOCK(mp);
 			error = vn_lock(vp, flags);
@@ -5708,7 +5714,7 @@ vfs_cache_root_fallback(struct mount *mp, int flags, struct vnode **vpp)
 		mp->mnt_rootvnode = *vpp;
 	} else {
 		if (mp->mnt_rootvnode != *vpp) {
-			if ((mp->mnt_rootvnode->v_iflag & VI_DOOMED) == 0) {
+			if (!VN_IS_DOOMED(mp->mnt_rootvnode)) {
 				panic("%s: mismatch between vnode returned "
 				    " by VFS_CACHEDROOT and the one cached "
 				    " (%p != %p)",
@@ -5730,7 +5736,7 @@ vfs_cache_root(struct mount *mp, int flags, struct vnode **vpp)
 	if (!vfs_op_thread_enter(mp))
 		return (vfs_cache_root_fallback(mp, flags, vpp));
 	vp = (struct vnode *)atomic_load_ptr(&mp->mnt_rootvnode);
-	if (vp == NULL || (vp->v_iflag & VI_DOOMED)) {
+	if (vp == NULL || VN_IS_DOOMED(vp)) {
 		vfs_op_thread_exit(mp);
 		return (vfs_cache_root_fallback(mp, flags, vpp));
 	}
@@ -5788,11 +5794,11 @@ __mnt_vnode_next_all(struct vnode **mvp, struct mount *mp)
 	KASSERT((*mvp)->v_mount == mp, ("marker vnode mount list mismatch"));
 	for (vp = TAILQ_NEXT(*mvp, v_nmntvnodes); vp != NULL;
 	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
-		/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
-		if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
+		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
+		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
 			continue;
 		VI_LOCK(vp);
-		if ((vp->v_iflag & VI_DOOMED) != 0) {
+		if (VN_IS_DOOMED(vp)) {
 			VI_UNLOCK(vp);
 			continue;
 		}
@@ -5822,11 +5828,11 @@ __mnt_vnode_first_all(struct vnode **mvp, struct mount *mp)
 	(*mvp)->v_type = VMARKER;
 
 	TAILQ_FOREACH(vp, &mp->mnt_nvnodelist, v_nmntvnodes) {
-		/* Allow a racy peek at VI_DOOMED to save a lock acquisition. */
-		if (vp->v_type == VMARKER || (vp->v_iflag & VI_DOOMED) != 0)
+		/* Allow a racy peek at VIRF_DOOMED to save a lock acquisition. */
+		if (vp->v_type == VMARKER || VN_IS_DOOMED(vp))
 			continue;
 		VI_LOCK(vp);
-		if ((vp->v_iflag & VI_DOOMED) != 0) {
+		if (VN_IS_DOOMED(vp)) {
 			VI_UNLOCK(vp);
 			continue;
 		}
@@ -5983,7 +5989,7 @@ mnt_vnode_next_active(struct vnode **mvp, struct mount *mp)
 		KASSERT(vp->v_type != VMARKER, ("locked marker %p", vp));
 		KASSERT(vp->v_mount == mp || vp->v_mount == NULL,
 		    ("alien vnode on the active list %p %p", vp, mp));
-		if (vp->v_mount == mp && (vp->v_iflag & VI_DOOMED) == 0)
+		if (vp->v_mount == mp && !VN_IS_DOOMED(vp))
 			break;
 		nvp = TAILQ_NEXT(vp, v_actfreelist);
 		VI_UNLOCK(vp);
diff --git a/sys/kern/vfs_syscalls.c b/sys/kern/vfs_syscalls.c
index 64aef684c665..3f8d04fce74d 100644
--- a/sys/kern/vfs_syscalls.c
+++ b/sys/kern/vfs_syscalls.c
@@ -1820,7 +1820,7 @@ kern_funlinkat(struct thread *td, int dfd, const char *path, int fd,
 		    sb.st_ino != oldinum) {
 			error = EIDRM;	/* Identifier removed */
 		} else if (fp != NULL && fp->f_vnode != vp) {
-			if ((fp->f_vnode->v_iflag & VI_DOOMED) != 0)
+			if (VN_IS_DOOMED(fp->f_vnode))
 				error = EBADF;
 			else
 				error = EDEADLK;
@@ -3779,7 +3779,7 @@ kern_frmdirat(struct thread *td, int dfd, const char *path, int fd,
 	}
 
 	if (fp != NULL && fp->f_vnode != vp) {
-		if ((fp->f_vnode->v_iflag & VI_DOOMED) != 0)
+		if (VN_IS_DOOMED(fp->f_vnode))
 			error = EBADF;
 		else
 			error = EDEADLK;
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index c69010dd9995..73308bd26fea 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -328,7 +328,7 @@ vn_open_vnode_advlock(struct vnode *vp, int fmode, struct file *fp)
 		fp->f_flag |= FHASLOCK;
 
 	vn_lock(vp, lock_flags | LK_RETRY);
-	if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0)
+	if (error == 0 && VN_IS_DOOMED(vp))
 		error = ENOENT;
 	return (error);
 }
@@ -1579,7 +1579,7 @@ _vn_lock(struct vnode *vp, int flags, char *file, int line)
 	    ("vn_lock: error %d incompatible with flags %#x", error,
 	    flags));
 	if ((flags & LK_RETRY) == 0) {
-		if (error == 0 && (vp->v_iflag & VI_DOOMED) != 0) {
+		if (error == 0 && VN_IS_DOOMED(vp)) {
 			VOP_UNLOCK(vp, 0);
 			error = ENOENT;
 		}
@@ -2132,7 +2132,7 @@ vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
 	vfs_rel(mp);
 	if (error != 0)
 		return (ENOENT);
-	if (vp->v_iflag & VI_DOOMED) {
+	if (VN_IS_DOOMED(vp)) {
 		vfs_unbusy(mp);
 		return (ENOENT);
 	}
@@ -2142,7 +2142,7 @@ vn_vget_ino_gen(struct vnode *vp, vn_get_ino_t alloc, void *alloc_arg,
 	vfs_unbusy(mp);
 	if (error != 0 || *rvp != vp)
 		vn_lock(vp, ltype | LK_RETRY);
-	if (vp->v_iflag & VI_DOOMED) {
+	if (VN_IS_DOOMED(vp)) {
 		if (error == 0) {
 			if (*rvp == vp)
 				vunref(vp);
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index b9a8d5458a0c..3b79009c1585 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -103,7 +103,8 @@ struct vnode {
 	 * Fields which define the identity of the vnode.  These fields are
 	 * owned by the filesystem (XXX: and vgone() ?)
 	 */
-	enum vtype v_type;	/* u vnode type */
+	enum vtype v_type:8;	/* u vnode type */
+	short v_irflag;		/* i frequently read flags */
 	struct vop_vector *v_op;	/* u vnode operations vector */
 	void *v_data;		/* u private data for fs */
 
@@ -231,12 +232,13 @@ struct xvnode {
 *	VI flags are protected by interlock and live in v_iflag
 *	VV flags are protected by the vnode lock and live in v_vflag
 *
-*	VI_DOOMED is doubly protected by the interlock and vnode lock.  Both
+*	VIRF_DOOMED is doubly protected by the interlock and vnode lock.  Both
 *	are required for writing but the status may be checked with either.
 */
+#define	VIRF_DOOMED	0x0001	/* This vnode is being recycled */
+
 #define	VI_TEXT_REF	0x0001	/* Text ref grabbed use ref */
 #define	VI_MOUNT	0x0020	/* Mount in progress */
-#define	VI_DOOMED	0x0080	/* This vnode is being recycled */
 #define	VI_FREE		0x0100	/* This vnode is on the freelist */
 #define	VI_ACTIVE	0x0200	/* This vnode is on the active list */
 #define	VI_DOINGINACT	0x0800	/* VOP_INACTIVE is in progress */
@@ -889,6 +891,8 @@ do {	\
 #define	VOP_UNSET_TEXT_CHECKED(vp)	VOP_UNSET_TEXT((vp))
 #endif
 
+#define	VN_IS_DOOMED(vp)	((vp)->v_irflag & VIRF_DOOMED)
+
 void	vput(struct vnode *vp);
 void	vrele(struct vnode *vp);
 void	vref(struct vnode *vp);
diff --git a/sys/ufs/ffs/ffs_inode.c b/sys/ufs/ffs/ffs_inode.c
index c504b263536d..55130de7b3c7 100644
--- a/sys/ufs/ffs/ffs_inode.c
+++ b/sys/ufs/ffs/ffs_inode.c
@@ -124,7 +124,7 @@ ffs_update(vp, waitfor)
 	 *
	 * Hold a reference to the vnode to protect against
	 * ffs_snapgone().  Since we hold a reference, it can only
-	 * get reclaimed (VI_DOOMED flag) in a forcible downgrade
+	 * get reclaimed (VIRF_DOOMED flag) in a forcible downgrade
	 * or unmount.  For an unmount, the entire filesystem will be
	 * gone, so we cannot attempt to touch anything associated
	 * with it while the vnode is unlocked; all we can do is
@@ -137,7 +137,7 @@ ffs_update(vp, waitfor)
 		pause("ffsupd", 1);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 		vrele(vp);
-		if ((vp->v_iflag & VI_DOOMED) != 0)
+		if (VN_IS_DOOMED(vp))
 			return (ENOENT);
 		goto loop;
 	}
diff --git a/sys/ufs/ffs/ffs_rawread.c b/sys/ufs/ffs/ffs_rawread.c
index 200fe1a9b4e5..0ecb38726559 100644
--- a/sys/ufs/ffs/ffs_rawread.c
+++ b/sys/ufs/ffs/ffs_rawread.c
@@ -131,7 +131,7 @@ ffs_rawread_sync(struct vnode *vp)
 			VI_LOCK(vp);
 			/* Check if vnode was reclaimed while unlocked.
			 */
-			if ((vp->v_iflag & VI_DOOMED) != 0) {
+			if (VN_IS_DOOMED(vp)) {
 				VI_UNLOCK(vp);
 				if (upgraded != 0)
 					VOP_LOCK(vp, LK_DOWNGRADE);
diff --git a/sys/ufs/ffs/ffs_softdep.c b/sys/ufs/ffs/ffs_softdep.c
index 6c0f4bae5750..6854aa69b5d8 100644
--- a/sys/ufs/ffs/ffs_softdep.c
+++ b/sys/ufs/ffs/ffs_softdep.c
@@ -12511,7 +12511,7 @@ softdep_fsync(vp)
 		 * not now, but then the user was not asking to have it
		 * written, so we are not breaking any promises.
		 */
-		if (vp->v_iflag & VI_DOOMED)
+		if (VN_IS_DOOMED(vp))
 			break;
 		/*
		 * We prevent deadlock by always fetching inodes from the
@@ -12532,7 +12532,7 @@ softdep_fsync(vp)
 		error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE, &pvp,
 		    FFSV_FORCEINSMQ);
 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-		if (vp->v_iflag & VI_DOOMED) {
+		if (VN_IS_DOOMED(vp)) {
 			if (error == 0)
 				vput(pvp);
 			error = ENOENT;
diff --git a/sys/ufs/ufs/ufs_inode.c b/sys/ufs/ufs/ufs_inode.c
index 8fd54b384d3b..53c259f07f40 100644
--- a/sys/ufs/ufs/ufs_inode.c
+++ b/sys/ufs/ufs/ufs_inode.c
@@ -102,7 +102,7 @@ ufs_inactive(ap)
loop:
 	if (vn_start_secondary_write(vp, &mp, V_NOWAIT) != 0) {
 		/* Cannot delete file while file system is suspended */
-		if ((vp->v_iflag & VI_DOOMED) != 0) {
+		if (VN_IS_DOOMED(vp)) {
 			/* Cannot return before file is deleted */
 			(void) vn_start_secondary_write(vp, &mp,
 			    V_WAIT);
diff --git a/sys/ufs/ufs/ufs_lookup.c b/sys/ufs/ufs/ufs_lookup.c
index ec0a328988fe..d18b5f3a891c 100644
--- a/sys/ufs/ufs/ufs_lookup.c
+++ b/sys/ufs/ufs/ufs_lookup.c
@@ -728,7 +728,7 @@ ufs_lookup_ino(struct vnode *vdp, struct vnode **vpp, struct componentname *cnp,
 			 * Relock for the "." case may left us with
			 * reclaimed vnode.
			 */
-			if (vdp->v_iflag & VI_DOOMED) {
+			if (VN_IS_DOOMED(vdp)) {
 				vrele(vdp);
 				return (ENOENT);
 			}
diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c
index 98b8bf5e62ac..d8b4ee49e55d 100644
--- a/sys/vm/swap_pager.c
+++ b/sys/vm/swap_pager.c
@@ -2917,7 +2917,7 @@ swapongeom(struct vnode *vp)
 	int error;
 
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
-	if (vp->v_type != VCHR || (vp->v_iflag & VI_DOOMED) != 0) {
+	if (vp->v_type != VCHR || VN_IS_DOOMED(vp)) {
 		error = ENOENT;
 	} else {
 		g_topology_lock();
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index d8c7e2f5eea8..c57aa8d72e09 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -374,7 +374,7 @@ vnode_pager_haspage(vm_object_t object, vm_pindex_t pindex, int *before,
 	 * If no vp or vp is doomed or marked transparent to VM, we do not
	 * have the page.
	 */
-	if (vp == NULL || vp->v_iflag & VI_DOOMED)
+	if (vp == NULL || VN_IS_DOOMED(vp))
 		return FALSE;
 	/*
	 * If the offset is beyond end of file we do
@@ -553,7 +553,7 @@ vnode_pager_addr(struct vnode *vp, vm_ooffset_t address, daddr_t *rtaddress,
 	if (address < 0)
 		return -1;
 
-	if (vp->v_iflag & VI_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return -1;
 
 	bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -591,7 +591,7 @@ vnode_pager_input_smlfs(vm_object_t object, vm_page_t m)
 	error = 0;
 
 	vp = object->handle;
-	if (vp->v_iflag & VI_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return VM_PAGER_BAD;
 
 	bsize = vp->v_mount->mnt_stat.f_iosize;
@@ -815,7 +815,7 @@ vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m, int count,
 	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK,
 	    ("%s does not support devices", __func__));
 
-	if (vp->v_iflag & VI_DOOMED)
+	if (VN_IS_DOOMED(vp))
 		return (VM_PAGER_BAD);
 
 	object = vp->v_object;