diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 87328c1..245d00e 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -2149,37 +2151,44 @@ vrefcnt(struct vnode *vp)
 	return (usecnt);
 }
 
+#define	VPUTX_VRELE	1
+#define	VPUTX_VPUT	2
+#define	VPUTX_VUNREF	3
 
-/*
- * Vnode put/release.
- * If count drops to zero, call inactive routine and return to freelist.
- */
-void
-vrele(struct vnode *vp)
+static void
+vputx(struct vnode *vp, int func)
 {
-	struct thread *td = curthread;	/* XXX */
+	int error;
 
-	KASSERT(vp != NULL, ("vrele: null vp"));
+	KASSERT(vp != NULL, ("vputx: null vp"));
+	if (func == VPUTX_VUNREF)
+		ASSERT_VOP_ELOCKED(vp, "vunref");
+	else if (func == VPUTX_VPUT)
+		ASSERT_VOP_LOCKED(vp, "vput");
+	else
+		KASSERT(func == VPUTX_VRELE, ("vputx: wrong func"));
 	VFS_ASSERT_GIANT(vp->v_mount);
-
+	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
 	VI_LOCK(vp);
 
 	/* Skip this v_writecount check if we're going to panic below. */
 	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
-	    ("vrele: missed vn_close"));
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
+	    ("vputx: missed vn_close"));
+	error = 0;
 
 	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
 	    vp->v_usecount == 1)) {
+		if (func == VPUTX_VPUT)
+			VOP_UNLOCK(vp, 0);
 		v_decr_usecount(vp);
 		return;
 	}
+
 	if (vp->v_usecount != 1) {
 #ifdef DIAGNOSTIC
-		vprint("vrele: negative ref count", vp);
+		vprint("vputx: negative ref count", vp);
 #endif
-		VI_UNLOCK(vp);
-		panic("vrele: negative ref cnt");
+		panic("vputx: negative ref cnt");
 	}
 	CTR2(KTR_VFS, "%s: return vnode %p to the freelist", __func__, vp);
 	/*
@@ -2193,22 +2202,36 @@ vrele(struct vnode *vp)
 	 * as VI_DOINGINACT to avoid recursion.
 	 */
 	vp->v_iflag |= VI_OWEINACT;
-	if (vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK) == 0) {
+	if (func == VPUTX_VRELE) {
+		error = vn_lock(vp, LK_EXCLUSIVE | LK_INTERLOCK);
 		VI_LOCK(vp);
-		if (vp->v_usecount > 0)
-			vp->v_iflag &= ~VI_OWEINACT;
-		if (vp->v_iflag & VI_OWEINACT)
-			vinactive(vp, td);
-		VOP_UNLOCK(vp, 0);
-	} else {
+	} else if (func == VPUTX_VPUT && VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
+		error = VOP_LOCK(vp, LK_UPGRADE | LK_INTERLOCK | LK_NOWAIT);
 		VI_LOCK(vp);
-		if (vp->v_usecount > 0)
-			vp->v_iflag &= ~VI_OWEINACT;
+	}
+	if (vp->v_usecount > 0)
+		vp->v_iflag &= ~VI_OWEINACT;
+	if (error == 0) {
+		if (vp->v_iflag & VI_OWEINACT)
+			vinactive(vp, curthread);
+		if (func != VPUTX_VUNREF)
+			VOP_UNLOCK(vp, 0);
 	}
 	vdropl(vp);
 }
 
 /*
+ * Vnode put/release.
+ * If count drops to zero, call inactive routine and return to freelist.
+ */
+void
+vrele(struct vnode *vp)
+{
+
+	vputx(vp, VPUTX_VRELE);
+}
+
+/*
  * Release an already locked vnode.  This give the same effects as
  * unlock+vrele(), but takes less time and avoids releasing and
  * re-aquiring the lock (as vrele() acquires the lock internally.)
@@ -2216,56 +2239,18 @@ vrele(struct vnode *vp)
 void
 vput(struct vnode *vp)
 {
-	struct thread *td = curthread;	/* XXX */
-	int error;
 
-	KASSERT(vp != NULL, ("vput: null vp"));
-	ASSERT_VOP_LOCKED(vp, "vput");
-	VFS_ASSERT_GIANT(vp->v_mount);
-	CTR2(KTR_VFS, "%s: vp %p", __func__, vp);
-	VI_LOCK(vp);
-	/* Skip this v_writecount check if we're going to panic below. */
-	VNASSERT(vp->v_writecount < vp->v_usecount || vp->v_usecount < 1, vp,
-	    ("vput: missed vn_close"));
-	error = 0;
+	vputx(vp, VPUTX_VPUT);
+}
 
-	if (vp->v_usecount > 1 || ((vp->v_iflag & VI_DOINGINACT) &&
-	    vp->v_usecount == 1)) {
-		VOP_UNLOCK(vp, 0);
-		v_decr_usecount(vp);
-		return;
-	}
+/*
+ * Release an exclusively locked vnode. Do not unlock the vnode lock.
+ */
+void
+vunref(struct vnode *vp)
+{
 
-	if (vp->v_usecount != 1) {
-#ifdef DIAGNOSTIC
-		vprint("vput: negative ref count", vp);
-#endif
-		panic("vput: negative ref cnt");
-	}
-	CTR2(KTR_VFS, "%s: return to freelist the vnode %p", __func__, vp);
-	/*
-	 * We want to hold the vnode until the inactive finishes to
-	 * prevent vgone() races.  We drop the use count here and the
-	 * hold count below when we're done.
-	 */
-	v_decr_useonly(vp);
-	vp->v_iflag |= VI_OWEINACT;
-	if (VOP_ISLOCKED(vp) != LK_EXCLUSIVE) {
-		error = VOP_LOCK(vp, LK_UPGRADE|LK_INTERLOCK|LK_NOWAIT);
-		VI_LOCK(vp);
-		if (error) {
-			if (vp->v_usecount > 0)
-				vp->v_iflag &= ~VI_OWEINACT;
-			goto done;
-		}
-	}
-	if (vp->v_usecount > 0)
-		vp->v_iflag &= ~VI_OWEINACT;
-	if (vp->v_iflag & VI_OWEINACT)
-		vinactive(vp, td);
-	VOP_UNLOCK(vp, 0);
-done:
-	vdropl(vp);
+	vputx(vp, VPUTX_VUNREF);
 }
 
 /*
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index fc919a9..6ed52ef 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -632,6 +634,7 @@ void	vholdl(struct vnode *);
 int	vinvalbuf(struct vnode *vp, int save, int slpflag, int slptimeo);
 int	vtruncbuf(struct vnode *vp, struct ucred *cred, struct thread *td,
 	    off_t length, int blksize);
+void	vunref(struct vnode *);
 void	vn_printf(struct vnode *vp, const char *fmt, ...) __printflike(2,3);
 #define vprint(label, vp) vn_printf((vp), "%s\n", (label))
 int	vrecycle(struct vnode *vp, struct thread *td);
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index 19edce1..8f72142 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -951,6 +951,8 @@ rescan0:
 				vnodes_skipped++;
 				goto unlock_and_continue;
 			}
+			KASSERT(mp != NULL,
+			    ("vp %p with NULL v_mount", vp));
 			vm_page_unlock_queues();
 			vm_object_reference_locked(object);
 			VM_OBJECT_UNLOCK(object);
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index faa6f37..37fbe64 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -250,13 +250,16 @@ static void
 vnode_pager_dealloc(object)
 	vm_object_t object;
 {
-	struct vnode *vp = object->handle;
+	struct vnode *vp;
+	int refs;
 
+	vp = object->handle;
 	if (vp == NULL)
 		panic("vnode_pager_dealloc: pager already dealloced");
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	vm_object_pip_wait(object, "vnpdea");
+	refs = object->ref_count;
 
 	object->handle = NULL;
 	object->type = OBJT_DEAD;
@@ -267,6 +270,8 @@ vnode_pager_dealloc(object)
 	ASSERT_VOP_ELOCKED(vp, "vnode_pager_dealloc");
 	vp->v_object = NULL;
 	vp->v_vflag &= ~VV_TEXT;
+	while (refs-- > 0)
+		vunref(vp);
 }
 
 static boolean_t
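
Usage note (not part of the patch): after this change the three entry points all funnel into vputx() but keep distinct locking contracts. A minimal sketch, assuming a hypothetical drop_reference_examples() helper whose vnode arguments each already carry a use reference; it uses only calls that appear in the patch itself:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/vnode.h>

/*
 * Sketch only: the calling conventions of the three wrappers
 * around vputx().
 */
static void
drop_reference_examples(struct vnode *unlocked_vp, struct vnode *locked_vp,
    struct vnode *keep_locked_vp)
{

	/* VPUTX_VRELE: no vnode lock held; vputx() locks internally. */
	vrele(unlocked_vp);

	/* VPUTX_VPUT: vnode lock held; it is dropped during the release. */
	ASSERT_VOP_LOCKED(locked_vp, "example");
	vput(locked_vp);

	/* VPUTX_VUNREF: exclusive lock held and retained across the call. */
	ASSERT_VOP_ELOCKED(keep_locked_vp, "example");
	vunref(keep_locked_vp);
	VOP_UNLOCK(keep_locked_vp, 0);	/* caller still owns the unlock */
}

The vunref() case mirrors the new vnode_pager_dealloc() code above, which must drop the object's remaining references while the vnode stays exclusively locked.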