diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 1b10623..66b05fc 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -47,6 +47,7 @@ __FBSDID("$FreeBSD$");
 #endif
 #include
 #include
+#include
 #include
@@ -64,6 +65,12 @@ CTASSERT(((LK_ADAPTIVE | LK_NOSHARE) & LO_CLASSFLAGS) ==
 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
 
+SDT_PROVIDER_DECLARE(vfs);
+SDT_PROBE_DEFINE0(vfs, , lockmgr, lock_fallback);
+SDT_PROBE_DEFINE0(vfs, , lockmgr, lock_fastpath);
+SDT_PROBE_DEFINE0(vfs, , lockmgr, unlock_fallback);
+SDT_PROBE_DEFINE0(vfs, , lockmgr, unlock_fastpath);
+
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
@@ -449,6 +456,122 @@ lockdestroy(struct lock *lk)
 }
 
 int
+__lockmgr_lock_fast_path(struct lock *lk, u_int flags, struct lock_object *ilk,
+    const char *file, int line)
+{
+	struct lock_class *class;
+	uintptr_t x, v, tid;
+	u_int op;
+
+	op = flags & LK_TYPE_MASK;
+	switch (op) {
+	case LK_SHARED:
+		if (LK_CAN_WITNESS(flags))
+			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER,
+			    file, line, flags & LK_INTERLOCK ? ilk : NULL);
+		for (;;) {
+			x = lk->lk_lock;
+
+			if (LK_CAN_SHARE(x, flags)) {
+				if (atomic_cmpset_acq_ptr(&lk->lk_lock, x,
+				    x + LK_ONE_SHARER))
+					goto out_got_lock;
+				continue;
+			}
+			break;
+		}
+		break;
+	case LK_EXCLUSIVE:
+		if (LK_CAN_WITNESS(flags))
+			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
+			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
+			    ilk : NULL);
+		tid = (uintptr_t)curthread;
+		if (lk->lk_lock == LK_UNLOCKED &&
+		    atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid))
+			goto out_got_lock;
+		break;
+	case LK_UPGRADE:
+	case LK_TRYUPGRADE:
+		_lockmgr_assert(lk, KA_SLOCKED, file, line);
+		tid = (uintptr_t)curthread;
+		v = lk->lk_lock;
+		x = v & LK_ALL_WAITERS;
+		v &= LK_EXCLUSIVE_SPINNERS;
+		if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x | v,
+		    tid | x)) {
+			LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file,
+			    line);
+			WITNESS_UPGRADE(&lk->lock_object, LOP_EXCLUSIVE |
+			    LK_TRYWIT(flags), file, line);
+			TD_SLOCKS_DEC(curthread);
+			goto out_got_lock;
+		}
+		break;
+	default:
+		break;
+	}
+	SDT_PROBE0(vfs, , lockmgr, lock_fallback);
+	return (__lockmgr_args(lk, flags, ilk, LK_WMESG_DEFAULT,
+	    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, file, line));
+out_got_lock:
+	if (__predict_false(flags & LK_INTERLOCK)) {
+		class = LOCK_CLASS(ilk);
+		class->lc_unlock(ilk);
+	}
+	SDT_PROBE0(vfs, , lockmgr, lock_fastpath);
+	return (0);
+}
+
+int
+__lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
+    struct lock_object *ilk)
+{
+	struct lock_class *class;
+	uintptr_t x, tid;
+
+	_lockmgr_assert(lk, KA_LOCKED, __FILE__, __LINE__);
+	x = lk->lk_lock;
+	if ((x & LK_SHARE) == 0) {
+		tid = (uintptr_t)curthread;
+		if (!lockmgr_recursed(lk) &&
+		    atomic_cmpset_rel_ptr(&lk->lk_lock, tid, LK_UNLOCKED))
+			goto out_success;
+	} else {
+		for (;;) {
+			if (LK_SHARERS(x) > 1) {
+				if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
+				    x - LK_ONE_SHARER))
+					goto out_success;
+				x = lk->lk_lock;
+				continue;
+			}
+
+			if ((x & LK_ALL_WAITERS) == 0) {
+				MPASS((x & ~LK_EXCLUSIVE_SPINNERS) ==
+				    LK_SHARERS_LOCK(1));
+				if (atomic_cmpset_rel_ptr(&lk->lk_lock, x,
+				    LK_UNLOCKED))
+					goto out_success;
+				x = lk->lk_lock;
+				continue;
+			}
+			break;
+		}
+	}
+	SDT_PROBE0(vfs, , lockmgr, unlock_fallback);
+	return (__lockmgr_args(lk, flags | LK_RELEASE, ilk, LK_WMESG_DEFAULT,
+	    LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE));
+out_success:
+	if (__predict_false(flags & LK_INTERLOCK)) {
+		class = LOCK_CLASS(ilk);
+		class->lc_unlock(ilk);
+	}
+	SDT_PROBE0(vfs, , lockmgr, unlock_fastpath);
+	return (0);
+}
+
+int
 __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
     const char *wmesg, int pri, int timo, const char *file, int line)
 {
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index 1e910f8..9dac4ad 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -54,6 +54,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include
 #include
@@ -507,6 +508,10 @@ vop_stdpathconf(ap)
 	/* NOTREACHED */
 }
 
+static u_int vfs_lockmgr_fast_path __read_mostly;
+SYSCTL_UINT(_vfs, OID_AUTO, vfs_lockmgr_fast_path, CTLFLAG_RW, &vfs_lockmgr_fast_path,
+    0, "");
+
 /*
  * Standard lock, unlock and islocked functions.
  */
@@ -520,10 +525,17 @@ vop_stdlock(ap)
 	} */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
-
-	return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
-	    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
-	    ap->a_line));
+	struct mtx *ilk;
+
+	if (!vfs_lockmgr_fast_path)
+		return (_lockmgr_args(vp->v_vnlock, ap->a_flags, VI_MTX(vp),
+		    LK_WMESG_DEFAULT, LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, ap->a_file,
+		    ap->a_line));
+	else {
+		ilk = VI_MTX(vp);
+		return (__lockmgr_lock_fast_path(vp->v_vnlock, ap->a_flags,
+		    (ilk != NULL) ? &ilk->lock_object : NULL, ap->a_file, ap->a_line));
+	}
 }
 
 /* See above. */
@@ -535,8 +547,16 @@ vop_stdunlock(ap)
 	} */ *ap;
 {
 	struct vnode *vp = ap->a_vp;
-
-	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE, VI_MTX(vp)));
+	struct mtx *ilk;
+
+	if (!vfs_lockmgr_fast_path)
+		return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
+		    VI_MTX(vp)));
+	else {
+		ilk = VI_MTX(vp);
+		return (__lockmgr_unlock_fast_path(vp->v_vnlock, ap->a_flags,
+		    (ilk != NULL) ? &ilk->lock_object : NULL));
+	}
 }
 
 /* See above. */
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 3b88c5c..dd36cdb 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -143,13 +143,6 @@ static u_int vfs_optional_inactive;
 SYSCTL_UINT(_vfs, OID_AUTO, vfs_optional_inactive, CTLFLAG_RW, &vfs_optional_inactive,
     0, "is inactive optional?");
 
-static u_int vfs_inactive_avoided;
-SYSCTL_UINT(_vfs, OID_AUTO, vfs_inactive_avoided, CTLFLAG_RW, &vfs_inactive_avoided,
-    0, "");
-
-static u_int vfs_inactive_done;
-SYSCTL_UINT(_vfs, OID_AUTO, vfs_inactive_done, CTLFLAG_RW, &vfs_inactive_done,
-    0, "");
 
 /*
  * Conversion tables for conversion from vnode types to inode formats
@@ -2797,10 +2790,8 @@ vputx(struct vnode *vp, int func)
 			if (func == VPUTX_VPUT)
 				VOP_UNLOCK(vp, 0);
 			vdropl(vp);
-			vfs_inactive_avoided++;
 			return;
 		}
-		vfs_inactive_done++;
 	}
 
 	/*
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 5479453..643fbdf 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -419,18 +419,10 @@ vn_writechk(vp)
 	return (0);
 }
 
-static u_int vfs_optional_close;
+static u_int vfs_optional_close __read_mostly;
 SYSCTL_UINT(_vfs, OID_AUTO, vfs_optional_close, CTLFLAG_RW, &vfs_optional_close,
     0, "is close optional?");
 
-static u_int vfs_close_avoided;
-SYSCTL_UINT(_vfs, OID_AUTO, vfs_close_avoided, CTLFLAG_RW, &vfs_close_avoided,
-    0, "");
-
-static u_int vfs_close_done;
-SYSCTL_UINT(_vfs, OID_AUTO, vfs_close_done, CTLFLAG_RW, &vfs_close_done,
-    0, "");
-
 /*
  * Vnode close call
  */
@@ -454,11 +446,9 @@ vn_close(vp, flags, file_cred, td)
 		if ((flags & FWRITE) == 0 && vp->v_op->vop_want != NULL) {
 			if (!VOP_WANT(vp, td, VFS_WANT_CLOSE)) {
 				vrele(vp);
-				vfs_close_avoided++;
 				return (0);
 			}
 		}
-		vfs_close_done++;
 	}
 
 	vn_start_write(vp, &mp, V_WAIT);
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index 3e08b0c..d07dffe 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -68,6 +68,10 @@ struct thread;
  */
 int	__lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
	    const char *wmesg, int prio, int timo, const char *file, int line);
+int	__lockmgr_lock_fast_path(struct lock *lk, u_int flags,
+	    struct lock_object *ilk, const char *file, int line);
+int	__lockmgr_unlock_fast_path(struct lock *lk, u_int flags,
+	    struct lock_object *ilk);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void	_lockmgr_assert(const struct lock *lk, int what, const char *file, int line);
 #endif
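
For illustration only, not part of the patch: below is a minimal userspace sketch of the fast-path idea used by __lockmgr_lock_fast_path()/__lockmgr_unlock_fast_path() above. The lock word holds either an exclusive owner's thread cookie or a SHARE-tagged sharer count (loosely modelled on how lockmgr encodes lk_lock), so an uncontended acquire or release is a single compare-and-swap; anything else would fall back to the full __lockmgr_args() slow path, which is what the lock_fallback/unlock_fallback SDT probes count versus the *_fastpath ones. All names here (fp_word, fp_try_slock, FP_ONE_SHARER, ...) are made up for the sketch, it uses C11 atomics instead of the kernel's atomic_cmpset_acq_ptr()/atomic_cmpset_rel_ptr(), and a single global word stands in for a per-lock lk_lock. In the patch itself the new path is only taken when the vfs.vfs_lockmgr_fast_path sysctl is set.

/*
 * Userspace sketch of a CAS-based lock fast path (hypothetical names).
 * The word is either FP_UNLOCKED, an owner cookie with the low bit clear
 * (an aligned thread pointer in the kernel), or FP_SHARE plus a sharer count.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define FP_SHARE	0x1UL		/* low bit set: shared mode (or unlocked) */
#define FP_ONE_SHARER	0x2UL		/* sharer count lives above the SHARE bit */
#define FP_UNLOCKED	FP_SHARE	/* shared mode, zero sharers */

static _Atomic uintptr_t fp_word = FP_UNLOCKED;

/* Uncontended exclusive acquire: swing FP_UNLOCKED to the owner cookie. */
static bool
fp_try_xlock(uintptr_t tid)
{
	uintptr_t x = FP_UNLOCKED;

	return (atomic_compare_exchange_strong_explicit(&fp_word, &x, tid,
	    memory_order_acquire, memory_order_relaxed));
}

/* Uncontended exclusive release: owner swings its cookie back to FP_UNLOCKED. */
static bool
fp_try_xunlock(uintptr_t tid)
{
	uintptr_t x = tid;

	return (atomic_compare_exchange_strong_explicit(&fp_word, &x,
	    FP_UNLOCKED, memory_order_release, memory_order_relaxed));
}

/* Uncontended shared acquire: bump the sharer count while in shared mode. */
static bool
fp_try_slock(void)
{
	uintptr_t x = atomic_load_explicit(&fp_word, memory_order_relaxed);

	if ((x & FP_SHARE) == 0)	/* exclusively owned: caller takes the slow path */
		return (false);
	return (atomic_compare_exchange_strong_explicit(&fp_word, &x,
	    x + FP_ONE_SHARER, memory_order_acquire, memory_order_relaxed));
}

/* Uncontended shared release: the last sharer resets the word to FP_UNLOCKED. */
static bool
fp_try_sunlock(void)
{
	uintptr_t x = atomic_load_explicit(&fp_word, memory_order_relaxed);
	uintptr_t nx;

	if ((x & FP_SHARE) == 0 || x == FP_UNLOCKED)	/* not share-locked: slow path */
		return (false);
	nx = (x == (FP_SHARE | FP_ONE_SHARER)) ? FP_UNLOCKED : x - FP_ONE_SHARER;
	return (atomic_compare_exchange_strong_explicit(&fp_word, &x, nx,
	    memory_order_release, memory_order_relaxed));
}

int
main(void)
{
	assert(fp_try_slock());		/* first reader */
	assert(fp_try_slock());		/* second reader */
	assert(!fp_try_xlock(0x1000));	/* writer cannot fast-path past readers */
	assert(fp_try_sunlock());
	assert(fp_try_sunlock());	/* last reader restores FP_UNLOCKED */
	assert(fp_try_xlock(0x1000));	/* single-CAS exclusive acquire */
	assert(fp_try_xunlock(0x1000));
	return (0);
}

The memory ordering mirrors the patch: acquire semantics on the lock-side CAS and release semantics on the unlock-side CAS, matching atomic_cmpset_acq_ptr() and atomic_cmpset_rel_ptr() in kern_lock.c.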