diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 18a0053..7ad6507 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -402,16 +402,21 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
 	rval = 1;
 	recursed = false;
 	v = MTX_UNOWNED;
-	if (!_mtx_obtain_lock_fetch(m, &v, tid)) {
+	for (;;) {
+		if (_mtx_obtain_lock_fetch(m, &v, tid))
+			break;
+		if (v == MTX_UNOWNED)
+			continue;
 		if (v == tid &&
 		    ((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
 		    (opts & MTX_RECURSE) != 0)) {
-			m->mtx_recurse++;
-			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
-			recursed = true;
-		} else {
-			rval = 0;
+			m->mtx_recurse++;
+			atomic_set_ptr(&m->mtx_lock, MTX_RECURSED);
+			recursed = true;
+			break;
 		}
+		rval = 0;
+		break;
 	}
 
 	opts &= ~MTX_RECURSE;
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index d2310ed..2e70321 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -314,13 +314,18 @@ __rw_try_wlock(volatile uintptr_t *c, const char *file, int line)
 	rval = 1;
 	recursed = false;
 	v = RW_UNLOCKED;
-	if (!atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid)) {
+	for (;;) {
+		if (atomic_fcmpset_acq_ptr(&rw->rw_lock, &v, tid))
+			break;
+		if (v == RW_UNLOCKED)
+			continue;
 		if (v == tid &&
 		    (rw->lock_object.lo_flags & LO_RECURSABLE)) {
 			rw->rw_recurse++;
 			atomic_set_ptr(&rw->rw_lock, RW_LOCK_WRITER_RECURSED);
-		} else {
-			rval = 0;
+			break;
 		}
+		rval = 0;
+		break;
 	}
 	LOCK_LOG_TRY("WLOCK", &rw->lock_object, 0, rval, file, line);
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index d7f3f13..3236e03 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -341,13 +341,18 @@ sx_try_xlock_(struct sx *sx, const char *file, int line)
 	rval = 1;
 	recursed = false;
 	x = SX_LOCK_UNLOCKED;
-	if (!atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid)) {
+	for (;;) {
+		if (atomic_fcmpset_acq_ptr(&sx->sx_lock, &x, tid))
+			break;
+		if (x == SX_LOCK_UNLOCKED)
+			continue;
 		if (x == tid &&
 		    (sx->lock_object.lo_flags & LO_RECURSABLE)) {
 			sx->sx_recurse++;
 			atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
-		} else {
-			rval = 0;
+			break;
 		}
+		rval = 0;
+		break;
 	}
 	LOCK_LOG_TRY("XLOCK", &sx->lock_object, 0, rval, file, line);
diff --git a/sys/kern/vfs_cache.c b/sys/kern/vfs_cache.c
index 103e0a1..6cf8f6f 100644
--- a/sys/kern/vfs_cache.c
+++ b/sys/kern/vfs_cache.c
@@ -395,7 +395,7 @@ static int cache_yield;
 SYSCTL_INT(_vfs_cache, OID_AUTO, yield, CTLFLAG_RD, &cache_yield, 0,
     "Number of times cache called yield");
 
-static void
+static void __noinline
 cache_maybe_yield(void)
 {
 
@@ -956,7 +956,7 @@ out_relock:
 	return (false);
 }
 
-static int
+static int __noinline
 cache_zap_locked_vnode(struct namecache *ncp, struct vnode *vp)
 {
 	struct mtx *pvlp, *vlp1, *vlp2, *to_unlock;
@@ -995,7 +995,7 @@ out:
 	return (error);
 }
 
-static int
+static int __noinline
 cache_zap_rlocked_bucket(struct namecache *ncp, struct rwlock *blp)
 {
 	struct mtx *dvlp, *vlp;
@@ -1280,7 +1280,7 @@ success:
 			return (ENOENT);
 		}
 	}
-	if (error) {
+	if (__predict_false(error != 0)) {
 		*vpp = NULL;
 		goto retry;
 	}
@@ -1299,7 +1299,7 @@ zap_and_exit:
 		error = cache_zap_rlocked_bucket(ncp, blp);
 	else
 		error = cache_zap_locked_vnode(ncp, dvp);
-	if (error != 0) {
+	if (__predict_false(error != 0)) {
 		zap_and_exit_bucket_fail++;
 		cache_maybe_yield();
 		goto retry;
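
The three locking hunks apply the same transformation: the single fcmpset attempt in each trylock path becomes a loop. An fcmpset-style primitive can fail spuriously (notably on LL/SC architectures) and on failure reports the value it actually observed, so a lone attempt could report the lock as busy even though it was free. If the observed value is still "unowned", the failure was transient and the acquisition is simply retried; only a value identifying another owner (or non-recursable self-ownership) makes the trylock return failure. Below is a minimal standalone sketch of that pattern, assuming C11 atomics in place of the kernel's atomic_fcmpset_acq_ptr(); sketch_lock, sketch_trylock, and the separate recursion fields are hypothetical simplifications, not the kernel's layout (the real code keeps a RECURSED flag in the lock word itself via atomic_set_ptr()).

/*
 * Sketch only, not FreeBSD code. atomic_compare_exchange_weak_explicit()
 * stands in for atomic_fcmpset_acq_ptr(): on failure it writes the
 * observed value back into "expected", and it may fail spuriously,
 * hence the retry when the lock still reads as unowned.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define	LOCK_UNOWNED	((uintptr_t)0)

struct sketch_lock {
	_Atomic uintptr_t owner;	/* 0 == unowned, else owner tid */
	unsigned	recurse;	/* recursion depth, owner writes only */
	bool		recursable;	/* stands in for LO_RECURSABLE */
};

static bool
sketch_trylock(struct sketch_lock *lk, uintptr_t tid)
{
	uintptr_t v;

	v = LOCK_UNOWNED;
	for (;;) {
		/* Try to swing unowned -> tid; on failure v holds what we saw. */
		if (atomic_compare_exchange_weak_explicit(&lk->owner, &v,
		    tid, memory_order_acquire, memory_order_relaxed))
			return (true);
		/* Spurious failure: the lock was free after all, retry. */
		if (v == LOCK_UNOWNED)
			continue;
		/* Held by us and recursion is permitted: recurse and succeed. */
		if (v == tid && lk->recursable) {
			lk->recurse++;
			return (true);
		}
		/* Genuinely owned by another thread: trylock fails at once. */
		return (false);
	}
}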
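
The vfs_cache.c hunks, by contrast, are pure codegen hints: rarely executed helpers such as the cache_zap_*() routines are marked __noinline so they are not inlined into the hot cache_lookup() path, and rare error branches are wrapped in __predict_false() (the "if (error)" test is also normalized to an explicit comparison). As a rough illustration of what these annotations do, the sketch below defines them the way FreeBSD's sys/cdefs.h does on GCC/Clang; cold_path() and hot_path() are hypothetical names, not kernel functions.

/* Sketch of the annotations, per their common GCC/Clang definitions. */
#define	__predict_false(exp)	__builtin_expect((exp), 0)
#define	__noinline		__attribute__((__noinline__))

/* Hypothetical rarely-taken helper: kept out of the caller's body. */
static int __noinline
cold_path(void)
{
	return (-1);
}

/* Hypothetical hot function: the error branch is laid out off the fast path. */
int
hot_path(int error)
{
	if (__predict_false(error != 0))
		return (cold_path());
	return (0);
}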