diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index 2ca1781..cd98c2b 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -455,12 +455,11 @@ _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
  * sleep waiting for it), or if we need to recurse on it.
  */
 void
-__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid, int opts,
     const char *file, int line)
 {
         struct mtx *m;
         struct turnstile *ts;
-        uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
         volatile struct thread *owner;
 #endif
@@ -489,7 +488,6 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
         lock_delay_arg_init(&lda, NULL);
 #endif
         m = mtxlock2mtx(c);
-        v = MTX_READ_VALUE(m);
 
         if (__predict_false(lv_mtx_owner(v) == (struct thread *)tid)) {
                 KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0 ||
@@ -520,9 +518,8 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 
         for (;;) {
                 if (v == MTX_UNOWNED) {
-                        if (_mtx_obtain_lock(m, tid))
+                        if (_mtx_obtain_lock_fetch(m, &v, tid))
                                 break;
-                        v = MTX_READ_VALUE(m);
                         continue;
                 }
 #ifdef KDTRACE_HOOKS
@@ -674,12 +671,11 @@ _mtx_lock_spin_failed(struct mtx *m)
  * is handled inline.
  */
 void
-_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
-    const char *file, int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
+    int opts, const char *file, int line)
 {
         struct mtx *m;
         struct lock_delay_arg lda;
-        uintptr_t v;
 #ifdef LOCK_PROFILING
         int contested = 0;
         uint64_t waittime = 0;
@@ -706,12 +702,10 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 #ifdef KDTRACE_HOOKS
         spin_time -= lockstat_nsecs(&m->lock_object);
 #endif
-        v = MTX_READ_VALUE(m);
         for (;;) {
                 if (v == MTX_UNOWNED) {
-                        if (_mtx_obtain_lock(m, tid))
+                        if (_mtx_obtain_lock_fetch(m, &v, tid))
                                 break;
-                        v = MTX_READ_VALUE(m);
                         continue;
                 }
                 /* Give interrupts a chance while we spin. */
@@ -892,7 +886,8 @@ thread_lock_set(struct thread *td, struct mtx *new)
  * need to wake up a blocked thread).
  */
 void
-__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
+__mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
+    const char *file, int line)
 {
         struct mtx *m;
         struct turnstile *ts;
@@ -902,6 +897,8 @@ __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
 
         m = mtxlock2mtx(c);
 
+        v = MTX_READ_VALUE(m);
+
         if (mtx_recursed(m)) {
                 if (--(m->mtx_recurse) == 0)
                         atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
@@ -910,6 +907,11 @@ __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
                 return;
         }
 
+        if (v == (uintptr_t)curthread) {
+                if (_mtx_release_lock(m, (uintptr_t)curthread))
+                        return;
+        }
+
         /*
          * We have to lock the chain before the turnstile so this turnstile
          * can be removed from the hash list if it is empty.
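Note: every acquire loop in the kern_mutex.c hunks above now goes through _mtx_obtain_lock_fetch(), which wraps atomic_fcmpset_acq_ptr(). Unlike the old cmpset primitive, a failed fcmpset hands back the lock word it observed, so the explicit v = MTX_READ_VALUE(m) re-reads can be deleted and the observed value can be passed straight from the inlined fast path into the slow-path functions. Below is a minimal, self-contained C11 sketch of that pattern; it is not FreeBSD code, the toy_* names are invented for illustration, and atomic_compare_exchange_strong_explicit() stands in for atomic_fcmpset_acq_ptr(), which has the same report-the-observed-value-on-failure behaviour.

/*
 * Minimal C11 sketch (not FreeBSD code) of the fcmpset pattern used above:
 * a failed compare-exchange writes the value it observed into "expected",
 * just as atomic_fcmpset_acq_ptr() updates *vp, so no separate re-read of
 * the lock word is needed before entering or iterating the slow path.
 */
#include <stdatomic.h>
#include <stdint.h>

#define TOY_UNOWNED ((uintptr_t)0)

struct toy_mtx {
        _Atomic uintptr_t lock;         /* TOY_UNOWNED or owner "tid" */
};

/* Analogue of _mtx_obtain_lock_fetch(): try once, report the value seen. */
static inline int
toy_obtain_lock_fetch(struct toy_mtx *m, uintptr_t *vp, uintptr_t tid)
{

        *vp = TOY_UNOWNED;
        return (atomic_compare_exchange_strong_explicit(&m->lock, vp, tid,
            memory_order_acquire, memory_order_relaxed));
}

static void
toy_lock(struct toy_mtx *m, uintptr_t tid)
{
        uintptr_t v;

        if (toy_obtain_lock_fetch(m, &v, tid))
                return;                 /* fast path: lock was unowned */
        /*
         * Slow path: "v" already holds the lock word seen by the failed
         * attempt, mirroring __mtx_lock_sleep() now taking "v" as an
         * argument instead of re-reading it.
         */
        for (;;) {
                if (v == TOY_UNOWNED) {
                        if (toy_obtain_lock_fetch(m, &v, tid))
                                break;
                        continue;       /* "v" was refreshed by the failure */
                }
                /* Owned by another thread; a real lock would spin or block. */
                v = atomic_load_explicit(&m->lock, memory_order_relaxed);
        }
}

The payoff is one atomic load less per failed acquire attempt, plus an inlined fast path that no longer tests the lock word before issuing the CAS.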
diff --git a/sys/sys/lockstat.h b/sys/sys/lockstat.h
index e550353..3b235d2 100644
--- a/sys/sys/lockstat.h
+++ b/sys/sys/lockstat.h
@@ -107,6 +107,8 @@ extern int lockstat_enabled;
         LOCKSTAT_RECORD1(probe, lp, a); \
 } while (0)
 
+#define LOCKSTAT_PROFILE_ENABLED(probe) SDT_PROBE_ENABLED(lockstat, , , probe)
+
 struct lock_object;
 uint64_t lockstat_nsecs(struct lock_object *);
 
@@ -130,6 +132,8 @@ uint64_t lockstat_nsecs(struct lock_object *);
 #define LOCKSTAT_PROFILE_RELEASE_RWLOCK(probe, lp, a) \
         LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp)
 
+#define LOCKSTAT_PROFILE_ENABLED(probe) 0
+
 #endif /* !KDTRACE_HOOKS */
 #endif /* _KERNEL */
 #endif /* _SYS_LOCKSTAT_H */
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 11ed9d7..4d11481 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -98,20 +98,20 @@ void mtx_sysinit(void *arg);
 int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
     int line);
 void mutex_init(void);
-void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t v, uintptr_t tid,
+    int opts, const char *file, int line);
+void __mtx_unlock_sleep(volatile uintptr_t *c, uintptr_t v, int opts,
     const char *file, int line);
-void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
-    int line);
 #ifdef SMP
-void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
-    const char *file, int line);
+void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t v,
+    uintptr_t tid, int opts, const char *file, int line);
 #endif
 void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
     int line);
 void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
     int line);
-void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
-    int line);
+void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts,
+    const char *file, int line);
 int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
     const char *file, int line);
 void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
@@ -140,13 +140,13 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
         _mtx_destroy(&(m)->mtx_lock)
 #define mtx_trylock_flags_(m, o, f, l) \
         _mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
-#define _mtx_lock_sleep(m, t, o, f, l) \
-        __mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l)
-#define _mtx_unlock_sleep(m, o, f, l) \
-        __mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
+#define _mtx_lock_sleep(m, v, t, o, f, l) \
+        __mtx_lock_sleep(&(m)->mtx_lock, v, t, o, f, l)
+#define _mtx_unlock_sleep(m, v, o, f, l) \
+        __mtx_unlock_sleep(&(m)->mtx_lock, v, o, f, l)
 #ifdef SMP
-#define _mtx_lock_spin(m, t, o, f, l) \
-        _mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l)
+#define _mtx_lock_spin(m, v, t, o, f, l) \
+        _mtx_lock_spin_cookie(&(m)->mtx_lock, v, t, o, f, l)
 #endif
 #define _mtx_lock_flags(m, o, f, l) \
         __mtx_lock_flags(&(m)->mtx_lock, o, f, l)
@@ -171,10 +171,20 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
 #define _mtx_obtain_lock(mp, tid) \
         atomic_cmpset_acq_ptr(&(mp)->mtx_lock, MTX_UNOWNED, (tid))
 
+#define _mtx_obtain_lock_fetch(mp, vp, tid) ({ \
+        *vp = MTX_UNOWNED; \
+        atomic_fcmpset_acq_ptr(&(mp)->mtx_lock, vp, (tid)); \
+})
+
 /* Try to release mtx_lock if it is unrecursed and uncontested. */
 #define _mtx_release_lock(mp, tid) \
         atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), MTX_UNOWNED)
 
+#define _mtx_release_lock_fetch(mp, vp, tid) ({ \
+        *vp = tid; \
+        atomic_fcmpset_rel_ptr(&(mp)->mtx_lock, vp, MTX_UNOWNED); \
+})
+
 /* Release mtx_lock quickly, assuming we own it. */
 #define _mtx_release_lock_quick(mp) \
         atomic_store_rel_ptr(&(mp)->mtx_lock, MTX_UNOWNED)
@@ -188,9 +198,10 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
 /* Lock a normal mutex. */
 #define __mtx_lock(mp, tid, opts, file, line) do { \
         uintptr_t _tid = (uintptr_t)(tid); \
+        uintptr_t _v; \
         \
-        if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid)))\
-                _mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \
+        if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) \
+                _mtx_lock_sleep((mp), _v, _tid, (opts), (file), (line));\
         else \
                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(adaptive__acquire, \
                     mp, 0, 0, file, line); \
@@ -205,13 +216,14 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
 #ifdef SMP
 #define __mtx_lock_spin(mp, tid, opts, file, line) do { \
         uintptr_t _tid = (uintptr_t)(tid); \
+        uintptr_t _v; \
         \
         spinlock_enter(); \
-        if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\
-                if ((mp)->mtx_lock == _tid) \
+        if (!_mtx_obtain_lock_fetch((mp), &_v, _tid)) { \
+                if (_v == _tid) \
                         (mp)->mtx_recurse++; \
                 else \
-                        _mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
+                        _mtx_lock_spin((mp), _v, _tid, (opts), (file), (line));\
         } else \
                 LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \
                     mp, 0, 0, file, line); \
@@ -262,11 +274,12 @@ void thread_lock_flags_(struct thread *, int, const char *, int);
 /* Unlock a normal mutex. */
 #define __mtx_unlock(mp, tid, opts, file, line) do { \
         uintptr_t _tid = (uintptr_t)(tid); \
+        uintptr_t _v; \
         \
-        if ((mp)->mtx_recurse == 0) \
+        if (LOCKSTAT_PROFILE_ENABLED(adaptive__release) && (mp)->mtx_recurse == 0)\
                 LOCKSTAT_PROFILE_RELEASE_LOCK(adaptive__release, mp); \
-        if ((mp)->mtx_lock != _tid || !_mtx_release_lock((mp), _tid)) \
-                _mtx_unlock_sleep((mp), (opts), (file), (line)); \
+        if (!_mtx_release_lock_fetch((mp), &_v, _tid)) \
+                _mtx_unlock_sleep((mp), _v, (opts), (file), (line)); \
 } while (0)
 
 /*
diff --git a/sys/sys/sdt.h b/sys/sys/sdt.h
index 25423d7..ed699bc 100644
--- a/sys/sys/sdt.h
+++ b/sys/sys/sdt.h
@@ -160,6 +160,9 @@ SET_DECLARE(sdt_argtypes_set, struct sdt_argtype);
 #define SDT_PROBE_DECLARE(prov, mod, func, name) \
         extern struct sdt_probe sdt_##prov##_##mod##_##func##_##name[1]
 
+#define SDT_PROBE_ENABLED(prov, mod, func, name) \
+        (__predict_false(sdt_##prov##_##mod##_##func##_##name->id))
+
 #define SDT_PROBE(prov, mod, func, name, arg0, arg1, arg2, arg3, arg4) do { \
         if (__predict_false(sdt_##prov##_##mod##_##func##_##name->id)) \
                 (*sdt_probe_func)(sdt_##prov##_##mod##_##func##_##name->id, \
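Note: on the header side, LOCKSTAT_PROFILE_ENABLED() expands to SDT_PROBE_ENABLED(), i.e. a test of the probe's id word, when KDTRACE_HOOKS is configured, and to a constant 0 otherwise. That is what lets the new __mtx_unlock() skip both the mtx_recurse read and the lockstat bookkeeping unless the adaptive__release probe is actually armed. The sketch below illustrates the gating idea only; it is not the kernel's SDT machinery, the toy_* names are hypothetical, and __builtin_expect() plays the role of __predict_false().

/*
 * Standalone sketch of the LOCKSTAT_PROFILE_ENABLED()/SDT_PROBE_ENABLED()
 * gate (assumed illustration, not the kernel's SDT implementation): the
 * probe's "id" word is non-zero only while the probe is enabled, so the
 * profiling work stays off the unlock fast path the rest of the time.
 */
#include <stdio.h>

struct toy_probe {
        unsigned int id;        /* 0 while disabled, non-zero when enabled */
};

static struct toy_probe toy_release_probe;

#define TOY_PROBE_ENABLED(p)    __builtin_expect((p)->id != 0, 0)

static void
toy_unlock_profile(int recursed)
{

        /*
         * Mirrors the __mtx_unlock() change: only inspect the recursion
         * count and record the release when the probe is armed.
         */
        if (TOY_PROBE_ENABLED(&toy_release_probe) && recursed == 0)
                printf("release probe %u fired\n", toy_release_probe.id);
}

int
main(void)
{

        toy_unlock_profile(0);          /* disabled: just the gate test */
        toy_release_probe.id = 42;      /* pretend the probe got enabled */
        toy_unlock_profile(0);          /* now the profiling path runs */
        return (0);
}

With the probe disabled, the whole profiling block costs a single predicted-not-taken branch on the unlock fast path.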