Index: sys/kern/kern_mutex.c
===================================================================
--- sys/kern/kern_mutex.c	(revision 256019)
+++ sys/kern/kern_mutex.c	(working copy)
@@ -146,6 +146,30 @@ struct lock_class lock_class_mtx_spin = {
 struct mtx blocked_lock;
 struct mtx Giant;
 
+#define	PTR24_LSB	5	/* lower bits all zero */
+
+#ifdef ADAPTIVE_MUTEXES
+/* Spin with capped, per-thread-jittered exponential backoff while owner runs. */
+static __inline void
+mtx_adapt_spin(struct mtx *m, volatile struct thread *owner)
+{
+	u_long rnd;
+	uint32_t backoff, i, tback;
+
+	for (backoff = LOCK_BACKOFF_INIT; mtx_owner(m) == owner &&
+	    TD_IS_RUNNING(owner); backoff <<= LOCK_BACKOFF_SHIFT) {
+		if (backoff > LOCK_BACKOFF_CAP)
+			backoff = LOCK_BACKOFF_CAP;
+		/* Jitter: hash curthread and ticks into [1, backoff]. */
+		rnd = (((u_long)(uintptr_t)curthread >> PTR24_LSB) ^
+		    (u_long)ticks);
+		tback = (uint32_t)(rnd % backoff) + 1;
+		for (i = 0; i != tback; i++)
+			cpu_spinwait();
+	}
+}
+#endif
+
 void
 assert_mtx(const struct lock_object *lock, int what)
 {
@@ -436,13 +458,10 @@ __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t
 			CTR3(KTR_LOCK,
 			    "%s: spinning on %p held by %p",
 			    __func__, m, owner);
-			while (mtx_owner(m) == owner &&
-			    TD_IS_RUNNING(owner)) {
-				cpu_spinwait();
+			mtx_adapt_spin(m, owner);
 #ifdef KDTRACE_HOOKS
 			spin_cnt++;
 #endif
-			}
 			continue;
 		}
 	}
@@ -571,6 +590,7 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintp
 	int contested = 0;
 	uint64_t waittime = 0;
 #endif
+	uint32_t backoff, tback;
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -588,9 +608,14 @@ _mtx_lock_spin_cookie(volatile uintptr_t *c, uintp
 
 		/* Give interrupts a chance while we spin. */
 		spinlock_exit();
+		backoff = LOCK_BACKOFF_INIT;
 		while (m->mtx_lock != MTX_UNOWNED) {
 			if (i++ < 10000000) {
-				cpu_spinwait();
+				if (backoff > LOCK_BACKOFF_CAP)
+					backoff = LOCK_BACKOFF_CAP;
+				for (tback = 0; tback != backoff; tback++)
+					cpu_spinwait();
+				backoff <<= LOCK_BACKOFF_SHIFT;
 				continue;
 			}
 			if (i < 60000000 || kdb_active || panicstr != NULL)
@@ -624,6 +649,7 @@ thread_lock_flags_(struct thread *td, int opts, co
 #ifdef KDTRACE_HOOKS
 	uint64_t spin_cnt = 0;
 #endif
+	uint32_t backoff, tback;
 
 	i = 0;
 	tid = (uintptr_t)curthread;
@@ -661,9 +687,15 @@ retry:
 			    &contested, &waittime);
 			/* Give interrupts a chance while we spin. */
 			spinlock_exit();
+			backoff = LOCK_BACKOFF_INIT;
 			while (m->mtx_lock != MTX_UNOWNED) {
-				if (i++ < 10000000)
-					cpu_spinwait();
+				if (i++ < 10000000) {
+					if (backoff > LOCK_BACKOFF_CAP)
+						backoff = LOCK_BACKOFF_CAP;
+					for (tback = 0; tback != backoff; tback++)
+						cpu_spinwait();
+					backoff <<= LOCK_BACKOFF_SHIFT;
+				} else
 				if (i < 60000000 || kdb_active || panicstr != NULL)
 					DELAY(1);
Index: sys/sys/lock.h
===================================================================
--- sys/sys/lock.h	(revision 256019)
+++ sys/sys/lock.h	(working copy)
@@ -189,6 +189,10 @@ struct lock_class {
 #define	MPASS4(ex, what, file, line)					\
 	KASSERT((ex), ("Assertion %s failed at %s:%d", what, file, line))
 
+#define	LOCK_BACKOFF_CAP	0x400
+#define	LOCK_BACKOFF_INIT	0x01
+#define	LOCK_BACKOFF_SHIFT	0x01
+
 extern struct lock_class lock_class_mtx_sleep;
 extern struct lock_class lock_class_mtx_spin;
 extern struct lock_class lock_class_sx;