--- //depot/projects/smpng/sys/kern/kern_mutex.c	2007/02/25 01:36:24
+++ //depot/user/jhb/lock/kern/kern_mutex.c	2007/02/25 01:55:09
@@ -81,6 +81,11 @@
 #define MUTEX_WAKE_ALL
 #endif
 
+#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
+#define ADAPTIVE_MUTEXES
+//#define ADAPTIVE_MUTEXES2
+#endif
+
 /*
  * Internal utility macros.
  */
@@ -264,7 +310,7 @@
 _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
     int line)
 {
-#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
+#ifdef ADAPTIVE_MUTEXES
 	volatile struct thread *owner;
 #endif
 #ifdef KTR
@@ -299,6 +345,31 @@
 	    m->mtx_object.lo_name, (void *)m->mtx_lock, file, line);
 
 	while (!_obtain_lock(m, tid)) {
+#ifdef ADAPTIVE_MUTEXES
+		/*
+		 * If the current owner of the lock is executing on another
+		 * CPU, spin instead of blocking.
+		 */
+		v = m->mtx_lock;
+#ifdef MUTEX_WAKE_ALL
+		if (v != MTX_UNOWNED && v != MTX_CONTESTED) {
+#else
+		if (v != MTX_UNOWNED) {
+#endif
+			owner = (struct thread *)(v & ~MTX_FLAGMASK);
+#ifdef ADAPTIVE_GIANT
+			if (TD_IS_RUNNING(owner)) {
+#else
+			if (m != &Giant && TD_IS_RUNNING(owner)) {
+#endif
+				while (mtx_owner(m) == owner &&
+				    TD_IS_RUNNING(owner))
+					cpu_spinwait();
+				continue;
+			}
+		}
+#endif /* ADAPTIVE_MUTEXES */
+
 		lock_profile_obtain_lock_failed(&m->mtx_object, &contested);
 		turnstile_lock(&m->mtx_object);
 		v = m->mtx_lock;
@@ -309,7 +380,6 @@
 		 */
 		if (v == MTX_UNOWNED) {
 			turnstile_release(&m->mtx_object);
-			cpu_spinwait();
 			continue;
 		}
 
@@ -329,7 +399,25 @@
 		}
 #endif
 
+#ifdef ADAPTIVE_MUTEXES2
 		/*
+		 * The current lock owner might have started executing
+		 * on another CPU (or the lock could have changed owners)
+		 * while we were waiting on the turnstile chain lock.  If so,
+		 * drop the turnstile lock and try again.
+		 */
+		owner = (struct thread *)(v & ~MTX_FLAGMASK);
+#ifdef ADAPTIVE_GIANT
+		if (TD_IS_RUNNING(owner)) {
+#else
+		if (m != &Giant && TD_IS_RUNNING(owner)) {
+#endif
+			turnstile_release(&m->mtx_object);
+			continue;
+		}
+#endif
+
+		/*
 		 * If the mutex isn't already contested and a failure occurs
 		 * setting the contested bit, the mutex was either released
 		 * or the state of the MTX_RECURSED bit changed.
@@ -337,29 +425,8 @@
 		if ((v & MTX_CONTESTED) == 0 &&
 		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
 			turnstile_release(&m->mtx_object);
-			cpu_spinwait();
-			continue;
-		}
-
-#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
-		/*
-		 * If the current owner of the lock is executing on another
-		 * CPU, spin instead of blocking.
-		 */
-		owner = (struct thread *)(v & ~MTX_FLAGMASK);
-#ifdef ADAPTIVE_GIANT
-		if (TD_IS_RUNNING(owner))
-#else
-		if (m != &Giant && TD_IS_RUNNING(owner))
-#endif
-		{
-			turnstile_release(&m->mtx_object);
-			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
-				cpu_spinwait();
-			}
 			continue;
 		}
-#endif /* SMP && !NO_ADAPTIVE_MUTEXES */
 
 		/*
 		 * We definitely must sleep for this lock.
@@ -488,22 +552,11 @@
 	ts = turnstile_lookup(&m->mtx_object);
 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
-
-#if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
-	if (ts == NULL) {
-		_release_lock_quick(m);
-		if (LOCK_LOG_TEST(&m->mtx_object, opts))
-			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
-		turnstile_release(&m->mtx_object);
-		return;
-	}
-#else
 	MPASS(ts != NULL);
-#endif
 #ifndef PREEMPTION
 	/* XXX */
 	td1 = turnstile_head(ts, TS_EXCLUSIVE_QUEUE);
 #endif
 #ifdef MUTEX_WAKE_ALL
 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 	_release_lock_quick(m);
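
The gist of the patch: the ADAPTIVE_MUTEXES path now spins *before* taking the
turnstile chain lock whenever the lock holder is currently running on another
CPU, instead of spinning only after a failed attempt to set the contested bit.
Below is a minimal userspace sketch of that spin-then-block pattern using C11
atomics. All names here (struct thr, struct alock, alock_acquire, on_cpu) are
hypothetical stand-ins, not FreeBSD APIs; the kernel keeps the owner in the
mtx_lock word, tests TD_IS_RUNNING(), and blocks on a turnstile rather than
yielding.

	/*
	 * Sketch only: adaptive spin-then-block locking in userspace.
	 */
	#include <sched.h>		/* sched_yield() */
	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stddef.h>

	struct thr {
		_Atomic bool on_cpu;	/* crude analogue of TD_IS_RUNNING() */
	};

	struct alock {
		_Atomic(struct thr *) owner;	/* NULL when the lock is free */
	};

	static void cpu_spinwait(void)
	{
	#if defined(__i386__) || defined(__x86_64__)
		__asm__ __volatile__("pause");	/* what the kernel macro is on x86 */
	#else
		sched_yield();
	#endif
	}

	void alock_acquire(struct alock *l, struct thr *self)
	{
		struct thr *owner;

		for (;;) {
			owner = NULL;
			/* Uncontested fast path, like _obtain_lock(). */
			if (atomic_compare_exchange_strong(&l->owner, &owner, self))
				return;
			/*
			 * Adaptive path: the holder is on a CPU, so it should
			 * release soon; spin until it releases or sleeps.
			 */
			if (owner != NULL && atomic_load(&owner->on_cpu)) {
				while (atomic_load(&l->owner) == owner &&
				    atomic_load(&owner->on_cpu))
					cpu_spinwait();
				continue;
			}
			/*
			 * Holder is off-CPU: the kernel would queue on a
			 * turnstile here; yielding is a minimal stand-in.
			 */
			sched_yield();
		}
	}

	void alock_release(struct alock *l)
	{
		atomic_store(&l->owner, NULL);
	}

The inner while loop mirrors the patch's "mtx_owner(m) == owner &&
TD_IS_RUNNING(owner)" test: spinning is only worthwhile while the same thread
still holds the lock and is still executing, since a descheduled holder cannot
make progress toward releasing it.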