--- /usr/src/sys/conf/NOTES	Sat Oct 20 23:01:33 2007
+++ conf/NOTES	Sun Oct 21 00:18:03 2007
@@ -205,12 +205,6 @@
 # to disable it.
 options 	NO_ADAPTIVE_RWLOCKS
 
-# ADAPTIVE_GIANT causes the Giant lock to also be made adaptive when
-# running without NO_ADAPTIVE_MUTEXES.  Normally, because Giant is assumed
-# to be held for extended periods, contention on Giant will cause a thread
-# to sleep rather than spinning.
-options 	ADAPTIVE_GIANT
-
 # ADAPTIVE_SX changes the behavior of sx locks to spin if the thread
 # that currently owns the lock is executing on another CPU.  Note that
 # in addition to enabling this option, individual sx locks must be

--- /usr/src/sys/conf/options	Sat Oct 20 23:02:05 2007
+++ conf/options	Sun Oct 21 00:18:05 2007
@@ -58,7 +58,6 @@
 NO_SYSCTL_DESCR	opt_global.h
 
 # Miscellaneous options.
-ADAPTIVE_GIANT	opt_adaptive_mutexes.h
 ADAPTIVE_SX
 ALQ
 AUDIT	opt_global.h

--- /usr/src/sys/kern/kern_mutex.c	Sat Oct 20 22:12:34 2007
+++ kern/kern_mutex.c	Sun Oct 21 00:18:30 2007
@@ -325,7 +325,23 @@
 		    m->lock_object.lo_name, (void *)m->mtx_lock, file, line);
 
 	while (!_obtain_lock(m, tid)) {
-		ts = turnstile_trywait(&m->lock_object);
+#ifdef ADAPTIVE_MUTEXES
+		/*
+		 * If the current owner of the lock is executing on another
+		 * CPU, spin instead of blocking.
+		 */
+		v = m->mtx_lock;
+		if (v != MTX_UNOWNED) {
+			owner = (struct thread *)(v & ~MTX_FLAGMASK);
+			if (TD_IS_RUNNING(owner)) {
+				while (mtx_owner(m) == owner &&
+				    TD_IS_RUNNING(owner))
+					cpu_spinwait();
+				continue;
+			}
+		}
+#endif
+		turnstile_chain_lock(&m->lock_object);
 		v = m->mtx_lock;
 
 		/*
@@ -333,44 +349,37 @@
 		 * the turnstile chain lock.
 		 */
 		if (v == MTX_UNOWNED) {
-			turnstile_cancel(ts);
+			turnstile_chain_unlock(&m->lock_object);
 			cpu_spinwait();
 			continue;
 		}
 
 		MPASS(v != MTX_CONTESTED);
 
+#ifdef ADAPTIVE_MUTEXES
 		/*
-		 * If the mutex isn't already contested and a failure occurs
-		 * setting the contested bit, the mutex was either released
-		 * or the state of the MTX_RECURSED bit changed.
+		 * If the current owner of the lock is executing on another
+		 * CPU, quit the hard path and try to spin.
 		 */
-		if ((v & MTX_CONTESTED) == 0 &&
-		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
-			turnstile_cancel(ts);
+		owner = (struct thread *)(v & ~MTX_FLAGMASK);
+		if (TD_IS_RUNNING(owner)) {
+			turnstile_chain_unlock(&m->lock_object);
 			cpu_spinwait();
 			continue;
 		}
+#endif
 
-#ifdef ADAPTIVE_MUTEXES
 		/*
-		 * If the current owner of the lock is executing on another
-		 * CPU, spin instead of blocking.
+		 * If the mutex isn't already contested and a failure occurs
+		 * setting the contested bit, the mutex was either released
+		 * or the state of the MTX_RECURSED bit changed.
 		 */
-		owner = (struct thread *)(v & ~MTX_FLAGMASK);
-#ifdef ADAPTIVE_GIANT
-		if (TD_IS_RUNNING(owner))
-#else
-		if (m != &Giant && TD_IS_RUNNING(owner))
-#endif
-		{
-			turnstile_cancel(ts);
-			while (mtx_owner(m) == owner && TD_IS_RUNNING(owner)) {
-				cpu_spinwait();
-			}
+		if ((v & MTX_CONTESTED) == 0 &&
+		    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
+			turnstile_chain_unlock(&m->lock_object);
+			cpu_spinwait();
 			continue;
 		}
-#endif /* ADAPTIVE_MUTEXES */
 
 		/*
 		 * We definitely must sleep for this lock.
@@ -391,6 +400,9 @@
 		/*
 		 * Block on the turnstile.
 		 */
+		ts = turnstile_lookup(&m->lock_object);
+		if (ts == NULL)
+			ts = turnstile_retrieve(&m->lock_object);
 		turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
 	}
 #ifdef KTR
@@ -574,22 +586,12 @@
 	 * We have to lock the chain before the turnstile so this turnstile
 	 * can be removed from the hash list if it is empty.
 	 */
-	turnstile_chain_lock(&m->lock_object);
-	ts = turnstile_lookup(&m->lock_object);
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
-
-#ifdef ADAPTIVE_MUTEXES
-	if (ts == NULL) {
-		_release_lock_quick(m);
-		if (LOCK_LOG_TEST(&m->lock_object, opts))
-			CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p no sleepers", m);
-		turnstile_chain_unlock(&m->lock_object);
-		return;
-	}
-#else
+	turnstile_chain_lock(&m->lock_object);
+	ts = turnstile_lookup(&m->lock_object);
 	MPASS(ts != NULL);
-#endif
+
 	turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
 	_release_lock_quick(m);
 	/*
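The kern_mutex.c hunks above move the adaptive spin in front of all turnstile work: a contender now busy-waits while the lock owner is running on another CPU, and only takes the turnstile chain lock and sets the contested bit once the owner is off CPU. The userland model below is a minimal sketch of that spin-then-block shape using C11 atomics and pthreads. Every name in it (adaptive_lock, model_thread, adaptive_lock_acquire, and so on) is invented for illustration and is not kernel API, and the contested-bit and recursion handling of the real _mtx_lock_sleep() is omitted.

#include <assert.h>
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

/* Stand-in for TD_IS_RUNNING(): each thread publishes an "on CPU" flag. */
struct model_thread {
	atomic_bool	 on_cpu;
};

struct adaptive_lock {
	_Atomic(struct model_thread *) owner;	/* NULL == unowned */
	pthread_mutex_t	 blk_mtx;		/* models the turnstile chain lock */
	pthread_cond_t	 blk_cv;		/* models sleeping on the turnstile */
};

/* Example instance; real users would also initialize their thread flags. */
static struct adaptive_lock lk = {
	.blk_mtx = PTHREAD_MUTEX_INITIALIZER,
	.blk_cv = PTHREAD_COND_INITIALIZER,
};

static void
adaptive_lock_acquire(struct adaptive_lock *al, struct model_thread *self)
{
	struct model_thread *owner;

	for (;;) {
		owner = NULL;
		if (atomic_compare_exchange_strong(&al->owner, &owner, self))
			return;			/* uncontested fast path */

		/*
		 * Adaptive pre-spin, as in the patched _mtx_lock_sleep():
		 * while the owner is on CPU, spin without touching any
		 * auxiliary lock, then retry the acquire.
		 */
		if (atomic_load(&owner->on_cpu)) {
			while (atomic_load(&al->owner) == owner &&
			    atomic_load(&owner->on_cpu))
				sched_yield();	/* stands in for cpu_spinwait() */
			continue;
		}

		/* Owner is off CPU: block, re-checking under the lock. */
		pthread_mutex_lock(&al->blk_mtx);
		while (atomic_load(&al->owner) != NULL)
			pthread_cond_wait(&al->blk_cv, &al->blk_mtx);
		pthread_mutex_unlock(&al->blk_mtx);
	}
}

static void
adaptive_lock_release(struct adaptive_lock *al, struct model_thread *self)
{
	assert(atomic_load(&al->owner) == self);
	pthread_mutex_lock(&al->blk_mtx);	/* orders the store vs. sleepers */
	atomic_store(&al->owner, NULL);
	pthread_cond_broadcast(&al->blk_cv);
	pthread_mutex_unlock(&al->blk_mtx);
}

The point of the reordering is visible in adaptive_lock_acquire(): the spin loop touches nothing but the lock word and the owner's run flag, so spinning threads add no contention on the chain lock.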
--- /usr/src/sys/kern/subr_turnstile.c	Sat Oct 20 23:33:07 2007
+++ kern/subr_turnstile.c	Sun Oct 21 00:18:36 2007
@@ -596,6 +596,20 @@
 	return (NULL);
 }
 
+struct turnstile *
+turnstile_retrieve(struct lock_object *lock)
+{
+	struct turnstile *ts;
+
+	ts = curthread->td_turnstile;
+	MPASS(ts != NULL);
+	mtx_lock_spin(&ts->ts_lock);
+	KASSERT(ts->ts_lockobj == NULL, ("stale ts_lockobj pointer"));
+	ts->ts_lockobj = lock;
+
+	return (ts);
+}
+
 /*
  * Unlock the turnstile chain associated with a given lock.
  */

--- /usr/src/sys/sys/turnstile.h	Sat Oct 20 23:38:14 2007
+++ sys/turnstile.h	Sun Oct 21 00:18:44 2007
@@ -100,6 +100,7 @@
 void	turnstile_free(struct turnstile *);
 struct thread *turnstile_head(struct turnstile *, int);
 struct turnstile *turnstile_lookup(struct lock_object *);
+struct turnstile *turnstile_retrieve(struct lock_object *);
 int	turnstile_signal(struct turnstile *, int);
 struct turnstile *turnstile_trywait(struct lock_object *);
 void	turnstile_unpend(struct turnstile *, int);
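turnstile_retrieve() exists because the reworked blocking path no longer reserves a turnstile up front through turnstile_trywait(): by the time a thread decides to sleep, it either finds a turnstile already attached to the lock via turnstile_lookup() or donates the preallocated one every thread carries in td_turnstile. Below is a minimal compilable sketch of that lookup-or-donate pattern; the names (wq, wq_lookup, wq_donate, wq_for_block) are invented, a single list stands in for the hashed turnstile chains, and the chain lock that must protect these operations is assumed to be held by the caller.

#include <assert.h>
#include <stddef.h>

/*
 * Every thread owns one spare queue node (models td_turnstile); the
 * first thread to block on a lock donates its spare, later blockers
 * find that node via lookup.  A real implementation hashes lock
 * addresses to many chains; one chain suffices here.
 */
struct wq {
	const void	*lockobj;	/* NULL while the node is spare */
	int		 nwaiters;
	struct wq	*next;
};

static struct wq *chain_head;		/* protected by the caller's chain lock */

/* Model of turnstile_lookup(): find the queue attached to a lock, if any. */
static struct wq *
wq_lookup(const void *lockobj)
{
	struct wq *q;

	for (q = chain_head; q != NULL; q = q->next)
		if (q->lockobj == lockobj)
			return (q);
	return (NULL);
}

/* Model of turnstile_retrieve(): attach the caller's spare node. */
static struct wq *
wq_donate(struct wq *spare, const void *lockobj)
{
	assert(spare->lockobj == NULL);	/* cf. "stale ts_lockobj pointer" */
	spare->lockobj = lockobj;
	spare->next = chain_head;
	chain_head = spare;
	return (spare);
}

/* Blocking path, mirroring the kern_mutex.c hunk above. */
static struct wq *
wq_for_block(struct wq *spare, const void *lockobj)
{
	struct wq *q;

	if ((q = wq_lookup(lockobj)) == NULL)
		q = wq_donate(spare, lockobj);
	q->nwaiters++;
	return (q);
}

The assertion in wq_donate() mirrors the patch's KASSERT: a thread's spare node must be clean before it can be attached to a new lock.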