Index: sys/kern/kern_mutex.c
===================================================================
--- sys/kern/kern_mutex.c	(revision 203663)
+++ sys/kern/kern_mutex.c	(working copy)
@@ -557,7 +557,6 @@
 {
 	struct mtx *lock;
 
-	spinlock_enter();
 	THREAD_LOCK_ASSERT(td, MA_OWNED);
 	lock = td->td_lock;
 	td->td_lock = &blocked_lock;
@@ -572,7 +571,6 @@
 	mtx_assert(new, MA_OWNED);
 	MPASS(td->td_lock == &blocked_lock);
 	atomic_store_rel_ptr((volatile void *)&td->td_lock, (uintptr_t)new);
-	spinlock_exit();
 }
 
 void
Index: sys/kern/sched_ule.c
===================================================================
--- sys/kern/sched_ule.c	(revision 203663)
+++ sys/kern/sched_ule.c	(working copy)
@@ -318,7 +318,6 @@
 static void sched_balance_group(struct tdq_group *);
 static void sched_balance_pair(struct tdq *, struct tdq *);
 static inline struct tdq *sched_setcpu(struct td_sched *, int, int);
-static inline struct mtx *thread_block_switch(struct thread *);
 static inline void thread_unblock_switch(struct thread *, struct mtx *);
 static struct mtx *sched_switch_migrate(struct tdq *, struct thread *, int);
 #endif
@@ -989,9 +988,11 @@
 	 * The hard case, migration, we need to block the thread first to
 	 * prevent order reversals with other cpus locks.
 	 */
+	spinlock_enter();
 	thread_lock_block(td);
 	TDQ_LOCK(tdq);
 	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));
+	spinlock_exit();
 	return (tdq);
 }
 
@@ -1789,23 +1790,6 @@
 }
 
 /*
- * Block a thread for switching.  Similar to thread_block() but does not
- * bump the spin count.
- */
-static inline struct mtx *
-thread_block_switch(struct thread *td)
-{
-	struct mtx *lock;
-
-	THREAD_LOCK_ASSERT(td, MA_OWNED);
-	lock = td->td_lock;
-	td->td_lock = &blocked_lock;
-	mtx_unlock_spin(lock);
-
-	return (lock);
-}
-
-/*
  * Handle migration from sched_switch().  This happens only for
  * cpu binding.
  */
@@ -1822,7 +1806,7 @@
 	 * not holding either run-queue lock.
 	 */
 	spinlock_enter();
-	thread_block_switch(td);	/* This releases the lock on tdq. */
+	thread_lock_block(td);	/* This releases the lock on tdq. */
 
 	/*
 	 * Acquire both run-queue locks before placing the thread on the new
@@ -1848,7 +1832,8 @@
 }
 
 /*
- * Release a thread that was blocked with thread_block_switch().
+ * Variadic version of thread_lock_unblock() that does not assume td_lock
+ * is blocked.
  */
 static inline void
 thread_unblock_switch(struct thread *td, struct mtx *mtx)
@@ -1907,7 +1892,7 @@
 	} else {
 		/* This thread must be going to sleep. */
 		TDQ_LOCK(tdq);
-		mtx = thread_block_switch(td);
+		mtx = thread_lock_block(td);
 		tdq_load_rem(tdq, ts);
 	}
 	/*
Index: sys/kern/sched_4bsd.c
===================================================================
--- sys/kern/sched_4bsd.c	(revision 203663)
+++ sys/kern/sched_4bsd.c	(working copy)
@@ -824,9 +824,11 @@
 void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
+	struct mtx *tmtx;
 	struct td_sched *ts;
 	struct proc *p;
 
+	tmtx = NULL;
 	ts = td->td_sched;
 	p = td->td_proc;
 
@@ -835,17 +837,20 @@
 	/*
 	 * Switch to the sched lock to fix things up and pick
 	 * a new thread.
+	 * Block the td_lock in order to avoid breaking the critical path.
 	 */
 	if (td->td_lock != &sched_lock) {
 		mtx_lock_spin(&sched_lock);
-		thread_unlock(td);
+		tmtx = thread_lock_block(td);
 	}
 
 	if ((p->p_flag & P_NOLOAD) == 0)
 		sched_load_rem();
 
-	if (newtd)
+	if (newtd) {
+		MPASS(newtd->td_lock == &sched_lock);
 		newtd->td_flags |= (td->td_flags & TDF_NEEDRESCHED);
+	}
 
 	td->td_lastcpu = td->td_oncpu;
 	td->td_flags &= ~TDF_NEEDRESCHED;
@@ -888,8 +893,8 @@
 		sched_load_add();
 	} else {
 		newtd = choosethread();
+		MPASS(newtd->td_lock == &sched_lock);
 	}
-	MPASS(newtd->td_lock == &sched_lock);
 
 	if (td != newtd) {
 #ifdef HWPMC_HOOKS
@@ -907,7 +912,7 @@
 		    (*dtrace_vtime_switch_func)(newtd);
 #endif
 		/* I feel sleepy */
-		cpu_switch(td, newtd, td->td_lock);
+		cpu_switch(td, newtd, tmtx != NULL ? tmtx : td->td_lock);
		/*
		 * Where am I?  What year is it?
		 * We are in the same thread that went to sleep above,
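Note (not part of the patch): a minimal sketch of the lock hand-off pattern the changes above rely on, written against the post-patch kern_mutex.c primitives shown in the first two hunks. The helper name migrate_sketch() is invented for illustration only; thread_lock_block(), thread_lock_unblock(), THREAD_LOCK_ASSERT(), TDQ_LOCK(), TDQ_LOCKPTR(), spinlock_enter()/spinlock_exit(), struct tdq and blocked_lock all appear in the diff, and the body assumes the scheduler headers already included by sched_ule.c.

/*
 * Sketch only.  With spinlock_enter()/spinlock_exit() now the caller's
 * responsibility, the migration path points td_lock at blocked_lock and
 * drops the old spin lock, takes the destination run-queue lock while
 * holding no thread lock (avoiding lock-order reversals), and finally
 * publishes the new td_lock with a release store.
 */
static void
migrate_sketch(struct thread *td, struct tdq *tdq)
{

	THREAD_LOCK_ASSERT(td, MA_OWNED);
	spinlock_enter();		/* caller now maintains the spin count */
	thread_lock_block(td);		/* td_lock = &blocked_lock; old lock released */
	TDQ_LOCK(tdq);			/* safe: no thread lock is held here */
	thread_lock_unblock(td, TDQ_LOCKPTR(tdq));	/* release-store the new lock */
	spinlock_exit();
}

The sched_4bsd.c change follows the same idea: tmtx remembers the lock returned by thread_lock_block(), so cpu_switch() is handed the lock that is actually held rather than td_lock, which at that point refers to blocked_lock.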