Index: sys/kern/kern_mutex.c
===================================================================
--- sys/kern/kern_mutex.c	(revision 228324)
+++ sys/kern/kern_mutex.c	(working copy)
@@ -192,6 +192,8 @@ void
 _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_lock() of destroyed mutex @ %s:%d", file, line));
@@ -211,6 +213,9 @@ _mtx_lock_flags(struct mtx *m, int opts, const cha
 void
 _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
 {
+
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
@@ -232,6 +237,8 @@ void
 _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -254,6 +261,8 @@ void
 _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
@@ -282,6 +291,9 @@ mtx_trylock_flags_(struct mtx *m, int opts, const 
 #endif
 	int rval;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	MPASS(curthread != NULL);
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_trylock() of destroyed mutex @ %s:%d", file, line));
@@ -338,6 +350,9 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int 
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	if (mtx_owned(m)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -508,6 +523,9 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int o
 	uint64_t waittime = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
@@ -555,6 +573,10 @@ thread_lock_flags_(struct thread *td, int opts, co
 	i = 0;
 	tid = (uintptr_t)curthread;
+
+	if (SCHEDULER_STOPPED())
+		return;
+
 	for (;;) {
 retry:
 		spinlock_enter();
 		m = td->td_lock;
@@ -656,6 +678,9 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const c
 {
 	struct turnstile *ts;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	if (mtx_recursed(m)) {
 		if (--(m->mtx_recurse) == 0)
 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
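
The pattern repeated throughout kern_mutex.c above is the heart of the change: every lock and unlock entry point returns early once the scheduler is stopped, so the panicking thread can walk through lock-protected code without blocking on owners that will never run again. A minimal userland sketch of the same idea (scheduler_stopped and the toy_mtx_* wrappers are illustrative names, not kernel API):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int scheduler_stopped;	/* models the stop_scheduler flag */
static pthread_mutex_t toy_mtx = PTHREAD_MUTEX_INITIALIZER;

static void
toy_mtx_lock(void)
{
	if (atomic_load(&scheduler_stopped))
		return;		/* pretend success; the owner can never run */
	pthread_mutex_lock(&toy_mtx);
}

static void
toy_mtx_unlock(void)
{
	if (atomic_load(&scheduler_stopped))
		return;
	pthread_mutex_unlock(&toy_mtx);
}

int
main(void)
{
	pthread_mutex_lock(&toy_mtx);	/* some other context holds the lock */
	atomic_store(&scheduler_stopped, 1);	/* panic: stop the world */
	toy_mtx_lock();		/* without the guard this would block forever */
	printf("panic path got past the held lock\n");
	toy_mtx_unlock();
	return (0);
}

Note that in the real hunks the guard sits before the MPASS/KASSERT checks and the witness bookkeeping, so the stopped path does no work at all.
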
Index: sys/kern/kern_shutdown.c
===================================================================
--- sys/kern/kern_shutdown.c	(revision 228324)
+++ sys/kern/kern_shutdown.c	(working copy)
@@ -121,6 +121,11 @@ SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG
     &sync_on_panic, 0, "Do a sync before rebooting from a panic");
 TUNABLE_INT("kern.sync_on_panic", &sync_on_panic);
 
+static int stop_scheduler_on_panic = 0;
+SYSCTL_INT(_kern, OID_AUTO, stop_scheduler_on_panic, CTLFLAG_RW | CTLFLAG_TUN,
+    &stop_scheduler_on_panic, 0, "stop scheduler upon entering panic");
+TUNABLE_INT("kern.stop_scheduler_on_panic", &stop_scheduler_on_panic);
+
 static SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0,
     "Shutdown environment");
 
@@ -138,6 +143,7 @@ SYSCTL_INT(_kern_shutdown, OID_AUTO, show_busybufs
  */
 const char *panicstr;
 
+int stop_scheduler;			/* system stopped CPUs for panic */
 int dumping;				/* system is dumping */
 int rebooting;				/* system is rebooting */
 static struct dumperinfo dumper;	/* our selected dumper */
@@ -294,10 +300,12 @@ kern_reboot(int howto)
 	 * systems don't shutdown properly (i.e., ACPI power off) if we
	 * run on another processor.
 	 */
-	thread_lock(curthread);
-	sched_bind(curthread, 0);
-	thread_unlock(curthread);
-	KASSERT(PCPU_GET(cpuid) == 0, ("%s: not running on cpu 0", __func__));
+	if (!SCHEDULER_STOPPED()) {
+		thread_lock(curthread);
+		sched_bind(curthread, 0);
+		thread_unlock(curthread);
+		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+	}
 #endif
 	/* We're in the process of rebooting. */
 	rebooting = 1;
@@ -547,13 +555,18 @@ panic(const char *fmt, ...)
 {
 #ifdef SMP
 	static volatile u_int panic_cpu = NOCPU;
+	cpuset_t other_cpus;
 #endif
 	struct thread *td = curthread;
 	int bootopt, newpanic;
 	va_list ap;
 	static char buf[256];
 
-	critical_enter();
+	if (stop_scheduler_on_panic)
+		spinlock_enter();
+	else
+		critical_enter();
+
 #ifdef SMP
 	/*
 	 * We don't want multiple CPU's to panic at the same time, so we
@@ -566,6 +579,15 @@ panic(const char *fmt, ...)
 	    PCPU_GET(cpuid)) == 0)
 			while (panic_cpu != NOCPU)
 				; /* nothing */
+
+	if (stop_scheduler_on_panic) {
+		if (panicstr == NULL && !kdb_active) {
+			other_cpus = all_cpus;
+			CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+			stop_cpus_hard(other_cpus);
+			stop_scheduler = 1;
+		}
+	}
 #endif
 
 	bootopt = RB_AUTOBOOT;
@@ -604,7 +626,8 @@ panic(const char *fmt, ...)
 	/* thread_unlock(td); */
 	if (!sync_on_panic)
 		bootopt |= RB_NOSYNC;
-	critical_exit();
+	if (!stop_scheduler_on_panic)
+		critical_exit();
 	kern_reboot(bootopt);
 }
 
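
Two things happen in panic() above: concurrently panicking CPUs are serialized through panic_cpu, and, with the new knob enabled, the first panic hard-stops every other CPU exactly once (skipped when the debugger already stopped them, hence the panicstr == NULL && !kdb_active test). A compilable userland model of the panic_cpu handoff, using C11 atomics; TOY_NOCPU and toy_panic_enter are stand-ins, not the kernel's symbols:

#include <stdatomic.h>

#define	TOY_NOCPU	(-1)

static atomic_int panic_cpu = TOY_NOCPU;

static void
toy_panic_enter(int curcpu)
{
	int expected;

	/*
	 * The first CPU to arrive claims panic_cpu; later CPUs spin
	 * until the winner releases it (mirroring the
	 * "while (panic_cpu != NOCPU)" loop in panic()).  A CPU that
	 * re-enters panic() recursively falls straight through,
	 * because it already owns panic_cpu.
	 */
	if (atomic_load(&panic_cpu) != curcpu) {
		for (;;) {
			expected = TOY_NOCPU;
			if (atomic_compare_exchange_strong(&panic_cpu,
			    &expected, curcpu))
				break;
			while (atomic_load(&panic_cpu) != TOY_NOCPU)
				;	/* spin: someone else panics first */
		}
	}
}

int
main(void)
{
	toy_panic_enter(0);	/* first panic on CPU 0 claims panic_cpu */
	toy_panic_enter(0);	/* a recursive panic passes straight through */
	return (0);
}

The spinlock_enter() swap matters too: unlike critical_enter(), it also disables interrupts on the panicking CPU, so nothing can preempt the one thread that now owns the whole machine.
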
Index: sys/kern/subr_kdb.c
===================================================================
--- sys/kern/subr_kdb.c	(revision 228324)
+++ sys/kern/subr_kdb.c	(working copy)
@@ -226,13 +226,7 @@ kdb_sysctl_trap_code(SYSCTL_HANDLER_ARGS)
 void
 kdb_panic(const char *msg)
 {
-#ifdef SMP
-	cpuset_t other_cpus;
-	other_cpus = all_cpus;
-	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-	stop_cpus_hard(other_cpus);
-#endif
 	printf("KDB: panic\n");
 	panic("%s", msg);
 }
 
@@ -594,6 +588,9 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	struct kdb_dbbe *be;
 	register_t intr;
 	int handled;
+#ifdef SMP
+	int did_stop_cpus;
+#endif
 
 	be = kdb_dbbe;
 	if (be == NULL || be->dbbe_trap == NULL)
@@ -606,9 +603,13 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	intr = intr_disable();
 
 #ifdef SMP
-	other_cpus = all_cpus;
-	CPU_CLR(PCPU_GET(cpuid), &other_cpus);
-	stop_cpus_hard(other_cpus);
+	if (!SCHEDULER_STOPPED()) {
+		other_cpus = all_cpus;
+		CPU_CLR(PCPU_GET(cpuid), &other_cpus);
+		stop_cpus_hard(other_cpus);
+		did_stop_cpus = 1;
+	} else
+		did_stop_cpus = 0;
 #endif
 
 	kdb_active++;
@@ -634,7 +635,8 @@ kdb_trap(int type, int code, struct trapframe *tf)
 	kdb_active--;
 
 #ifdef SMP
-	restart_cpus(stopped_cpus);
+	if (did_stop_cpus)
+		restart_cpus(stopped_cpus);
 #endif
 
 	intr_restore(intr);
Index: sys/kern/kern_synch.c
===================================================================
--- sys/kern/kern_synch.c	(revision 228324)
+++ sys/kern/kern_synch.c	(working copy)
@@ -158,7 +158,7 @@ _sleep(void *ident, struct lock_object *lock, int 
 	else
 		class = NULL;
 
-	if (cold) {
+	if (cold || SCHEDULER_STOPPED()) {
 		/*
 		 * During autoconfiguration, just return;
 		 * don't run any other threads or panic below,
@@ -260,7 +260,7 @@ msleep_spin(void *ident, struct mtx *mtx, const ch
 	KASSERT(p != NULL, ("msleep1"));
 	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
 
-	if (cold) {
+	if (cold || SCHEDULER_STOPPED()) {
 		/*
 		 * During autoconfiguration, just return;
 		 * don't run any other threads or panic below,
@@ -429,6 +429,8 @@ mi_switch(int flags, struct thread *newtd)
 	 */
 	if (kdb_active)
 		kdb_switch();
+	if (SCHEDULER_STOPPED())
+		return;
 	if (flags & SW_VOL) {
 		td->td_ru.ru_nvcsw++;
 		td->td_swvoltick = ticks;
Index: sys/kern/subr_lock.c
===================================================================
--- sys/kern/subr_lock.c	(revision 228324)
+++ sys/kern/subr_lock.c	(working copy)
@@ -532,6 +532,9 @@ lock_profile_obtain_lock_success(struct lock_objec
 	struct lock_profile_object *l;
 	int spin;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	/* don't reset the timer when/if recursing */
 	if (!lock_prof_enable || (lo->lo_flags & LO_NOPROFILE))
 		return;
@@ -596,6 +599,8 @@ lock_profile_release_lock(struct lock_object *lo)
 	struct lpohead *head;
 	int spin;
 
+	if (SCHEDULER_STOPPED())
+		return;
 	if (lo->lo_flags & LO_NOPROFILE)
 		return;
 	spin = (LOCK_CLASS(lo)->lc_flags & LC_SPINLOCK) ? 1 : 0;
Index: sys/kern/kern_rwlock.c
===================================================================
--- sys/kern/kern_rwlock.c	(revision 228324)
+++ sys/kern/kern_rwlock.c	(working copy)
@@ -233,6 +233,8 @@ void
 _rw_wlock(struct rwlock *rw, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_wlock() of destroyed rwlock @ %s:%d", file, line));
@@ -249,6 +251,9 @@ _rw_try_wlock(struct rwlock *rw, const char *file,
 {
 	int rval;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
 
@@ -273,6 +278,8 @@ void
 _rw_wunlock(struct rwlock *rw, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_wunlock() of destroyed rwlock @ %s:%d", file, line));
@@ -317,6 +324,9 @@ _rw_rlock(struct rwlock *rw, const char *file, int
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_rlock() of destroyed rwlock @ %s:%d", file, line));
 	KASSERT(rw_wowner(rw) != curthread,
@@ -499,6 +509,9 @@ _rw_try_rlock(struct rwlock *rw, const char *file,
 {
 	uintptr_t x;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	for (;;) {
 		x = rw->rw_lock;
 		KASSERT(rw->rw_lock != RW_DESTROYED,
@@ -525,6 +538,9 @@ _rw_runlock(struct rwlock *rw, const char *file, i
 	struct turnstile *ts;
 	uintptr_t x, v, queue;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
 	_rw_assert(rw, RA_RLOCKED, file, line);
@@ -650,6 +666,9 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, c
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	if (rw_wlocked(rw)) {
 		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
 		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
@@ -814,6 +833,9 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid,
 	uintptr_t v;
 	int queue;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	if (rw_wlocked(rw) && rw_recursed(rw)) {
 		rw->rw_recurse--;
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -876,6 +898,9 @@ _rw_try_upgrade(struct rwlock *rw, const char *fil
 	struct turnstile *ts;
 	int success;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_upgrade() of destroyed rwlock @ %s:%d", file, line));
 	_rw_assert(rw, RA_RLOCKED, file, line);
@@ -946,6 +971,9 @@ _rw_downgrade(struct rwlock *rw, const char *file,
 	uintptr_t tid, v;
 	int rwait, wwait;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_downgrade() of destroyed rwlock @ %s:%d", file, line));
 	_rw_assert(rw, RA_WLOCKED | RA_NOTRECURSED, file, line);
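
One detail worth calling out in the rwlock (and earlier mutex) guards: blocking paths simply return, but try-paths return 1 and error-returning paths return 0, i.e. every primitive reports success once the scheduler is stopped. Reporting failure would push callers onto error paths that may retry, sleep, or take further locks. A small sketch of the caller's perspective; the toy_* names are invented for illustration:

#include <stdio.h>

static int toy_stopped = 1;	/* after panic */

static int
toy_try_lock(void)
{
	if (toy_stopped)
		return (1);	/* pretend the lock was acquired */
	/* ... a real trylock attempt would go here ... */
	return (0);
}

static void
toy_consumer(void)
{
	if (!toy_try_lock()) {
		/* error path: might block or retry, wedging the panic */
		return;
	}
	printf("proceeding as if locked; the system is single-threaded now\n");
}

int
main(void)
{
	toy_consumer();
	return (0);
}
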
Index: sys/kern/kern_sx.c
===================================================================
--- sys/kern/kern_sx.c	(revision 228324)
+++ sys/kern/kern_sx.c	(working copy)
@@ -241,6 +241,8 @@ _sx_slock(struct sx *sx, int opts, const char *fil
 {
 	int error = 0;
 
+	if (SCHEDULER_STOPPED())
+		return (0);
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_slock() of destroyed sx @ %s:%d", file, line));
@@ -260,6 +262,9 @@ sx_try_slock_(struct sx *sx, const char *file, int
 {
 	uintptr_t x;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	for (;;) {
 		x = sx->sx_lock;
 		KASSERT(x != SX_LOCK_DESTROYED,
@@ -283,6 +288,8 @@ _sx_xlock(struct sx *sx, int opts, const char *fil
 {
 	int error = 0;
 
+	if (SCHEDULER_STOPPED())
+		return (0);
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xlock() of destroyed sx @ %s:%d", file, line));
@@ -304,6 +311,9 @@ sx_try_xlock_(struct sx *sx, const char *file, int
 {
 	int rval;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
@@ -330,6 +340,8 @@ void
 _sx_sunlock(struct sx *sx, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_sunlock() of destroyed sx @ %s:%d", file, line));
@@ -345,6 +357,8 @@ void
 _sx_xunlock(struct sx *sx, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
 	MPASS(curthread != NULL);
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_xunlock() of destroyed sx @ %s:%d", file, line));
@@ -369,6 +383,9 @@ sx_try_upgrade_(struct sx *sx, const char *file, i
 	uintptr_t x;
 	int success;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_upgrade() of destroyed sx @ %s:%d", file, line));
 	_sx_assert(sx, SA_SLOCKED, file, line);
@@ -399,6 +416,9 @@ sx_downgrade_(struct sx *sx, const char *file, int
 	uintptr_t x;
 	int wakeup_swapper;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_downgrade() of destroyed sx @ %s:%d", file, line));
 	_sx_assert(sx, SA_XLOCKED | SA_NOTRECURSED, file, line);
@@ -481,6 +501,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int o
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return (0);
+
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
 		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
@@ -681,6 +704,9 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, con
 	uintptr_t x;
 	int queue, wakeup_swapper;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	MPASS(!(sx->sx_lock & SX_LOCK_SHARED));
 
 	/* If the lock is recursed, then unrecurse one level. */
@@ -753,6 +779,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char
 	int64_t sleep_time = 0;
 #endif
 
+	if (SCHEDULER_STOPPED())
+		return (0);
+
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
 	 * shared locks once there is an exclusive waiter.
@@ -919,6 +948,9 @@ _sx_sunlock_hard(struct sx *sx, const char *file, 
 	uintptr_t x;
 	int wakeup_swapper;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	for (;;) {
 		x = sx->sx_lock;
 
Index: sys/kern/kern_lock.c
===================================================================
--- sys/kern/kern_lock.c	(revision 228324)
+++ sys/kern/kern_lock.c	(working copy)
@@ -1232,6 +1232,9 @@ _lockmgr_disown(struct lock *lk, const char *file,
 {
 	uintptr_t tid, x;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	tid = (uintptr_t)curthread;
 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 
Index: sys/kern/kern_rmlock.c
===================================================================
--- sys/kern/kern_rmlock.c	(revision 228324)
+++ sys/kern/kern_rmlock.c	(working copy)
@@ -344,6 +344,9 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker
 	struct thread *td = curthread;
 	struct pcpu *pc;
 
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	tracker->rmp_flags = 0;
 	tracker->rmp_thread = td;
 	tracker->rmp_rmlock = rm;
@@ -413,6 +416,9 @@ _rm_runlock(struct rmlock *rm, struct rm_priotrack
 	struct pcpu *pc;
 	struct thread *td = tracker->rmp_thread;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	td->td_critnest++;	/* critical_enter(); */
 	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
 	rm_tracker_remove(pc, tracker);
@@ -432,6 +438,9 @@ _rm_wlock(struct rmlock *rm)
 	struct turnstile *ts;
 	cpuset_t readcpus;
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
 		sx_xlock(&rm->rm_lock_sx);
 	else
@@ -486,6 +495,9 @@ _rm_wunlock(struct rmlock *rm)
 void
 _rm_wlock_debug(struct rmlock *rm, const char *file, int line)
 {
+	if (SCHEDULER_STOPPED())
+		return;
+
 	WITNESS_CHECKORDER(&rm->lock_object, LOP_NEWORDER | LOP_EXCLUSIVE,
 	    file, line, NULL);
 
@@ -507,6 +519,9 @@ void
 _rm_wunlock_debug(struct rmlock *rm, const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	curthread->td_locks--;
 	if (rm->lock_object.lo_flags & RM_SLEEPABLE)
 		WITNESS_UNLOCK(&rm->rm_lock_sx.lock_object, LOP_EXCLUSIVE,
@@ -521,6 +536,10 @@ int
 _rm_rlock_debug(struct rmlock *rm, struct rm_priotracker *tracker,
     int trylock, const char *file, int line)
 {
+
+	if (SCHEDULER_STOPPED())
+		return (1);
+
 	if (!trylock && (rm->lock_object.lo_flags & RM_SLEEPABLE))
 		WITNESS_CHECKORDER(&rm->rm_lock_sx.lock_object, LOP_NEWORDER,
 		    file, line, NULL);
@@ -544,6 +563,9 @@ _rm_runlock_debug(struct rmlock *rm, struct rm_pri
     const char *file, int line)
 {
 
+	if (SCHEDULER_STOPPED())
+		return;
+
 	curthread->td_locks--;
 	WITNESS_UNLOCK(&rm->lock_object, 0, file, line);
 	LOCK_LOG_LOCK("RMRUNLOCK", &rm->lock_object, 0, 0, file, line);
Index: sys/kern/subr_witness.c
===================================================================
--- sys/kern/subr_witness.c	(revision 228324)
+++ sys/kern/subr_witness.c	(working copy)
@@ -2162,6 +2162,13 @@ witness_save(struct lock_object *lock, const char 
 	struct lock_instance *instance;
 	struct lock_class *class;
 
+	/*
+	 * This function is used independently in locking code to deal
+	 * with Giant; the SCHEDULER_STOPPED() check can be removed here
+	 * after Giant is gone.
+	 */
+	if (SCHEDULER_STOPPED())
+		return;
 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
 		return;
@@ -2188,6 +2195,13 @@ witness_restore(struct lock_object *lock, const ch
 	struct lock_instance *instance;
 	struct lock_class *class;
 
+	/*
+	 * This function is used independently in locking code to deal
+	 * with Giant; the SCHEDULER_STOPPED() check can be removed here
+	 * after Giant is gone.
+	 */
+	if (SCHEDULER_STOPPED())
+		return;
 	KASSERT(witness_cold == 0, ("%s: witness_cold", __func__));
 	if (lock->lo_witness == NULL || witness_watch == -1 || panicstr != NULL)
 		return;
Index: sys/sys/lockstat.h
===================================================================
--- sys/sys/lockstat.h	(revision 228324)
+++ sys/sys/lockstat.h	(working copy)
@@ -185,17 +185,24 @@ extern uint64_t lockstat_nsecs(void);
 #define	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(probe, lp, c, wt, f, l) do {    \
 	uint32_t id;							      \
 									      \
-	lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt, f, l);   \
-	if ((id = lockstat_probemap[(probe)]))				      \
-		(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0);     \
+	if (!SCHEDULER_STOPPED()) {					      \
+		lock_profile_obtain_lock_success(&(lp)->lock_object, c, wt,  \
+		    f, l);						      \
+		if ((id = lockstat_probemap[(probe)]))			      \
+			(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0,    \
+			    0, 0);					      \
+	}								      \
 } while (0)
 
 #define	LOCKSTAT_PROFILE_RELEASE_LOCK(probe, lp) do {			      \
 	uint32_t id;							      \
 									      \
-	lock_profile_release_lock(&(lp)->lock_object);			      \
-	if ((id = lockstat_probemap[(probe)]))				      \
-		(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0, 0, 0);     \
+	if (!SCHEDULER_STOPPED()) {					      \
+		lock_profile_release_lock(&(lp)->lock_object);		      \
+		if ((id = lockstat_probemap[(probe)]))			      \
+			(*lockstat_probe_func)(id, (uintptr_t)(lp), 0, 0,    \
+			    0, 0);					      \
+	}								      \
 } while (0)
 
 #else	/* !KDTRACE_HOOKS */
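
The lockstat.h macros get the same guard, but as an if (!SCHEDULER_STOPPED()) { ... } block inside the existing do { ... } while (0) shell, which keeps each macro a single statement that is safe after an unbraced if. A toy statement-macro of the same shape (TOY_PROBE and toy_stopped are invented for illustration):

#include <stdio.h>

static int toy_stopped;

#define	TOY_PROBE(msg) do {						\
	if (!toy_stopped)						\
		printf("probe: %s\n", (msg));				\
} while (0)

int
main(void)
{
	TOY_PROBE("fires before panic");
	toy_stopped = 1;
	TOY_PROBE("silenced after panic");	/* prints nothing */
	return (0);
}
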
Index: sys/sys/systm.h
===================================================================
--- sys/sys/systm.h	(revision 228324)
+++ sys/sys/systm.h	(working copy)
@@ -47,6 +47,7 @@
 
 extern int cold;		/* nonzero if we are doing a cold boot */
 extern int rebooting;		/* kern_reboot() has been called. */
+extern int stop_scheduler;	/* only one thread runs after panic */
 extern const char *panicstr;	/* panic message */
 extern char version[];		/* system version */
 extern char copyright[];	/* system copyright */
@@ -109,6 +110,14 @@ enum VM_GUEST { VM_GUEST_NO = 0, VM_GUEST_VM, VM_G
     ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 
 /*
+ * If we have already panic'd and this is the thread that called
+ * panic(), then don't block on any mutexes but silently succeed.
+ * Otherwise, the kernel will deadlock since the scheduler isn't
+ * going to run the thread that holds any lock we need.
+ */
+#define	SCHEDULER_STOPPED() __predict_false(stop_scheduler)
+
+/*
  * XXX the hints declarations are even more misplaced than most declarations
  * in this file, since they are needed in one file (per arch) and only used
  * in two files.
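
Finally, the macro added to systm.h is just a branch-hinted read of the new flag: __predict_false() is FreeBSD's wrapper around the compiler's __builtin_expect, telling it to lay out the stopped-scheduler test as the cold path so the common case costs one well-predicted branch. A userland rendering of the macro and its intended use (stop_scheduler and toy_lock_path are local stand-ins here):

#include <stdio.h>

static int stop_scheduler;

#define	__predict_false(exp)	__builtin_expect((exp), 0)
#define	SCHEDULER_STOPPED()	__predict_false(stop_scheduler)

static void
toy_lock_path(void)
{
	if (SCHEDULER_STOPPED())
		return;		/* cold path: a panic is in progress */
	/* hot path: normal locking work */
	printf("normal path\n");
}

int
main(void)
{
	toy_lock_path();	/* prints */
	stop_scheduler = 1;
	toy_lock_path();	/* returns immediately */
	return (0);
}
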