diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index c0bef50..f5a325e 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -348,6 +348,15 @@ _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
 		return;
 	}
 
+	/*
+	 * If we have already panic'd and this is the thread that called
+	 * panic(), then don't block on any mutexes but silently succeed.
+	 * Otherwise, the kernel will deadlock since the scheduler isn't
+	 * going to run the thread that holds the lock we need.
+	 */
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	lock_profile_obtain_lock_failed(&m->lock_object,
 	    &contested, &waittime);
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
@@ -506,7 +515,10 @@ _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
 	int contested = 0;
 	uint64_t waittime = 0;
 #endif
-
+#if 0
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+#endif
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
@@ -551,7 +563,10 @@ _thread_lock_flags(struct thread *td, int opts, const char *file, int line)
 #ifdef KDTRACE_HOOKS
 	uint64_t spin_cnt = 0;
 #endif
-
+#if 0
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+#endif
 	i = 0;
 	tid = (uintptr_t)curthread;
 	for (;;) {
@@ -664,6 +679,15 @@ _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
 	}
 
 	/*
+	 * If we failed to unlock this lock and we are a thread that has
+	 * called panic(), it may be due to the bypass in _mtx_lock_sleep()
+	 * above.  In that case, just return and leave the lock alone to
+	 * avoid changing the state.
+	 */
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
+	/*
 	 * We have to lock the chain before the turnstile so this turnstile
 	 * can be removed from the hash list if it is empty.
 	 */
diff --git a/sys/kern/kern_rmlock.c b/sys/kern/kern_rmlock.c
index 0ab5d74..00f1518 100644
--- a/sys/kern/kern_rmlock.c
+++ b/sys/kern/kern_rmlock.c
@@ -280,6 +280,9 @@ _rm_rlock_hard(struct rmlock *rm, struct rm_priotracker *tracker, int trylock)
 		return (1);
 	}
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return (1);
+
 	/*
 	 * We allow readers to aquire a lock even if a writer is blocked if
 	 * the lock is recursive and the reader already holds the lock.
@@ -386,6 +389,9 @@ _rm_unlock_hard(struct thread *td,struct rm_priotracker *tracker)
 	if (!tracker->rmp_flags)
 		return;
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	mtx_lock_spin(&rm_spinlock);
 	LIST_REMOVE(tracker, rmp_qentry);
 
@@ -437,6 +443,9 @@ _rm_wlock(struct rmlock *rm)
 	else
 		mtx_lock(&rm->rm_lock_mtx);
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	if (rm->rm_writecpus != all_cpus) {
 
 		/* Get all read tokens back */
diff --git a/sys/kern/kern_rwlock.c b/sys/kern/kern_rwlock.c
index 81b4c5f..73c54fe 100644
--- a/sys/kern/kern_rwlock.c
+++ b/sys/kern/kern_rwlock.c
@@ -323,6 +323,9 @@ _rw_rlock(struct rwlock *rw, const char *file, int line)
 	    rw->lock_object.lo_name, file, line));
 	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line, NULL);
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	for (;;) {
 #ifdef KDTRACE_HOOKS
 		spin_cnt++;
@@ -532,6 +535,9 @@ _rw_runlock(struct rwlock *rw, const char *file, int line)
 	WITNESS_UNLOCK(&rw->lock_object, 0, file, line);
 	LOCK_LOG_LOCK("RUNLOCK", &rw->lock_object, 0, 0, file, line);
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	/* TODO: drop "owner of record" here.
 	 */
 	for (;;) {
@@ -659,6 +665,9 @@ _rw_wlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 		return;
 	}
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    rw->lock_object.lo_name, (void *)rw->rw_lock, file, line);
@@ -820,6 +829,9 @@ _rw_wunlock_hard(struct rwlock *rw, uintptr_t tid, const char *file, int line)
 		return;
 	}
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	KASSERT(rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS),
 	    ("%s: neither of the waiter flags are set", __func__));
 
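
The hunks above all apply one idea to the blocking-lock paths: once panicstr is set and the current thread carries TDF_INPANIC, lock acquisition silently succeeds and unlock leaves the lock state untouched, because no other thread will ever be scheduled again to release or reacquire anything. A minimal user-space sketch of that pattern follows; toy_mtx, toy_panic, td_inpanic and this local panicstr are illustrative stand-ins, not the kernel API.

/*
 * Sketch of the panic-time lock bypass: the panicking thread's lock
 * operations become no-ops so it can never block on a lock whose
 * owner will never run again.  Build with -pthread.
 */
#include <pthread.h>
#include <stdio.h>

static const char *panicstr;		/* set once at panic time */
static __thread int td_inpanic;		/* models TDF_INPANIC */

struct toy_mtx {
	pthread_mutex_t m;
};

static void
toy_mtx_lock(struct toy_mtx *mtx)
{

	/* The panicking thread silently "succeeds" instead of blocking. */
	if (panicstr != NULL && td_inpanic)
		return;
	pthread_mutex_lock(&mtx->m);
}

static void
toy_mtx_unlock(struct toy_mtx *mtx)
{

	/* Mirror the bypass: the lock may never have been taken. */
	if (panicstr != NULL && td_inpanic)
		return;
	pthread_mutex_unlock(&mtx->m);
}

static void
toy_panic(const char *msg)
{

	panicstr = msg;
	td_inpanic = 1;
}

int
main(void)
{
	struct toy_mtx mtx = { PTHREAD_MUTEX_INITIALIZER };

	pthread_mutex_lock(&mtx.m);	/* pretend another thread owns it */
	toy_panic("toy panic");
	toy_mtx_lock(&mtx);		/* would deadlock without the bypass */
	toy_mtx_unlock(&mtx);		/* leaves the "owner's" state alone */
	printf("panic-time code ran without blocking\n");
	return (0);
}

Note that the spin-mutex and thread_lock versions of the bypass are left under #if 0 in the patch itself, so only the sleepable primitives take this path.
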
diff --git a/sys/kern/kern_shutdown.c b/sys/kern/kern_shutdown.c
index 018bd8e..239a426 100644
--- a/sys/kern/kern_shutdown.c
+++ b/sys/kern/kern_shutdown.c
@@ -117,6 +117,13 @@ SYSCTL_INT(_kern, OID_AUTO, sync_on_panic, CTLFLAG_RW | CTLFLAG_TUN,
 	&sync_on_panic, 0, "Do a sync before rebooting from a panic");
 TUNABLE_INT("kern.sync_on_panic", &sync_on_panic);
+#ifdef SMP
+static int stop_cpus_on_panic = 1;
+SYSCTL_INT(_kern, OID_AUTO, stop_cpus_on_panic, CTLFLAG_RW | CTLFLAG_TUN,
+	&stop_cpus_on_panic, 0, "stop other CPUs when entering the debugger");
+TUNABLE_INT("kern.stop_cpus_on_panic", &stop_cpus_on_panic);
+#endif
+
 SYSCTL_NODE(_kern, OID_AUTO, shutdown, CTLFLAG_RW, 0, "Shutdown environment");
 
 /*
@@ -129,6 +136,11 @@ int dumping;				/* system is dumping */
 int rebooting;				/* system is rebooting */
 static struct dumperinfo dumper;	/* our selected dumper */
 
+#ifdef SMP
+static int panic_stopped_cpus;
+static u_int panic_cpu = NOCPU;
+#endif
+
 /* Context information for dump-debuggers. */
 static struct pcb dumppcb;		/* Registers. */
 static lwpid_t dumptid;			/* Thread ID. */
@@ -280,10 +292,12 @@ boot(int howto)
 	 * systems don't shutdown properly (i.e., ACPI power off) if we
 	 * run on another processor.
 	 */
-	thread_lock(curthread);
-	sched_bind(curthread, 0);
-	thread_unlock(curthread);
-	KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+	if (!panic_stopped_cpus) {
+		thread_lock(curthread);
+		sched_bind(curthread, 0);
+		thread_unlock(curthread);
+		KASSERT(PCPU_GET(cpuid) == 0, ("boot: not running on cpu 0"));
+	}
 #endif
 	/* We're in the process of rebooting. */
 	rebooting = 1;
@@ -297,7 +311,8 @@ boot(int howto)
 	/*
 	 * Do any callouts that should be done BEFORE syncing the filesystems.
 	 */
-	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);
+	if (!panic_stopped_cpus)
+		EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);
 
 	/*
 	 * Now sync filesystems
@@ -413,7 +428,8 @@ boot(int howto)
 	 * Ok, now do things that assume all filesystem activity has
 	 * been completed.
 	 */
-	EVENTHANDLER_INVOKE(shutdown_post_sync, howto);
+	if (!panic_stopped_cpus)
+		EVENTHANDLER_INVOKE(shutdown_post_sync, howto);
 
 	if ((howto & (RB_HALT|RB_DUMP)) == RB_DUMP && !cold && !dumping)
 		doadump();
@@ -503,7 +519,8 @@ shutdown_reset(void *junk, int howto)
 	 * For the !SMP case it just needs to handle the former problem.
 	 */
 #ifdef SMP
-	mtx_lock_spin(&smp_ipi_mtx);
+	if (!panic_stopped_cpus)
+		mtx_lock_spin(&smp_ipi_mtx);
 #else
 	spinlock_enter();
 #endif
@@ -513,10 +530,6 @@ shutdown_reset(void *junk, int howto)
 	/* NOTREACHED */ /* assuming reset worked */
 }
 
-#ifdef SMP
-static u_int panic_cpu = NOCPU;
-#endif
-
 /*
  * Panic is called on unresolvable fatal errors.  It prints "panic: mesg",
  * and then reboots.  If we are called twice, then we avoid trying to sync
@@ -530,7 +543,7 @@ panic(const char *fmt, ...)
 	va_list ap;
 	static char buf[256];
 
-	critical_enter();
+	spinlock_enter();
 #ifdef SMP
 	/*
 	 * We don't want multiple CPU's to panic at the same time, so we
@@ -543,6 +556,9 @@ panic(const char *fmt, ...)
 		    PCPU_GET(cpuid)) == 0)
 			while (panic_cpu != NOCPU)
 				; /* nothing */
+
+	if ((panic_stopped_cpus = stop_cpus_on_panic) != 0)
+		stop_cpus_hard(PCPU_GET(other_cpus));
 #endif
 
 	bootopt = RB_AUTOBOOT | RB_DUMP;
@@ -579,7 +595,11 @@ panic(const char *fmt, ...)
 	if (panicstr == NULL) {
 #ifdef SMP
 		atomic_store_rel_int(&panic_cpu, NOCPU);
+		if (panic_stopped_cpus)
+			restart_cpus(stopped_cpus);
 #endif
+
+		spinlock_exit();
 		return;
 	}
 #endif
@@ -587,9 +607,10 @@ panic(const char *fmt, ...)
 	/*thread_lock(td); */
 	td->td_flags |= TDF_INPANIC;
 	/* thread_unlock(td); */
-	if (!sync_on_panic)
+
+	if (panic_stopped_cpus || !sync_on_panic)
 		bootopt |= RB_NOSYNC;
-	critical_exit();
+
 	boot(bootopt);
 }
 
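
The kern_shutdown.c part supplies the machinery those bypasses rely on: panic() now, under the kern.stop_cpus_on_panic tunable, calls stop_cpus_hard() on all other CPUs before doing anything else, records that in panic_stopped_cpus, and boot()/shutdown_reset() then skip every step that would need another CPU to respond (sched_bind() to CPU 0, the shutdown eventhandlers, smp_ipi_mtx). The user-space model below captures just the stop handshake, assuming stop_cpus_hard() spins until every other CPU acknowledges; all toy_* names are invented for illustration.

/*
 * Model of the stop-CPUs handshake: raise a stop request, then spin
 * until every other "CPU" parks itself.  After that the panicking CPU
 * is truly alone.  Build with -pthread (C11 atomics).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define	TOY_NCPU	4

static atomic_int stop_requested;
static atomic_int stopped_count;

static void *
toy_cpu(void *arg)
{

	(void)arg;
	for (;;) {
		/* Normal work would happen here. */
		if (atomic_load(&stop_requested)) {
			/* Ack the stop and park, like cpustop_handler(). */
			atomic_fetch_add(&stopped_count, 1);
			for (;;)
				;
		}
	}
}

static void
toy_stop_cpus_hard(void)
{

	atomic_store(&stop_requested, 1);
	while (atomic_load(&stopped_count) != TOY_NCPU - 1)
		;	/* spin until every other CPU checks in */
}

int
main(void)
{
	pthread_t td[TOY_NCPU - 1];
	int i;

	for (i = 0; i < TOY_NCPU - 1; i++)
		pthread_create(&td[i], NULL, toy_cpu, NULL);
	toy_stop_cpus_hard();	/* what the patched panic() does up front */
	printf("other CPUs parked; continuing single-threaded\n");
	return (0);	/* process exit reclaims the parked threads */
}

This ordering is also why the patch switches critical_enter()/critical_exit() to spinlock_enter(): the panicking thread keeps interrupts off and stays on its CPU for the whole dump/reboot path instead of merely suppressing preemption.
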
diff --git a/sys/kern/kern_sx.c b/sys/kern/kern_sx.c
index 35ee91c..b97593b 100644
--- a/sys/kern/kern_sx.c
+++ b/sys/kern/kern_sx.c
@@ -494,6 +494,9 @@ _sx_xlock_hard(struct sx *sx, uintptr_t tid, int opts, const char *file,
 		return (0);
 	}
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return (0);
+
 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
 		CTR5(KTR_LOCK, "%s: %s contested (lock=%p) at %s:%d", __func__,
 		    sx->lock_object.lo_name, (void *)sx->sx_lock, file, line);
@@ -692,6 +695,10 @@ _sx_xunlock_hard(struct sx *sx, uintptr_t tid, const char *file, int line)
 		CTR2(KTR_LOCK, "%s: %p unrecursing", __func__, sx);
 		return;
 	}
+
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	MPASS(sx->sx_lock & (SX_LOCK_SHARED_WAITERS |
 	    SX_LOCK_EXCLUSIVE_WAITERS));
 	if (LOCK_LOG_TEST(&sx->lock_object, 0))
@@ -754,6 +761,9 @@ _sx_slock_hard(struct sx *sx, int opts, const char *file, int line)
 	int64_t sleep_time = 0;
 #endif
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return (0);
+
 	/*
 	 * As with rwlocks, we don't make any attempt to try to block
 	 * shared locks once there is an exclusive waiter.
@@ -920,6 +930,9 @@ _sx_sunlock_hard(struct sx *sx, const char *file, int line)
 	uintptr_t x;
 	int wakeup_swapper;
 
+	if (panicstr != NULL && curthread->td_flags & TDF_INPANIC)
+		return;
+
 	for (;;) {
 		x = sx->sx_lock;
 
diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c
index 30a8bb3..93cba9c 100644
--- a/sys/kern/kern_synch.c
+++ b/sys/kern/kern_synch.c
@@ -158,7 +158,7 @@ _sleep(void *ident, struct lock_object *lock, int priority,
 	else
 		class = NULL;
 
-	if (cold) {
+	if (cold || (panicstr != NULL && curthread->td_flags & TDF_INPANIC)) {
 		/*
 		 * During autoconfiguration, just return;
 		 * don't run any other threads or panic below,
@@ -260,7 +260,7 @@ msleep_spin(void *ident, struct mtx *mtx, const char *wmesg, int timo)
 	KASSERT(p != NULL, ("msleep1"));
 	KASSERT(ident != NULL && TD_IS_RUNNING(td), ("msleep"));
 
-	if (cold) {
+	if (cold || (panicstr != NULL && curthread->td_flags & TDF_INPANIC)) {
 		/*
 		 * During autoconfiguration, just return;
 		 * don't run any other threads or panic below,
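
Finally, kern_synch.c gives the panicking thread the same treatment in the sleep primitives that every thread already gets during early boot: _sleep() and msleep_spin() return immediately, as if the wakeup had already happened, since no other thread is left to call wakeup(). A sketch of that predicate; toy_msleep and the flag variables are illustrative only.

/*
 * Sketch of the panic-time sleep bypass: a panicking thread asking to
 * sleep is treated like a thread sleeping while cold and returns
 * success at once.
 */
#include <stdio.h>

static const char *panicstr;
static int td_inpanic;		/* models curthread's TDF_INPANIC */
static int cold;		/* nonzero during autoconfiguration */

static int
toy_msleep(void *ident, int timo)
{

	(void)ident;
	(void)timo;
	if (cold || (panicstr != NULL && td_inpanic))
		return (0);	/* pretend the wakeup already happened */
	/* ... a real implementation would block on a sleepqueue ... */
	return (0);
}

int
main(void)
{
	int dummy;

	panicstr = "toy panic";
	td_inpanic = 1;
	printf("toy_msleep -> %d (returned without sleeping)\n",
	    toy_msleep(&dummy, 100));
	return (0);
}

One consequence worth keeping in mind while reading the patch: a timed sleep in the panic path no longer provides any delay, so code that used msleep() with a timeout for pacing will run through it instantly, exactly as it already does while cold.
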