diff --git a/sys/kern/kern_exit.c b/sys/kern/kern_exit.c
index 4ae24bcd7059..25b17ff07e66 100644
--- a/sys/kern/kern_exit.c
+++ b/sys/kern/kern_exit.c
@@ -819,6 +819,7 @@ proc_reap(struct thread *td, struct proc *p, int *status, int options)
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	KASSERT(p->p_state == PRS_ZOMBIE, ("proc_reap: !PRS_ZOMBIE"));
+	mtx_spin_wait_unlocked(&p->p_slock);
 
 	q = td->td_proc;
 	if (status)
diff --git a/sys/kern/kern_mutex.c b/sys/kern/kern_mutex.c
index c652eca064cc..6cf5a2b08b8c 100644
--- a/sys/kern/kern_mutex.c
+++ b/sys/kern/kern_mutex.c
@@ -1226,6 +1226,27 @@ _mtx_lock_indefinite_check(struct mtx *m, struct lock_delay_arg *ldap)
 	cpu_spinwait();
 }
 
+/*
+ * Spin until the given spin mutex is observed unlocked, without ever
+ * acquiring it.  Used when the caller only needs to wait out a transient
+ * holder (e.g. proc_reap() draining p_slock on a zombie process) rather
+ * than take ownership.  Falls back to the indefinite-wait diagnostics
+ * after ~10M spins so a deadlocked holder is still reported.
+ */
+void
+mtx_spin_wait_unlocked(struct mtx *m)
+{
+	struct lock_delay_arg lda;
+
+	lda.spin_cnt = 0;
+	while (atomic_load_acq_ptr(&m->mtx_lock) != MTX_UNOWNED) {
+		if (__predict_true(lda.spin_cnt < 10000000)) {
+			cpu_spinwait();
+			lda.spin_cnt++;
+		} else {
+			_mtx_lock_indefinite_check(m, &lda);
+		}
+	}
+}
+
 #ifdef DDB
 void
 db_show_mtx(const struct lock_object *lock)
diff --git a/sys/sys/mutex.h b/sys/sys/mutex.h
index 805df8adc40a..f9ce4e3bdca7 100644
--- a/sys/sys/mutex.h
+++ b/sys/sys/mutex.h
@@ -125,6 +125,8 @@ int __mtx_trylock_spin_flags(volatile uintptr_t *c, int opts,
 	    const char *file, int line);
 void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
 	    const char *file, int line);
+void mtx_spin_wait_unlocked(struct mtx *m);
+
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
 void __mtx_assert(const volatile uintptr_t *c, int what, const char *file,
 	    int line);