--- //depot/projects/smpng/sys/kern/kern_thread.c	2004/07/16 21:12:15
+++ //depot/user/jhb/preemption/kern/kern_thread.c	2004/07/16 21:13:27
@@ -1066,9 +1066,11 @@
 	mtx_assert(&sched_lock, MA_OWNED);
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	if (!P_SHOULDSTOP(p)) {
+		critical_enter();
 		while ((td = TAILQ_FIRST(&p->p_suspended))) {
 			thread_unsuspend_one(td);
 		}
+		critical_exit();
 	} else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) &&
 	    (p->p_numthreads == p->p_suspcount)) {
 		/*
--- //depot/projects/smpng/sys/kern/subr_sleepqueue.c	2004/07/02 19:16:11
+++ //depot/user/jhb/preemption/kern/subr_sleepqueue.c	2004/07/09 20:08:09
@@ -695,11 +695,13 @@
 	sleepq_release(wchan);
 
 	/* Resume all the threads on the temporary list. */
+	critical_enter();
 	while (!TAILQ_EMPTY(&list)) {
 		td = TAILQ_FIRST(&list);
 		TAILQ_REMOVE(&list, td, td_slpq);
 		sleepq_resume_thread(td, pri);
 	}
+	critical_exit();
 }
 
 /*
--- //depot/projects/smpng/sys/kern/subr_turnstile.c	2004/07/02 19:16:11
+++ //depot/user/jhb/preemption/kern/subr_turnstile.c	2004/07/09 20:08:09
@@ -727,6 +726,7 @@
 	 * in turnstile_wait().  Set a flag to force it to try to acquire
 	 * the lock again instead of blocking.
 	 */
+	critical_enter();
 	while (!TAILQ_EMPTY(&pending_threads)) {
 		td = TAILQ_FIRST(&pending_threads);
 		TAILQ_REMOVE(&pending_threads, td, td_lockq);
@@ -742,6 +742,7 @@
 			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
 		}
 	}
+	critical_exit();
 	mtx_unlock_spin(&sched_lock);
 }
 
--- //depot/projects/smpng/sys/vm/vm_glue.c	2004/07/02 04:13:06
+++ //depot/user/jhb/preemption/vm/vm_glue.c	2004/07/02 20:33:30
@@ -753,6 +753,7 @@
 			vm_thread_swapin(td);
 
 		PROC_LOCK(p);
+		critical_enter();
 		mtx_lock_spin(&sched_lock);
 		p->p_sflag &= ~PS_SWAPPINGIN;
 		p->p_sflag |= PS_INMEM;
@@ -767,6 +768,7 @@
 
 		/* Allow other threads to swap p out now. */
 		--p->p_lock;
+		critical_exit();
 	}
 #endif /* NO_SWAPPING */
 }
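
The pattern common to all four hunks: each loop that makes a batch of threads
runnable is bracketed by critical_enter()/critical_exit().  With full
preemption, the first awakened thread with a higher priority than the caller
would otherwise preempt the caller mid-loop, leaving the remaining threads
queued until the caller is rescheduled; the critical section defers any such
preemption until the whole batch has been resumed.  A minimal sketch of the
idea, assuming a FreeBSD 5.x-era kernel; the function name, list head type,
and per-iteration locking below are illustrative, not code from this patch:

/*
 * Sketch only: wakeup_batch() and struct td_list are hypothetical;
 * critical_enter()/critical_exit(), sched_lock, and setrunnable()
 * are the real kernel interfaces being demonstrated.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/queue.h>

TAILQ_HEAD(td_list, thread);

static void
wakeup_batch(struct td_list *list)
{
	struct thread *td;

	/*
	 * Enter a critical section so that making a higher-priority
	 * thread runnable below does not preempt us before the list
	 * is fully drained.
	 */
	critical_enter();
	while ((td = TAILQ_FIRST(list)) != NULL) {
		TAILQ_REMOVE(list, td, td_slpq);
		mtx_lock_spin(&sched_lock);
		setrunnable(td);	/* may post a deferred preemption */
		mtx_unlock_spin(&sched_lock);
	}
	/* A preemption posted above takes effect here, after the loop. */
	critical_exit();
}

Note that mtx_lock_spin() itself implies a critical section, so within a
single iteration a preemption could only fire at mtx_unlock_spin(); the
explicit outer section extends that protection across iterations, which is
presumably why the turnstile hunk can safely drop it just before the final
mtx_unlock_spin(&sched_lock).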