Index: kern/kern_exit.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_exit.c,v
retrieving revision 1.262
diff -u -r1.262 kern_exit.c
--- kern/kern_exit.c	5 May 2005 06:42:02 -0000	1.262
+++ kern/kern_exit.c	19 May 2005 02:56:12 -0000
@@ -503,15 +503,7 @@
 	critical_enter();
 	mtx_unlock_spin(&sched_lock);
 	wakeup(p->p_pptr);
-	/*
-	 * XXX hack, swap in parent process, please see TDP_WAKEPROC0
-	 * code, because TDP_WAKEPROC0 is only useful if thread is
-	 * leaving critical region, but here we never leave and
-	 * thread_exit() will call cpu_throw(), TDP_WAKEPROC0 is never
-	 * cleared.
-	 */
-	if (p->p_pptr->p_sflag & PS_SWAPINREQ)
-		wakeup(&proc0);
+	PROC_UNLOCK(p->p_pptr);
 	mtx_lock_spin(&sched_lock);
 	critical_exit();
 
Index: kern/kern_switch.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_switch.c,v
retrieving revision 1.112
diff -u -r1.112 kern_switch.c
--- kern/kern_switch.c	19 May 2005 01:08:30 -0000	1.112
+++ kern/kern_switch.c	19 May 2005 02:56:12 -0000
@@ -593,15 +593,9 @@
 	td = curthread;
 	KASSERT(td->td_critnest != 0, ("critical_exit: td_critnest == 0"));
+#ifdef PREEMPTION
 	if (td->td_critnest == 1) {
-		if (td->td_pflags & TDP_WAKEPROC0) {
-			td->td_pflags &= ~TDP_WAKEPROC0;
-			wakeup(&proc0);
-		}
-		td->td_critnest = 0;
-
-#ifdef PREEMPTION
 		mtx_assert(&sched_lock, MA_NOTOWNED);
 		if (td->td_owepreempt) {
 			td->td_critnest = 1;
@@ -610,12 +604,11 @@
 			mi_switch(SW_INVOL, NULL);
 			mtx_unlock_spin(&sched_lock);
 		}
-
+	} else
 #endif
-
-	} else {
 		td->td_critnest--;
-	}
+
+	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d",
 	    td, (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
 }
Index: kern/kern_synch.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_synch.c,v
retrieving revision 1.269
diff -u -r1.269 kern_synch.c
--- kern/kern_synch.c	8 Apr 2005 03:37:53 -0000	1.269
+++ kern/kern_synch.c	19 May 2005 02:56:12 -0000
@@ -410,11 +410,10 @@
 			p->p_sflag |= PS_SWAPINREQ;
 			/*
 			 * due to a LOR between sched_lock and
-			 * the sleepqueue chain locks, delay
-			 * wakeup proc0 until thread leaves
-			 * critical region.
+			 * the sleepqueue chain locks, use
+			 * lower level scheduling functions.
 			 */
-			curthread->td_pflags |= TDP_WAKEPROC0;
+			kick_proc0();
 		}
 	} else
 		sched_wakeup(td);
Index: sys/proc.h
===================================================================
RCS file: /cvsroot/src/sys/sys/proc.h,v
retrieving revision 1.428
diff -u -r1.428 proc.h
--- sys/proc.h	23 Apr 2005 02:32:32 -0000	1.428
+++ sys/proc.h	19 May 2005 02:56:18 -0000
@@ -370,7 +370,7 @@
 #define	TDP_SA		0x00000080 /* A scheduler activation based thread. */
 #define	TDP_UNUSED8	0x00000100 /* --available -- */
 #define	TDP_OWEUPC	0x00000200 /* Call addupc() at next AST. */
-#define	TDP_WAKEPROC0	0x00000400 /* Wants caller to wakeup(&proc0) */
+#define	TDP_UNUSED10	0x00000400 /* --available -- */
 #define	TDP_CAN_UNBIND	0x00000800 /* Only temporarily bound. */
 #define	TDP_SCHED1	0x00001000 /* Reserved for scheduler private use */
 #define	TDP_SCHED2	0x00002000 /* Reserved for scheduler private use */
@@ -848,6 +848,7 @@
 		    struct trapframe *);
 void	fork_return(struct thread *, struct trapframe *);
 int	inferior(struct proc *p);
+void	kick_proc0(void);
 int	leavepgrp(struct proc *p);
 int	maybe_preempt(struct thread *td);
 void	mi_switch(int flags, struct thread *newtd);
Index: vm/vm_glue.c
===================================================================
RCS file: /cvsroot/src/sys/vm/vm_glue.c,v
retrieving revision 1.210
diff -u -r1.210 vm_glue.c
--- vm/vm_glue.c	22 Jan 2005 19:21:29 -0000	1.210
+++ vm/vm_glue.c	19 May 2005 02:56:18 -0000
@@ -113,6 +113,10 @@
 static void swapout(struct proc *);
 #endif
 
+
+static volatile int proc0_rescan;
+
+
 /*
  * MPSAFE
  *
@@ -578,7 +582,8 @@
  *
  * XXXKSE - process with the thread with highest priority counts..
  *
- * Giant is still held at this point, to be released in tsleep.
+ * Giant is still held at this point.
+ * ups: Do we still need Giant here?
  */
 /* ARGSUSED*/
 static void
@@ -597,6 +602,9 @@
 loop:
 	if (vm_page_count_min()) {
 		VM_WAIT;
+		mtx_lock_spin(&sched_lock);
+		proc0_rescan = 0;
+		mtx_unlock_spin(&sched_lock);
 		goto loop;
 	}
 
@@ -642,7 +650,15 @@
 	 * Nothing to do, back to sleep.
 	 */
 	if ((p = pp) == NULL) {
-		tsleep(&proc0, PVM, "sched", maxslp * hz / 2);
+		mtx_unlock(&Giant);
+		mtx_lock_spin(&sched_lock);
+		if (!proc0_rescan) {
+			TD_SET_IWAIT(&thread0);
+			mi_switch(SW_VOL, NULL);
+		}
+		proc0_rescan = 0;
+		mtx_unlock_spin(&sched_lock);
+		mtx_lock(&Giant);
 		goto loop;
 	}
 	PROC_LOCK(p);
@@ -654,6 +670,9 @@
 	 */
 	if (p->p_sflag & (PS_INMEM | PS_SWAPPINGOUT | PS_SWAPPINGIN)) {
 		PROC_UNLOCK(p);
+		mtx_lock_spin(&sched_lock);
+		proc0_rescan = 0;
+		mtx_unlock_spin(&sched_lock);
 		goto loop;
 	}
 
@@ -669,9 +688,28 @@
 	PROC_UNLOCK(p);
 	mtx_lock_spin(&sched_lock);
 	p->p_swtime = 0;
+	proc0_rescan = 0;
 	mtx_unlock_spin(&sched_lock);
 	goto loop;
 }
+
+void kick_proc0(void)
+{
+	struct thread *td = &thread0;
+
+
+	if (TD_AWAITING_INTR(td)) {
+		CTR2(KTR_INTR, "%s: setrunqueue %d", __func__, 0);
+		TD_CLR_IWAIT(td);
+		setrunqueue(td, SRQ_INTR);
+	} else {
+		proc0_rescan = 1;
+		CTR2(KTR_INTR, "%s: state %d",
+		    __func__, td->td_state);
+	}
+
+}
+
 #ifndef NO_SWAPPING