Index: sys/kern/kern_condvar.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/kern/kern_condvar.c,v retrieving revision 1.4.2.1 diff -u -r1.4.2.1 kern_condvar.c --- sys/kern/kern_condvar.c 13 Dec 2007 10:42:05 -0000 1.4.2.1 +++ sys/kern/kern_condvar.c 20 Aug 2008 13:54:26 -0000 @@ -395,13 +395,17 @@ void cv_signal(struct cv *cvp) { + int wakeup_swapper; + wakeup_swapper = 0; sleepq_lock(cvp); if (cvp->cv_waiters > 0) { cvp->cv_waiters--; - sleepq_signal(cvp, SLEEPQ_CONDVAR, -1, 0); + wakeup_swapper = sleepq_signal(cvp, SLEEPQ_CONDVAR, -1, 0); } sleepq_release(cvp); + if (wakeup_swapper) + kick_proc0(); } /* Index: sys/kern/kern_kse.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/kern/kern_kse.c,v retrieving revision 1.2.4.1.4.1 diff -u -r1.2.4.1.4.1 kern_kse.c --- sys/kern/kern_kse.c 25 Jul 2008 07:18:00 -0000 1.2.4.1.4.1 +++ sys/kern/kern_kse.c 20 Aug 2008 13:54:26 -0000 @@ -211,7 +211,7 @@ struct kse_upcall *ku; struct kse_thr_mailbox *tmbx; uint32_t flags; - int error; + int error, wakeup_swapper; p = td->td_proc; @@ -239,6 +239,7 @@ PROC_UNLOCK(p); return (ESRCH); } + wakeup_swapper = 0; thread_lock(td2); PROC_SUNLOCK(p); if (uap->cmd == KSE_INTR_SENDSIG) { @@ -258,8 +259,11 @@ else td2->td_intrval = ERESTART; if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) - sleepq_abort(td2, td2->td_intrval); + wakeup_swapper = + sleepq_abort(td2, td2->td_intrval); thread_unlock(td2); + if (wakeup_swapper) + kick_proc0(); } PROC_UNLOCK(p); break; Index: sys/kern/kern_sig.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/kern/kern_sig.c,v retrieving revision 1.2.4.1.4.1 diff -u -r1.2.4.1.4.1 kern_sig.c --- sys/kern/kern_sig.c 25 Jul 2008 07:18:01 -0000 1.2.4.1.4.1 +++ sys/kern/kern_sig.c 20 Aug 2008 13:54:26 -0000 @@ -85,7 +85,7 @@ static int killpg1(struct thread *td, int sig, int pgid, int all); static int 
issignal(struct thread *p); static int sigprop(int sig); -static void tdsigwakeup(struct thread *, int, sig_t, int); +static int tdsigwakeup(struct thread *, int, sig_t, int); static void sig_suspend_threads(struct thread *, struct proc *, int); static int filt_sigattach(struct knote *kn); static void filt_sigdetach(struct knote *kn); @@ -2044,7 +2044,7 @@ sigqueue_t *sigqueue; int prop; struct sigacts *ps; - int intrval; + int intrval, wakeup_swapper; int ret = 0; PROC_LOCK_ASSERT(p, MA_OWNED); @@ -2273,11 +2273,14 @@ * the PROCESS runnable, leave it stopped. * It may run a bit until it hits a thread_suspend_check(). */ + wakeup_swapper = 0; thread_lock(td); if (TD_ON_SLEEPQ(td) && (td->td_flags & TDF_SINTR)) - sleepq_abort(td, intrval); + wakeup_swapper = sleepq_abort(td, intrval); thread_unlock(td); PROC_SUNLOCK(p); + if (wakeup_swapper) + kick_proc0(); goto out; /* * Mutexes are short lived. Threads waiting on them will @@ -2286,9 +2289,11 @@ } else if (p->p_state == PRS_NORMAL) { if (p->p_flag & P_TRACED || action == SIG_CATCH) { thread_lock(td); - tdsigwakeup(td, sig, action, intrval); + wakeup_swapper = tdsigwakeup(td, sig, action, intrval); thread_unlock(td); PROC_SUNLOCK(p); + if (wakeup_swapper) + kick_proc0(); goto out; } @@ -2334,10 +2339,12 @@ runfast: thread_lock(td); - tdsigwakeup(td, sig, action, intrval); + wakeup_swapper = tdsigwakeup(td, sig, action, intrval); thread_unlock(td); thread_unsuspend(p); PROC_SUNLOCK(p); + if (wakeup_swapper) + kick_proc0(); out: /* If we jump here, proc slock should not be owned. */ PROC_SLOCK_ASSERT(p, MA_NOTOWNED); @@ -2349,16 +2356,17 @@ * thread. We need to see what we can do about knocking it * out of any sleep it may be in etc. 
*/ -static void +static int tdsigwakeup(struct thread *td, int sig, sig_t action, int intrval) { struct proc *p = td->td_proc; - register int prop; + int prop, wakeup_swapper; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); THREAD_LOCK_ASSERT(td, MA_OWNED); prop = sigprop(sig); + wakeup_swapper = 0; /* * Bring the priority of a thread up if we want it to get @@ -2375,7 +2383,7 @@ * trap() or syscall(). */ if ((td->td_flags & TDF_SINTR) == 0) - return; + return (0); /* * If SIGCONT is default (or ignored) and process is * asleep, we are finished; the process should not @@ -2392,7 +2400,7 @@ sigqueue_delete(&td->td_sigqueue, sig); PROC_SLOCK(p); thread_lock(td); - return; + return (0); } /* @@ -2401,7 +2409,7 @@ if (td->td_priority > PUSER) sched_prio(td, PUSER); - sleepq_abort(td, intrval); + wakeup_swapper = sleepq_abort(td, intrval); } else { /* * Other states do nothing with the signal immediately, @@ -2413,6 +2421,7 @@ forward_signal(td); #endif } + return (wakeup_swapper); } static void Index: sys/kern/kern_synch.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/kern/kern_synch.c,v retrieving revision 1.6.2.2.2.1.2.1 diff -u -r1.6.2.2.2.1.2.1 kern_synch.c --- sys/kern/kern_synch.c 24 Apr 2008 04:01:15 -0000 1.6.2.2.2.1.2.1 +++ sys/kern/kern_synch.c 20 Aug 2008 13:54:27 -0000 @@ -371,10 +371,13 @@ wakeup_one(ident) register void *ident; { + int wakeup_swapper; sleepq_lock(ident); - sleepq_signal(ident, SLEEPQ_SLEEP, -1, 0); + wakeup_swapper = sleepq_signal(ident, SLEEPQ_SLEEP, -1, 0); sleepq_release(ident); + if (wakeup_swapper) + kick_proc0(); } static void @@ -480,7 +483,7 @@ * placing it on the run queue if it is in memory, * and awakening the swapper if it isn't in memory. 
*/ -void +int setrunnable(struct thread *td) { @@ -490,15 +493,15 @@ switch (td->td_state) { case TDS_RUNNING: case TDS_RUNQ: - return; + return (0); case TDS_INHIBITED: /* * If we are only inhibited because we are swapped out * then arange to swap in this process. Otherwise just return. */ if (td->td_inhibitors != TDI_SWAPPED) - return; - /* XXX: intentional fall-through ? */ + return (0); + /* FALLTHROUGH. */ case TDS_CAN_RUN: break; default: @@ -508,15 +511,11 @@ if ((td->td_flags & TDF_INMEM) == 0) { if ((td->td_flags & TDF_SWAPINREQ) == 0) { td->td_flags |= TDF_SWAPINREQ; - /* - * due to a LOR between the thread lock and - * the sleepqueue chain locks, use - * lower level scheduling functions. - */ - kick_proc0(); + return (1); } } else sched_wakeup(td); + return (0); } /* Index: sys/kern/kern_thread.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/kern/kern_thread.c,v retrieving revision 1.2.4.3.2.1.2.4 diff -u -r1.2.4.3.2.1.2.4 kern_thread.c --- sys/kern/kern_thread.c 25 Jul 2008 07:18:01 -0000 1.2.4.3.2.1.2.4 +++ sys/kern/kern_thread.c 20 Aug 2008 13:54:27 -0000 @@ -384,6 +384,7 @@ struct thread *td; struct thread *td2; struct proc *p; + int wakeup_swapper; td = curthread; p = td->td_proc; @@ -456,8 +457,11 @@ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount) { thread_lock(p->p_singlethread); + wakeup_swapper = thread_unsuspend_one(p->p_singlethread); thread_unlock(p->p_singlethread); + if (wakeup_swapper) + kick_proc0(); } } @@ -627,7 +631,7 @@ struct thread *td; struct thread *td2; struct proc *p; - int remaining; + int remaining, wakeup_swapper; td = curthread; p = td->td_proc; @@ -667,6 +671,7 @@ FOREACH_THREAD_IN_PROC(p, td2) { if (td2 == td) continue; + wakeup_swapper = 0; thread_lock(td2); td2->td_flags |= TDF_ASTPENDING; if (TD_IS_INHIBITED(td2)) { @@ -675,18 +680,22 @@ if (td->td_flags & TDF_DBSUSPEND) td->td_flags &= ~TDF_DBSUSPEND; if (TD_IS_SUSPENDED(td2)) - 
thread_unsuspend_one(td2); + wakeup_swapper = + thread_unsuspend_one(td2); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) - sleepq_abort(td2, EINTR); + wakeup_swapper |= + sleepq_abort(td2, EINTR); break; case SINGLE_BOUNDARY: if (TD_IS_SUSPENDED(td2) && !(td2->td_flags & TDF_BOUNDARY)) - thread_unsuspend_one(td2); + wakeup_swapper = + thread_unsuspend_one(td2); if (TD_ON_SLEEPQ(td2) && (td2->td_flags & TDF_SINTR)) - sleepq_abort(td2, ERESTART); + wakeup_swapper |= + sleepq_abort(td2, ERESTART); break; default: if (TD_IS_SUSPENDED(td2)) { @@ -709,6 +718,8 @@ } #endif thread_unlock(td2); + if (wakeup_swapper) + kick_proc0(); } if (mode == SINGLE_EXIT) remaining = p->p_numthreads; @@ -789,6 +800,7 @@ { struct thread *td; struct proc *p; + int wakeup_swapper; td = curthread; p = td->td_proc; @@ -832,8 +844,11 @@ if (P_SHOULDSTOP(p) == P_STOPPED_SINGLE) { if (p->p_numthreads == p->p_suspcount + 1) { thread_lock(p->p_singlethread); - thread_unsuspend_one(p->p_singlethread); + wakeup_swapper = + thread_unsuspend_one(p->p_singlethread); thread_unlock(p->p_singlethread); + if (wakeup_swapper) + kick_proc0(); } } PROC_UNLOCK(p); @@ -900,7 +915,7 @@ TD_SET_SUSPENDED(td); } -void +int thread_unsuspend_one(struct thread *td) { struct proc *p = td->td_proc; @@ -910,7 +925,7 @@ KASSERT(TD_IS_SUSPENDED(td), ("Thread not suspended")); TD_CLR_SUSPENDED(td); p->p_suspcount--; - setrunnable(td); + return (setrunnable(td)); } /* @@ -920,16 +935,19 @@ thread_unsuspend(struct proc *p) { struct thread *td; + int wakeup_swapper; + wakeup_swapper = 0; PROC_LOCK_ASSERT(p, MA_OWNED); PROC_SLOCK_ASSERT(p, MA_OWNED); if (!P_SHOULDSTOP(p)) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); - if (TD_IS_SUSPENDED(td)) { - thread_unsuspend_one(td); - } + if (TD_IS_SUSPENDED(td)) + wakeup_swapper = thread_unsuspend_one(td); thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); } } else if ((P_SHOULDSTOP(p) == P_STOPPED_SINGLE) && (p->p_numthreads == p->p_suspcount)) { @@ -939,8 +957,10 @@ *
let it continue. */ thread_lock(p->p_singlethread); - thread_unsuspend_one(p->p_singlethread); + wakeup_swapper = thread_unsuspend_one(p->p_singlethread); thread_unlock(p->p_singlethread); + if (wakeup_swapper) + kick_proc0(); } } @@ -952,9 +972,11 @@ { struct thread *td; struct proc *p; + int wakeup_swapper; td = curthread; p = td->td_proc; + wakeup_swapper = 0; PROC_LOCK_ASSERT(p, MA_OWNED); p->p_flag &= ~(P_STOPPED_SINGLE | P_SINGLE_EXIT | P_SINGLE_BOUNDARY); PROC_SLOCK(p); @@ -968,10 +990,11 @@ if ((p->p_numthreads != 1) && (!P_SHOULDSTOP(p))) { FOREACH_THREAD_IN_PROC(p, td) { thread_lock(td); - if (TD_IS_SUSPENDED(td)) { - thread_unsuspend_one(td); - } + if (TD_IS_SUSPENDED(td)) + wakeup_swapper = thread_unsuspend_one(td); thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); } } PROC_SUNLOCK(p); Index: sys/kern/subr_sleepqueue.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/kern/subr_sleepqueue.c,v retrieving revision 1.6.2.3.2.1.2.1 diff -u -r1.6.2.3.2.1.2.1 subr_sleepqueue.c --- sys/kern/subr_sleepqueue.c 24 Apr 2008 04:01:17 -0000 1.6.2.3.2.1.2.1 +++ sys/kern/subr_sleepqueue.c 20 Aug 2008 13:54:27 -0000 @@ -157,7 +157,7 @@ static void sleepq_dtor(void *mem, int size, void *arg); #endif static int sleepq_init(void *mem, int size, int flags); -static void sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, +static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri); static void sleepq_switch(void *wchan); static void sleepq_timeout(void *arg); @@ -439,7 +439,15 @@ */ if (TD_ON_SLEEPQ(td)) { sq = sleepq_lookup(wchan); - sleepq_resume_thread(sq, td, -1); + if (sleepq_resume_thread(sq, td, -1)) { +#ifdef INVARIANTS + /* + * This thread has not gone to sleep yet, so it + * should not be swapped out. 
+ */ + panic("not waking up swapper"); +#endif + } } mtx_unlock_spin(&sc->sc_lock); MPASS(td->td_lock != &sc->sc_lock); @@ -479,7 +487,15 @@ if (td->td_flags & TDF_TIMEOUT) { MPASS(TD_ON_SLEEPQ(td)); sq = sleepq_lookup(wchan); - sleepq_resume_thread(sq, td, -1); + if (sleepq_resume_thread(sq, td, -1)) { +#ifdef INVARIANTS + /* + * This thread has not gone to sleep yet, so it + * should not be swapped out. + */ + panic("not waking up swapper"); +#endif + } mtx_unlock_spin(&sc->sc_lock); return; } @@ -652,7 +668,7 @@ * Removes a thread from a sleep queue and makes it * runnable. */ -static void +static int sleepq_resume_thread(struct sleepqueue *sq, struct thread *td, int pri) { struct sleepqueue_chain *sc; @@ -704,7 +720,7 @@ MPASS(pri == -1 || (pri >= PRI_MIN && pri <= PRI_MAX)); if (pri != -1 && td->td_priority > pri) sched_prio(td, pri); - setrunnable(td); + return (setrunnable(td)); } #ifdef INVARIANTS @@ -743,18 +759,19 @@ /* * Find the highest priority thread sleeping on a wait channel and resume it. 
*/ -void +int sleepq_signal(void *wchan, int flags, int pri, int queue) { struct sleepqueue *sq; struct thread *td, *besttd; + int wakeup_swapper; CTR2(KTR_PROC, "sleepq_signal(%p, %d)", wchan, flags); KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); MPASS((queue >= 0) && (queue < NR_SLEEPQS)); sq = sleepq_lookup(wchan); if (sq == NULL) - return; + return (0); KASSERT(sq->sq_type == (flags & SLEEPQ_TYPE), ("%s: mismatch between sleep/wakeup and cv_*", __func__)); @@ -771,8 +788,9 @@ } MPASS(besttd != NULL); thread_lock(besttd); - sleepq_resume_thread(sq, besttd, pri); + wakeup_swapper = sleepq_resume_thread(sq, besttd, pri); thread_unlock(besttd); + return (wakeup_swapper); } /* @@ -783,6 +801,7 @@ { struct sleepqueue *sq; struct thread *td; + int wakeup_swapper; CTR2(KTR_PROC, "sleepq_broadcast(%p, %d)", wchan, flags); KASSERT(wchan != NULL, ("%s: invalid NULL wait channel", __func__)); @@ -796,13 +815,17 @@ ("%s: mismatch between sleep/wakeup and cv_*", __func__)); /* Resume all blocked threads on the sleep queue. 
*/ + wakeup_swapper = 0; while (!TAILQ_EMPTY(&sq->sq_blocked[queue])) { td = TAILQ_FIRST(&sq->sq_blocked[queue]); thread_lock(td); - sleepq_resume_thread(sq, td, pri); + if (sleepq_resume_thread(sq, td, pri)) + wakeup_swapper = 1; thread_unlock(td); } sleepq_release(wchan); + if (wakeup_swapper) + kick_proc0(); } /* @@ -816,8 +839,10 @@ struct sleepqueue *sq; struct thread *td; void *wchan; + int wakeup_swapper; td = arg; + wakeup_swapper = 0; CTR3(KTR_PROC, "sleepq_timeout: thread %p (pid %ld, %s)", (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm); @@ -833,8 +858,10 @@ sq = sleepq_lookup(wchan); MPASS(sq != NULL); td->td_flags |= TDF_TIMEOUT; - sleepq_resume_thread(sq, td, -1); + wakeup_swapper = sleepq_resume_thread(sq, td, -1); thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); return; } @@ -863,10 +890,12 @@ MPASS(TD_IS_SLEEPING(td)); td->td_flags &= ~TDF_TIMEOUT; TD_CLR_SLEEPING(td); - setrunnable(td); + wakeup_swapper = setrunnable(td); } else td->td_flags |= TDF_TIMOFAIL; thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); } /* @@ -877,6 +906,7 @@ sleepq_remove(struct thread *td, void *wchan) { struct sleepqueue *sq; + int wakeup_swapper; /* * Look up the sleep queue for this wait channel, then re-check @@ -900,16 +930,18 @@ thread_lock(td); MPASS(sq != NULL); MPASS(td->td_wchan == wchan); - sleepq_resume_thread(sq, td, -1); + wakeup_swapper = sleepq_resume_thread(sq, td, -1); thread_unlock(td); sleepq_release(wchan); + if (wakeup_swapper) + kick_proc0(); } /* * Abort a thread as if an interrupt had occurred. Only abort * interruptible waits (unfortunately it isn't safe to abort others). */ -void +int sleepq_abort(struct thread *td, int intrval) { struct sleepqueue *sq; @@ -926,7 +958,7 @@ * timeout is scheduled anyhow. 
*/ if (td->td_flags & TDF_TIMEOUT) - return; + return (0); CTR3(KTR_PROC, "sleepq_abort: thread %p (pid %ld, %s)", (void *)td, (long)td->td_proc->p_pid, (void *)td->td_proc->p_comm); @@ -938,14 +970,14 @@ * we have to do it here. */ if (!TD_IS_SLEEPING(td)) - return; + return (0); wchan = td->td_wchan; MPASS(wchan != NULL); sq = sleepq_lookup(wchan); MPASS(sq != NULL); /* Thread is asleep on sleep queue sq, so wake it up. */ - sleepq_resume_thread(sq, td, -1); + return (sleepq_resume_thread(sq, td, -1)); } #ifdef DDB Index: sys/netinet/cluster/alchemyos_interface.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/netinet/cluster/alchemyos_interface.c,v retrieving revision 1.12.2.6 diff -u -r1.12.2.6 alchemyos_interface.c --- sys/netinet/cluster/alchemyos_interface.c 4 Jan 2008 18:46:19 -0000 1.12.2.6 +++ sys/netinet/cluster/alchemyos_interface.c 20 Aug 2008 13:54:32 -0000 @@ -2264,6 +2264,7 @@ struct thread *td; struct App_Asynchronous_Call *async_call; struct proc *p = NULL; + int wakeup_swapper; if (!(Call->Flags & ASYNCHRONOUS_CALL_QUEUED)) { CLUSTER_RDLOCK(&IP_Cluster_Async_Lock); @@ -2296,11 +2297,15 @@ p->p_flag |= P_ASYNC_CALL; thread_lock(td); if (TD_ON_SLEEPQ(td)) { + wakeup_swapper = 0; if (td->td_wmesg != NULL && (strcmp(td->td_wmesg, "select") == 0)) - sleepq_abort(td, EWOULDBLOCK); + wakeup_swapper = + sleepq_abort(td, EWOULDBLOCK); PROC_UNLOCK(p); thread_unlock(td); + if (wakeup_swapper) + kick_proc0(); return; } thread_unlock(td); Index: sys/netinet/cluster/ipso-compatibility.h =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/netinet/cluster/ipso-compatibility.h,v retrieving revision 1.2.4.3 diff -u -r1.2.4.3 ipso-compatibility.h --- sys/netinet/cluster/ipso-compatibility.h 29 Oct 2007 09:24:32 -0000 1.2.4.3 +++ sys/netinet/cluster/ipso-compatibility.h 20 Aug 2008 13:54:32 -0000 @@ -249,6 +249,7 @@ if (!((XXXCall)->Flags & ASYNCHRONOUS_CALL_QUEUED)) 
{ \ struct thread *td; \ struct proc *XXXp = pfind((XXXCall)->Pid); \ + int wakeup_swapper = 0; \ struct App_Asynchronous_Call *async_call = get_match_async_call((XXXCall)->Pid);\ if (XXXp) { \ mtx_lock_spin(&sched_lock); \ @@ -270,7 +271,7 @@ (XXXp)->p_flag |= P_ASYNC_CALL; \ if ((TD_ON_SLEEPQ(td)) && (td->td_wmesg) && \ (strcmp(td->td_wmesg, "select") == 0)) { \ - setrunnable(td); \ + wakeup_swapper = setrunnable(td); \ } \ } \ } else { \ @@ -278,6 +279,8 @@ free(XXXCall, M_IP_CLUSTER); \ } \ mtx_unlock_spin(&sched_lock); \ + if (wakeup_swapper) \ + kick_proc0(); \ } else { \ PROC_UNLOCK(XXXp); \ if ((XXXCall)->Flags & ASYNCHRONOUS_CALL_KERNEL_MALLOC) \ Index: sys/sys/proc.h =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/sys/proc.h,v retrieving revision 1.4.4.3.2.1.2.4 diff -u -r1.4.4.3.2.1.2.4 proc.h --- sys/sys/proc.h 25 Jul 2008 07:18:02 -0000 1.4.4.3.2.1.2.4 +++ sys/sys/proc.h 20 Aug 2008 13:54:32 -0000 @@ -882,7 +882,7 @@ int securelevel_ge(struct ucred *cr, int level); int securelevel_gt(struct ucred *cr, int level); void sessrele(struct session *); -void setrunnable(struct thread *); +int setrunnable(struct thread *); void setsugid(struct proc *p); int sigonstack(size_t sp); void sleepinit(void); @@ -939,7 +939,7 @@ struct thread *newtd); void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); -void thread_unsuspend_one(struct thread *td); +int thread_unsuspend_one(struct thread *td); void thread_unthread(struct thread *td); int thread_userret(struct thread *td, struct trapframe *frame); void thread_user_enter(struct thread *td); Index: sys/sys/sleepqueue.h =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/sys/sleepqueue.h,v retrieving revision 1.3.4.1 diff -u -r1.3.4.1 sleepqueue.h --- sys/sys/sleepqueue.h 13 Dec 2007 10:42:24 -0000 1.3.4.1 +++ sys/sys/sleepqueue.h 20 Aug 2008 13:54:32 -0000 @@ -90,7 +90,7 @@ #define 
SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */ void init_sleepqueues(void); -void sleepq_abort(struct thread *td, int intrval); +int sleepq_abort(struct thread *td, int intrval); void sleepq_add(void *wchan, struct lock_object *lock, const char *wmesg, int flags, int queue); struct sleepqueue *sleepq_alloc(void); @@ -100,7 +100,7 @@ struct sleepqueue *sleepq_lookup(void *wchan); void sleepq_release(void *wchan); void sleepq_remove(struct thread *td, void *wchan); -void sleepq_signal(void *wchan, int flags, int pri, int queue); +int sleepq_signal(void *wchan, int flags, int pri, int queue); void sleepq_set_timeout(void *wchan, int timo); int sleepq_timedwait(void *wchan); int sleepq_timedwait_sig(void *wchan); Index: sys/vm/vm_glue.c =================================================================== RCS file: /CVS/CVS_IPSO/src/sys/vm/vm_glue.c,v retrieving revision 1.2.4.1.4.2 diff -u -r1.2.4.1.4.2 vm_glue.c --- sys/vm/vm_glue.c 25 Jul 2008 07:18:03 -0000 1.2.4.1.4.2 +++ sys/vm/vm_glue.c 20 Aug 2008 13:54:32 -0000 @@ -116,10 +116,6 @@ static void swapclear(struct proc *); #endif - -static volatile int proc0_rescan; - - /* * MPSAFE * @@ -686,9 +682,6 @@ loop: if (vm_page_count_min()) { VM_WAIT; - thread_lock(&thread0); - proc0_rescan = 0; - thread_unlock(&thread0); goto loop; } @@ -737,13 +730,7 @@ * Nothing to do, back to sleep. 
*/ if ((p = pp) == NULL) { - thread_lock(&thread0); - if (!proc0_rescan) { - TD_SET_IWAIT(&thread0); - mi_switch(SW_VOL | SWT_IWAIT, NULL); - } - proc0_rescan = 0; - thread_unlock(&thread0); + tsleep(&proc0, PVM, "sched", maxslp * hz / 2); goto loop; } PROC_LOCK(p); @@ -755,9 +742,6 @@ */ if (p->p_flag & (P_INMEM | P_SWAPPINGOUT | P_SWAPPINGIN)) { PROC_UNLOCK(p); - thread_lock(&thread0); - proc0_rescan = 0; - thread_unlock(&thread0); goto loop; } @@ -767,32 +751,16 @@ */ faultin(p); PROC_UNLOCK(p); - thread_lock(&thread0); - proc0_rescan = 0; - thread_unlock(&thread0); goto loop; } -void kick_proc0(void) +void +kick_proc0(void) { - struct thread *td = &thread0; - /* XXX This will probably cause a LOR in some cases */ - thread_lock(td); - if (TD_AWAITING_INTR(td)) { - CTR2(KTR_INTR, "%s: sched_add %d", __func__, 0); - TD_CLR_IWAIT(td); - sched_add(td, SRQ_INTR); - } else { - proc0_rescan = 1; - CTR2(KTR_INTR, "%s: state %d", - __func__, td->td_state); - } - thread_unlock(td); - + wakeup(&proc0); } - #ifndef NO_SWAPPING /* @@ -994,7 +962,16 @@ td->td_flags &= ~TDF_SWAPINREQ; TD_CLR_SWAPPED(td); if (TD_CAN_RUN(td)) - setrunnable(td); + if (setrunnable(td)) { +#ifdef INVARIANTS + /* + * We just cleared TDI_SWAPPED above + * and set TDF_INMEM, so this should never + * happen. + */ + panic("not waking up swapper"); +#endif + } thread_unlock(td); } p->p_flag &= ~(P_SWAPPINGIN|P_SWAPPINGOUT);