--- //depot/projects/smpng/sys/compat/ndis/subr_hal.c	2004/08/10 18:03:16
+++ //depot/user/jhb/needresched/compat/ndis/subr_hal.c	2004/10/05 16:00:56
@@ -308,7 +308,6 @@
 	mtx_lock_spin(&sched_lock);
 	oldirql = curthread->td_base_pri;
 	sched_prio(curthread, PI_REALTIME);
-	curthread->td_base_pri = PI_REALTIME;
 	mtx_unlock_spin(&sched_lock);
 
 	return(oldirql);
@@ -324,7 +323,6 @@
 		panic("IRQL_NOT_GREATER_THAN");
 
 	mtx_lock_spin(&sched_lock);
-	curthread->td_base_pri = oldirql;
 	sched_prio(curthread, oldirql);
 	mtx_unlock_spin(&sched_lock);
--- //depot/projects/smpng/sys/dev/md/md.c	2004/09/22 15:31:15
+++ //depot/user/jhb/needresched/dev/md/md.c	2004/10/05 16:00:56
@@ -601,7 +601,9 @@
 	int error, hasgiant;
 
 	sc = arg;
-	curthread->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(curthread, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 
 	switch (sc->type) {
 	case MD_VNODE:
--- //depot/projects/smpng/sys/geom/geom_kern.c	2004/09/13 18:26:18
+++ //depot/user/jhb/needresched/geom/geom_kern.c	2004/10/05 16:00:56
@@ -87,7 +87,9 @@
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	tp->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(tp, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	for(;;) {
 		g_io_schedule_up(tp);
 	}
@@ -108,7 +110,9 @@
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	tp->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(tp, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	for(;;) {
 		g_io_schedule_down(tp);
 	}
@@ -129,7 +133,9 @@
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	tp->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(tp, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	for(;;) {
 		g_run_events();
 		tsleep(&g_wait_event, PRIBIO, "-", hz/10);
--- //depot/projects/smpng/sys/geom/mirror/g_mirror.c	2004/10/05 19:15:26
+++ //depot/user/jhb/needresched/geom/mirror/g_mirror.c	2004/10/05 19:33:13
@@ -1351,7 +1351,9 @@
 	u_int nreqs;
 
 	sc = arg;
-	curthread->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(curthread, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 
 	nreqs = 0;
 	for (;;) {
--- //depot/projects/smpng/sys/geom/raid3/g_raid3.c	2004/10/01 14:25:19
+++ //depot/user/jhb/needresched/geom/raid3/g_raid3.c	2004/10/05 16:00:56
@@ -1599,7 +1599,9 @@
 	u_int nreqs;
 
 	sc = arg;
-	curthread->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(curthread, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 
 	nreqs = 0;
 	for (;;) {
--- //depot/projects/smpng/sys/kern/kern_resource.c	2004/10/05 19:15:26
+++ //depot/user/jhb/needresched/kern/kern_resource.c	2004/10/05 19:33:13
@@ -423,7 +423,6 @@
 	}
 	sched_class(kg, rtp->type);
 	if (curthread->td_ksegrp == kg) {
-		curthread->td_base_pri = kg->kg_user_pri;
 		sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
 	}
 	return (0);
--- //depot/projects/smpng/sys/kern/kern_synch.c	2004/10/05 19:15:26
+++ //depot/user/jhb/needresched/kern/kern_synch.c	2004/10/05 19:33:13
@@ -211,8 +211,6 @@
 	/*
 	 * Adjust this thread's priority.
-	 *
-	 * XXX: do we need to save priority in td_base_pri?
 	 */
 	mtx_lock_spin(&sched_lock);
 	sched_prio(td, priority & PRIMASK);
--- //depot/projects/smpng/sys/kern/sched_4bsd.c	2004/09/22 15:31:15
+++ //depot/user/jhb/needresched/kern/sched_4bsd.c	2004/10/05 16:00:56
@@ -727,7 +727,7 @@
  * and moving a KSE in the system run queue.
  */
 void
-sched_prio(struct thread *td, u_char prio)
+sched_lend_prio(struct thread *td, u_char prio)
 {
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -739,12 +739,19 @@
 }
 
 void
+sched_prio(struct thread *td, u_char prio)
+{
+
+	sched_lend_prio(td, prio);
+	td->td_base_pri = prio;
+}
+
+void
 sched_sleep(struct thread *td)
 {
 
 	mtx_assert(&sched_lock, MA_OWNED);
 	td->td_ksegrp->kg_slptime = 0;
-	td->td_base_pri = td->td_priority;
 }
 
 static void remrunqueue(struct thread *td);
@@ -1119,6 +1126,7 @@
 	if (td->td_priority != kg->kg_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
+		td->td_base_pri = kg->kg_user_pri;
 		mtx_unlock_spin(&sched_lock);
 	}
 }
--- //depot/projects/smpng/sys/kern/sched_ule.c	2004/09/22 15:31:15
+++ //depot/user/jhb/needresched/kern/sched_ule.c	2004/10/05 16:00:56
@@ -1192,7 +1192,7 @@
 }
 
 void
-sched_prio(struct thread *td, u_char prio)
+sched_lend_prio(struct thread *td, u_char prio)
 {
 	struct kse *ke;
 
@@ -1224,6 +1224,14 @@
 }
 
 void
+sched_prio(struct thread *td, u_char prio)
+{
+
+	sched_lend_prio(td, prio);
+	td->td_base_pri = prio;
+}
+
+void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
 	struct kse *ke;
@@ -1328,7 +1337,6 @@
 	mtx_assert(&sched_lock, MA_OWNED);
 
 	td->td_slptime = ticks;
-	td->td_base_pri = td->td_priority;
 
 	CTR2(KTR_ULE, "sleep thread %p (tick: %d)",
 	    td, td->td_slptime);
@@ -1615,6 +1623,7 @@
 	if (td->td_priority != kg->kg_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
+		td->td_base_pri = kg->kg_user_pri;
 		mtx_unlock_spin(&sched_lock);
 	}
 }
--- //depot/projects/smpng/sys/kern/subr_turnstile.c	2004/10/05 19:15:26
+++ //depot/user/jhb/needresched/kern/subr_turnstile.c	2004/10/05 19:33:13
@@ -198,10 +198,16 @@
 		return;
 
 	/*
-	 * If lock holder is actually running, just bump priority.
+	 * Bump this thread's priority.
+	 */
+	sched_lend_prio(td, pri);
+
+	/*
+	 * If lock holder is actually running or on the run queue
+	 * then we are done.
 	 */
-	if (TD_IS_RUNNING(td)) {
-		td->td_priority = pri;
+	if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
+		MPASS(td->td_blocked == NULL);
 		return;
 	}
 
@@ -214,22 +220,6 @@
 #endif
 
 	/*
-	 * If on run queue move to new run queue, and quit.
-	 * XXXKSE this gets a lot more complicated under threads
-	 * but try anyhow.
-	 */
-	if (TD_ON_RUNQ(td)) {
-		MPASS(td->td_blocked == NULL);
-		sched_prio(td, pri);
-		return;
-	}
-
-	/*
-	 * Bump this thread's priority.
-	 */
-	td->td_priority = pri;
-
-	/*
 	 * If we aren't blocked on a lock, we should be.
 	 */
 	KASSERT(TD_ON_LOCK(td), (
@@ -455,7 +445,7 @@
 	 */
 	mtx_lock_spin(&sched_lock);
 	if (td->td_priority < owner->td_priority)
-		owner->td_priority = td->td_priority;
+		sched_lend_prio(owner, td->td_priority);
 	mtx_unlock_spin(&sched_lock);
 }
@@ -719,7 +709,7 @@
 	mtx_unlock_spin(&td_contested_lock);
 	if (pri > td->td_base_pri)
 		pri = td->td_base_pri;
-	td->td_priority = pri;
+	sched_lend_prio(td, pri);
 
 	/*
 	 * Wake up all the pending threads.  If a thread is not blocked
--- //depot/projects/smpng/sys/sys/sched.h	2004/09/22 15:31:15
+++ //depot/user/jhb/needresched/sys/sched.h	2004/10/05 16:00:56
@@ -65,6 +65,7 @@
 void	sched_fork_thread(struct thread *td, struct thread *child);
 fixpt_t	sched_pctcpu(struct thread *td);
 void	sched_prio(struct thread *td, u_char prio);
+void	sched_lend_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td);
 void	sched_switch(struct thread *td, struct thread *newtd, int flags);
 void	sched_userret(struct thread *td);
--- //depot/projects/smpng/sys/vm/vm_zeroidle.c	2004/09/09 19:48:37
+++ //depot/user/jhb/needresched/vm/vm_zeroidle.c	2004/10/05 16:00:56
@@ -142,9 +142,6 @@
 	td = curthread;
 	p = td->td_proc;
-	mtx_lock_spin(&sched_lock);
-	sched_prio(td, PRI_MAX_IDLE);
-	mtx_unlock_spin(&sched_lock);
 	idlezero_enable = idlezero_enable_default;
 
 	for (;;) {
@@ -181,6 +178,7 @@
 	pagezero_proc->p_flag |= P_NOLOAD;
 	PROC_UNLOCK(pagezero_proc);
 	mtx_lock_spin(&sched_lock);
+	sched_prio(td, PRI_MAX_IDLE);
 	setrunqueue(FIRST_THREAD_IN_PROC(pagezero_proc), SRQ_BORING);
 	mtx_unlock_spin(&sched_lock);
 }