--- //depot/projects/smpng/sys/compat/ndis/subr_hal.c 2004/08/10 18:03:16
+++ //depot/user/jhb/needresched/compat/ndis/subr_hal.c 2004/10/05 16:00:56
@@ -308,7 +308,6 @@
 	mtx_lock_spin(&sched_lock);
 	oldirql = curthread->td_base_pri;
 	sched_prio(curthread, PI_REALTIME);
-	curthread->td_base_pri = PI_REALTIME;
 	mtx_unlock_spin(&sched_lock);
 
 	return(oldirql);
@@ -324,7 +323,6 @@
 		panic("IRQL_NOT_GREATER_THAN");
 
 	mtx_lock_spin(&sched_lock);
-	curthread->td_base_pri = oldirql;
 	sched_prio(curthread, oldirql);
 	mtx_unlock_spin(&sched_lock);
--- //depot/projects/smpng/sys/dev/md/md.c 2004/11/04 21:07:58
+++ //depot/user/jhb/needresched/dev/md/md.c 2004/11/04 21:42:47
@@ -71,6 +71,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -617,7 +618,9 @@
 	int error, hasgiant;
 
 	sc = arg;
-	curthread->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(curthread, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 
 	switch (sc->type) {
 	case MD_VNODE:
--- //depot/projects/smpng/sys/geom/geom_kern.c 2004/09/13 18:26:18
+++ //depot/user/jhb/needresched/geom/geom_kern.c 2004/10/07 17:50:08
@@ -47,8 +47,9 @@
 #include
 #include
 #include
+#include
+#include
 #include
-#include
 #include
 #include
@@ -87,7 +88,9 @@
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	tp->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(tp, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	for(;;) {
 		g_io_schedule_up(tp);
 	}
@@ -108,7 +111,9 @@
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	tp->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(tp, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	for(;;) {
 		g_io_schedule_down(tp);
 	}
@@ -129,7 +134,9 @@
 	struct thread *tp = FIRST_THREAD_IN_PROC(p);
 
 	mtx_assert(&Giant, MA_NOTOWNED);
-	tp->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(tp, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	for(;;) {
 		g_run_events();
 		tsleep(&g_wait_event, PRIBIO, "-", hz/10);
--- //depot/projects/smpng/sys/geom/mirror/g_mirror.c 2004/11/05 19:22:55
+++ //depot/user/jhb/needresched/geom/mirror/g_mirror.c 2004/11/05 19:44:45
@@ -42,6 +42,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -1436,7 +1437,9 @@
 	u_int nreqs;
 
 	sc = arg;
-	curthread->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(curthread, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	nreqs = 0;
 
 	for (;;) {
--- //depot/projects/smpng/sys/geom/raid3/g_raid3.c 2004/11/05 19:22:55
+++ //depot/user/jhb/needresched/geom/raid3/g_raid3.c 2004/11/05 19:44:45
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -1693,7 +1694,9 @@
 	u_int nreqs;
 
 	sc = arg;
-	curthread->td_base_pri = PRIBIO;
+	mtx_lock_spin(&sched_lock);
+	sched_prio(curthread, PRIBIO);
+	mtx_unlock_spin(&sched_lock);
 	nreqs = 0;
 
 	for (;;) {
--- //depot/projects/smpng/sys/kern/kern_resource.c 2004/10/05 19:15:26
+++ //depot/user/jhb/needresched/kern/kern_resource.c 2004/10/08 18:38:03
@@ -423,7 +431,6 @@
 	}
 	sched_class(kg, rtp->type);
 	if (curthread->td_ksegrp == kg) {
-		curthread->td_base_pri = kg->kg_user_pri;
 		sched_prio(curthread, kg->kg_user_pri); /* XXX dubious */
 	}
 	return (0);
--- //depot/projects/smpng/sys/kern/kern_synch.c 2004/11/12 17:46:12
+++ //depot/user/jhb/needresched/kern/kern_synch.c 2004/10/21 17:01:58
@@ -210,8 +210,6 @@
 	/*
 	 * Adjust this thread's priority.
-	 *
-	 * XXX: do we need to save priority in td_base_pri?
 	 */
 	mtx_lock_spin(&sched_lock);
 	sched_prio(td, priority & PRIMASK);
--- //depot/projects/smpng/sys/kern/sched_4bsd.c 2004/10/19 20:31:02
+++ //depot/user/jhb/needresched/kern/sched_4bsd.c 2004/10/21 17:01:58
@@ -50,6 +50,7 @@
 #include
 #include
 #include
+#include
 #include
 
 /*
@@ -164,10 +165,12 @@
 static void	roundrobin(void *arg);
 static void	schedcpu(void);
 static void	schedcpu_thread(void);
+static void	sched_priority(struct thread *td, u_char prio);
 static void	sched_setup(void *dummy);
 static void	maybe_resched(struct thread *td);
 static void	updatepri(struct ksegrp *kg);
 static void	resetpriority(struct ksegrp *kg);
+static void	resetpriority_thread(struct thread *td, struct ksegrp *kg);
 #ifdef SMP
 static int	forward_wakeup(int cpunum);
 #endif
@@ -508,9 +511,7 @@
 			kg->kg_estcpu = decay_cpu(loadfac, kg->kg_estcpu);
 			resetpriority(kg);
 			FOREACH_THREAD_IN_GROUP(kg, td) {
-				if (td->td_priority >= PUSER) {
-					sched_prio(td, kg->kg_user_pri);
-				}
+				resetpriority_thread(td, kg);
 			}
 		} /* end of ksegrp loop */
 		mtx_unlock_spin(&sched_lock);
@@ -553,7 +554,6 @@
 			newcpu = decay_cpu(loadfac, newcpu);
 		kg->kg_estcpu = newcpu;
 	}
-	resetpriority(kg);
 }
 
 /*
@@ -565,7 +565,6 @@
 resetpriority(struct ksegrp *kg)
 {
 	register unsigned int newpriority;
-	struct thread *td;
 
 	if (kg->kg_pri_class == PRI_TIMESHARE) {
 		newpriority = PUSER + kg->kg_estcpu / INVERSE_ESTCPU_WEIGHT +
@@ -574,9 +573,25 @@
 		    PRI_MAX_TIMESHARE);
 		kg->kg_user_pri = newpriority;
 	}
-	FOREACH_THREAD_IN_GROUP(kg, td) {
-		maybe_resched(td);			/* XXXKSE silly */
-	}
+}
+
+/*
+ * Update the thread's priority when the associated ksegroup's user
+ * priority changes.
+ */
+static void
+resetpriority_thread(struct thread *td, struct ksegrp *kg)
+{
+
+	/* Only change threads with a time sharing user priority. */
+	if (td->td_priority < PRI_MIN_TIMESHARE ||
+	    td->td_priority > PRI_MAX_TIMESHARE)
+		return;
+
+	/* XXX the whole needresched thing is broken, but not silly. */
+	maybe_resched(td);
+
+	sched_prio(td, kg->kg_user_pri);
 }
 
 /* ARGSUSED */
@@ -667,8 +682,7 @@
 		kg->kg_estcpu = ESTCPULIM(kg->kg_estcpu + 1);
 		if ((kg->kg_estcpu % INVERSE_ESTCPU_WEIGHT) == 0) {
 			resetpriority(kg);
-			if (td->td_priority >= PUSER)
-				td->td_priority = kg->kg_user_pri;
+			resetpriority_thread(td, kg);
 		}
 	}
 
@@ -726,12 +740,16 @@
 sched_nice(struct proc *p, int nice)
 {
 	struct ksegrp *kg;
+	struct thread *td;
 
 	PROC_LOCK_ASSERT(p, MA_OWNED);
 	mtx_assert(&sched_lock, MA_OWNED);
 	p->p_nice = nice;
 	FOREACH_KSEGRP_IN_PROC(p, kg) {
 		resetpriority(kg);
+		FOREACH_THREAD_IN_GROUP(kg, td) {
+			resetpriority_thread(td, kg);
+		}
 	}
 }
 
@@ -748,11 +766,13 @@
  * changing the assignment of a kse to the thread,
 * and moving a KSE in the system run queue.
 */
-void
-sched_prio(struct thread *td, u_char prio)
+static void
+sched_priority(struct thread *td, u_char prio)
 {
 
 	mtx_assert(&sched_lock, MA_OWNED);
+	if (td->td_priority == prio)
+		return;
 	if (TD_ON_RUNQ(td)) {
 		adjustrunqueue(td, prio);
 	} else {
@@ -760,13 +780,76 @@
 	}
 }
 
+/*
+ * Update a thread's priority when it is lent another thread's
+ * priority.
+ */
+void
+sched_lend_prio(struct thread *td, u_char prio)
+{
+
+	td->td_flags |= TDF_BORROWING;
+	sched_priority(td, prio);
+}
+
+/*
+ * Restore a thread's priority when priority propagation is
+ * over.  The prio argument is the minimum priority the thread
+ * needs to have to satisfy other possible priority lending
+ * requests.  If the thread's regular priority is less
+ * important than prio, the thread will keep a priority boost
+ * of prio.
+ */
 void
+sched_unlend_prio(struct thread *td, u_char prio)
+{
+	u_char base_pri;
+
+	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
+	    td->td_base_pri <= PRI_MAX_TIMESHARE)
+		base_pri = td->td_ksegrp->kg_user_pri;
+	else
+		base_pri = td->td_base_pri;
+	if (prio >= base_pri) {
+		td->td_flags &= ~TDF_BORROWING;
+		sched_prio(td, base_pri);
+	} else
+		sched_lend_prio(td, prio);
+}
+
+void
+sched_prio(struct thread *td, u_char prio)
+{
+	u_char oldprio;
+
+	/* First, update the base priority. */
+	td->td_base_pri = prio;
+
+	/*
+	 * If the thread is borrowing another thread's priority, don't ever
+	 * lower the priority.
+	 */
+	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
+		return;
+
+	/* Change the real priority. */
+	oldprio = td->td_priority;
+	sched_priority(td, prio);
+
+	/*
+	 * If the thread is on a turnstile, then let the turnstile update
+	 * its state.
+	 */
+	if (TD_ON_LOCK(td) && oldprio != prio)
+		turnstile_adjust(td, oldprio);
+}
+
+void
 sched_sleep(struct thread *td)
 {
 	mtx_assert(&sched_lock, MA_OWNED);
 	td->td_ksegrp->kg_slptime = 0;
-	td->td_base_pri = td->td_priority;
 }
 
 static void remrunqueue(struct thread *td);
@@ -874,8 +957,10 @@
 	mtx_assert(&sched_lock, MA_OWNED);
 	kg = td->td_ksegrp;
-	if (kg->kg_slptime > 1)
+	if (kg->kg_slptime > 1) {
 		updatepri(kg);
+		resetpriority(kg);
+	}
 	kg->kg_slptime = 0;
 	setrunqueue(td, SRQ_BORING);
 }
@@ -1139,10 +1265,13 @@
 	 * it here and returning to user mode, so don't waste time setting
 	 * it perfectly here.
 	 */
+	KASSERT((td->td_flags & TDF_BORROWING) == 0,
+	    ("thread with borrowed priority returning to userland"));
 	kg = td->td_ksegrp;
 	if (td->td_priority != kg->kg_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
+		td->td_base_pri = kg->kg_user_pri;
 		mtx_unlock_spin(&sched_lock);
 	}
 }
--- //depot/projects/smpng/sys/kern/sched_ule.c 2004/11/04 21:07:58
+++ //depot/user/jhb/needresched/kern/sched_ule.c 2004/11/05 19:58:02
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include
 #include
 #ifdef KTRACE
 #include
 #endif
@@ -330,6 +331,7 @@
 static void	sched_add_internal(struct thread *td, int preemptive);
 static void	sched_slice(struct kse *ke);
 static void	sched_priority(struct ksegrp *kg);
+static void	sched_thread_priority(struct thread *td, u_char prio);
 static int	sched_interact_score(struct ksegrp *kg);
 static void	sched_interact_update(struct ksegrp *kg);
 static void	sched_interact_fork(struct ksegrp *kg);
@@ -1215,12 +1217,14 @@
 }
 
 void
-sched_prio(struct thread *td, u_char prio)
+sched_thread_priority(struct thread *td, u_char prio)
 {
 	struct kse *ke;
 
 	ke = td->td_kse;
 	mtx_assert(&sched_lock, MA_OWNED);
+	if (td->td_priority == prio)
+		return;
 	if (TD_ON_RUNQ(td)) {
 		/*
 		 * If the priority has been elevated due to priority
@@ -1246,7 +1250,71 @@
 		td->td_priority = prio;
 }
 
+/*
+ * Update a thread's priority when it is lent another thread's
+ * priority.
+ */
 void
+sched_lend_prio(struct thread *td, u_char prio)
+{
+
+	td->td_flags |= TDF_BORROWING;
+	sched_thread_priority(td, prio);
+}
+
+/*
+ * Restore a thread's priority when priority propagation is
+ * over.  The prio argument is the minimum priority the thread
+ * needs to have to satisfy other possible priority lending
+ * requests.  If the thread's regular priority is less
+ * important than prio, the thread will keep a priority boost
+ * of prio.
+ */
+void
+sched_unlend_prio(struct thread *td, u_char prio)
+{
+	u_char base_pri;
+
+	if (td->td_base_pri >= PRI_MIN_TIMESHARE &&
+	    td->td_base_pri <= PRI_MAX_TIMESHARE)
+		base_pri = td->td_ksegrp->kg_user_pri;
+	else
+		base_pri = td->td_base_pri;
+	if (prio >= base_pri) {
+		td->td_flags &= ~TDF_BORROWING;
+		sched_thread_priority(td, base_pri);
+	} else
+		sched_lend_prio(td, prio);
+}
+
+void
+sched_prio(struct thread *td, u_char prio)
+{
+	u_char oldprio;
+
+	/* First, update the base priority. */
+	td->td_base_pri = prio;
+
+	/*
+	 * If the thread is borrowing another thread's priority, don't
+	 * ever lower the priority.
+	 */
+	if (td->td_flags & TDF_BORROWING && td->td_priority < prio)
+		return;
+
+	/* Change the real priority. */
+	oldprio = td->td_priority;
+	sched_thread_priority(td, prio);
+
+	/*
+	 * If the thread is on a turnstile, then let the turnstile update
+	 * its state.
+	 */
+	if (TD_ON_LOCK(td) && oldprio != prio)
+		turnstile_adjust(td, oldprio);
+}
+
+void
 sched_switch(struct thread *td, struct thread *newtd, int flags)
 {
 	struct kse *ke;
@@ -1354,7 +1423,6 @@
 	mtx_assert(&sched_lock, MA_OWNED);
 
 	td->td_slptime = ticks;
-	td->td_base_pri = td->td_priority;
 
 	CTR2(KTR_ULE, "sleep thread %p (tick: %d)",
 	    td, td->td_slptime);
@@ -1636,11 +1704,13 @@
 {
 	struct ksegrp *kg;
 
-	kg = td->td_ksegrp;
-
+	KASSERT((td->td_flags & TDF_BORROWING) == 0,
+	    ("thread with borrowed priority returning to userland"));
+	kg = td->td_ksegrp;
 	if (td->td_priority != kg->kg_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
+		td->td_base_pri = kg->kg_user_pri;
 		mtx_unlock_spin(&sched_lock);
 	}
 }
--- //depot/projects/smpng/sys/kern/subr_turnstile.c 2004/10/12 19:09:56
+++ //depot/user/jhb/needresched/kern/subr_turnstile.c 2004/10/27 21:29:57
@@ -145,7 +145,9 @@
 #ifdef TURNSTILE_PROFILING
 static void	init_turnstile_profiling(void *arg);
 #endif
-static void	propagate_priority(struct thread *);
+static void	propagate_priority(struct thread *td);
+static int	turnstile_adjust_thread(struct turnstile *ts,
+		    struct thread *td);
 static void	turnstile_setowner(struct turnstile *ts, struct thread *owner);
 
 /*
@@ -158,7 +160,6 @@
 {
 	struct turnstile_chain *tc;
 	struct turnstile *ts;
-	struct thread *td1;
 	int pri;
 
 	mtx_assert(&sched_lock, MA_OWNED);
@@ -187,8 +188,8 @@
 		 * isn't SRUN or SLOCK.
 		 */
 		KASSERT(!TD_IS_SLEEPING(td),
-		    ("sleeping thread (pid %d) owns a non-sleepable lock",
-		    td->td_proc->p_pid));
+		    ("sleeping thread (tid %d) owns a non-sleepable lock",
+		    td->td_tid));
 
 		/*
 		 * If this thread already has higher priority than the
@@ -198,10 +199,16 @@
 			return;
 
 		/*
-		 * If lock holder is actually running, just bump priority.
+		 * Bump this thread's priority.
+		 */
+		sched_lend_prio(td, pri);
+
+		/*
+		 * If lock holder is actually running or on the run queue
+		 * then we are done.
 		 */
-		if (TD_IS_RUNNING(td)) {
-			td->td_priority = pri;
+		if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
+			MPASS(td->td_blocked == NULL);
 			return;
 		}
 
@@ -214,27 +221,11 @@
 #endif
 
 		/*
-		 * If on run queue move to new run queue, and quit.
-		 * XXXKSE this gets a lot more complicated under threads
-		 * but try anyhow.
-		 */
-		if (TD_ON_RUNQ(td)) {
-			MPASS(td->td_blocked == NULL);
-			sched_prio(td, pri);
-			return;
-		}
-
-		/*
-		 * Bump this thread's priority.
-		 */
-		td->td_priority = pri;
-
-		/*
 		 * If we aren't blocked on a lock, we should be.
 		 */
 		KASSERT(TD_ON_LOCK(td), (
-		    "process %d(%s):%d holds %s but isn't blocked on a lock\n",
-		    td->td_proc->p_pid, td->td_proc->p_comm, td->td_state,
+		    "thread %d(%s):%d holds %s but isn't blocked on a lock\n",
+		    td->td_tid, td->td_proc->p_comm, td->td_state,
 		    ts->ts_lockobj->lo_name));
 
 		/*
@@ -245,61 +236,81 @@
 		tc = TC_LOOKUP(ts->ts_lockobj);
 		mtx_lock_spin(&tc->tc_lock);
 
-		/*
-		 * This thread may not be blocked on this turnstile anymore
-		 * but instead might already be woken up on another CPU
-		 * that is waiting on sched_lock in turnstile_unpend() to
-		 * finish waking this thread up.  We can detect this case
-		 * by checking to see if this thread has been given a
-		 * turnstile by either turnstile_signal() or
-		 * turnstile_broadcast().  In this case, treat the thread as
-		 * if it was already running.
-		 */
-		if (td->td_turnstile != NULL) {
+		/* Resort td on the list if needed. */
+		if (!turnstile_adjust_thread(ts, td)) {
 			mtx_unlock_spin(&tc->tc_lock);
 			return;
 		}
+		mtx_unlock_spin(&tc->tc_lock);
+	}
+}
+
+/*
+ * Adjust the thread's position on a turnstile after its priority has been
+ * changed.
+ */
+static int
+turnstile_adjust_thread(struct turnstile *ts, struct thread *td)
+{
+	struct turnstile_chain *tc;
+	struct thread *td1, *td2;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	MPASS(TD_ON_LOCK(td));
 
-		/*
-		 * Check if the thread needs to be moved up on
-		 * the blocked chain.  It doesn't need to be moved
-		 * if it is already at the head of the list or if
-		 * the item in front of it still has a higher priority.
-		 */
-		if (td == TAILQ_FIRST(&ts->ts_blocked)) {
-			mtx_unlock_spin(&tc->tc_lock);
-			continue;
-		}
+	/*
+	 * This thread may not be blocked on this turnstile anymore
+	 * but instead might already be woken up on another CPU
+	 * that is waiting on sched_lock in turnstile_unpend() to
+	 * finish waking this thread up.  We can detect this case
+	 * by checking to see if this thread has been given a
+	 * turnstile by either turnstile_signal() or
+	 * turnstile_broadcast().  In this case, treat the thread as
+	 * if it was already running.
	 */
+	if (td->td_turnstile != NULL)
+		return (0);
 
-		td1 = TAILQ_PREV(td, threadqueue, td_lockq);
-		if (td1->td_priority <= pri) {
-			mtx_unlock_spin(&tc->tc_lock);
-			continue;
-		}
+	/*
+	 * Check if the thread needs to be moved on the blocked chain.
+	 * It needs to be moved if either its priority is lower than
+	 * the previous thread or higher than the next thread.
+	 */
+	tc = TC_LOOKUP(ts->ts_lockobj);
+	mtx_assert(&tc->tc_lock, MA_OWNED);
+	td1 = TAILQ_PREV(td, threadqueue, td_lockq);
+	td2 = TAILQ_NEXT(td, td_lockq);
+	if ((td1 != NULL && td->td_priority < td1->td_priority) ||
+	    (td2 != NULL && td->td_priority > td2->td_priority)) {
 		/*
 		 * Remove thread from blocked chain and determine where
-		 * it should be moved up to.  Since we know that td1 has
-		 * a lower priority than td, we know that at least one
-		 * thread in the chain has a lower priority and that
-		 * td1 will thus not be NULL after the loop.
+		 * it should be moved to.
 		 */
 		mtx_lock_spin(&td_contested_lock);
 		TAILQ_REMOVE(&ts->ts_blocked, td, td_lockq);
 		TAILQ_FOREACH(td1, &ts->ts_blocked, td_lockq) {
 			MPASS(td1->td_proc->p_magic == P_MAGIC);
-			if (td1->td_priority > pri)
+			if (td1->td_priority > td->td_priority)
 				break;
 		}
-		MPASS(td1 != NULL);
-		TAILQ_INSERT_BEFORE(td1, td, td_lockq);
+		if (td1 == NULL)
+			TAILQ_INSERT_TAIL(&ts->ts_blocked, td, td_lockq);
+		else
+			TAILQ_INSERT_BEFORE(td1, td, td_lockq);
 		mtx_unlock_spin(&td_contested_lock);
-		CTR4(KTR_LOCK,
-		    "propagate_priority: td %p moved before %p on [%p] %s",
-		    td, td1, ts->ts_lockobj, ts->ts_lockobj->lo_name);
-		mtx_unlock_spin(&tc->tc_lock);
+		if (td1 == NULL)
+			CTR3(KTR_LOCK,
+			    "turnstile_adjust_thread: td %d put at tail on [%p] %s",
+			    td->td_tid, ts->ts_lockobj, ts->ts_lockobj->lo_name);
+		else
+			CTR4(KTR_LOCK,
+			    "turnstile_adjust_thread: td %d moved before %d on [%p] %s",
+			    td->td_tid, td1->td_tid, ts->ts_lockobj,
+			    ts->ts_lockobj->lo_name);
 	}
+	return (1);
 }
 
 /*
@@ -355,6 +366,46 @@
 SYSINIT(turnstile0, SI_SUB_LOCK, SI_ORDER_ANY, init_turnstile0, NULL);
 
 /*
+ * Update a thread on the turnstile list after its priority has been changed.
+ * The old priority is passed in as an argument.
+ */
+void
+turnstile_adjust(struct thread *td, u_char oldpri)
+{
+	struct turnstile_chain *tc;
+	struct turnstile *ts;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	MPASS(TD_ON_LOCK(td));
+
+	/*
+	 * Pick up the lock that td is blocked on.
+	 */
+	ts = td->td_blocked;
+	MPASS(ts != NULL);
+	tc = TC_LOOKUP(ts->ts_lockobj);
+	mtx_lock_spin(&tc->tc_lock);
+
+	/* Resort the turnstile on the list. */
+	if (!turnstile_adjust_thread(ts, td)) {
+		mtx_unlock_spin(&tc->tc_lock);
+		return;
+	}
+
+	/*
+	 * If our priority was lowered and we are at the head of the
+	 * turnstile, then propagate our new priority up the chain.
+	 * Note that we currently don't try to revoke lent priorities
+	 * when our priority goes up.
+	 */
+	if (td == TAILQ_FIRST(&ts->ts_blocked) && td->td_priority < oldpri) {
+		mtx_unlock_spin(&tc->tc_lock);
+		propagate_priority(td);
+	} else
+		mtx_unlock_spin(&tc->tc_lock);
+}
+
+/*
 * Set the owner of the lock this turnstile is attached to.
 */
 static void
@@ -470,7 +521,7 @@
 	 */
 	mtx_lock_spin(&sched_lock);
 	if (td->td_priority < owner->td_priority)
-		owner->td_priority = td->td_priority;
+		sched_lend_prio(owner, td->td_priority);
 	mtx_unlock_spin(&sched_lock);
 }
 
@@ -578,14 +629,14 @@
 		propagate_priority(td);
 
 	if (LOCK_LOG_TEST(lock, 0))
-		CTR4(KTR_LOCK, "%s: td %p blocked on [%p] %s", __func__, td,
-		    lock, lock->lo_name);
+		CTR4(KTR_LOCK, "%s: td %d blocked on [%p] %s", __func__,
+		    td->td_tid, lock, lock->lo_name);
 
 	mi_switch(SW_VOL, NULL);
 
 	if (LOCK_LOG_TEST(lock, 0))
-		CTR4(KTR_LOCK, "%s: td %p free from blocked on [%p] %s",
-		    __func__, td, lock, lock->lo_name);
+		CTR4(KTR_LOCK, "%s: td %d free from blocked on [%p] %s",
+		    __func__, td->td_tid, lock, lock->lo_name);
 
 	mtx_unlock_spin(&sched_lock);
 }
@@ -692,7 +743,7 @@
 	TAILQ_HEAD( ,thread) pending_threads;
 	struct turnstile_chain *tc;
 	struct thread *td;
-	int cp, pri;
+	u_char cp, pri;
 
 	MPASS(ts != NULL);
 	MPASS(ts->ts_owner == curthread);
@@ -739,9 +790,7 @@
 			pri = cp;
 	}
 	mtx_unlock_spin(&td_contested_lock);
-	if (pri > td->td_base_pri)
-		pri = td->td_base_pri;
-	td->td_priority = pri;
+	sched_unlend_prio(td, pri);
 
 	/*
 	 * Wake up all the pending threads.  If a thread is not blocked
--- //depot/projects/smpng/sys/sys/proc.h 2004/11/04 21:07:58
+++ //depot/user/jhb/needresched/sys/proc.h 2004/11/04 21:42:47
@@ -327,6 +327,6 @@
 * Flags kept in td_flags:
 * To change these you MUST have the scheduler lock.
 */
-#define	TDF_UNUSED0	0x00000001 /* --available -- */
+#define	TDF_BORROWING	0x00000001 /* Thread is borrowing pri from another. */
 #define	TDF_INPANIC	0x00000002 /* Caused a panic, let it drive crashdump. */
 #define	TDF_SINTR	0x00000008 /* Sleep is interruptible. */
--- //depot/projects/smpng/sys/sys/sched.h 2004/09/22 15:31:15
+++ //depot/user/jhb/needresched/sys/sched.h 2004/10/08 15:25:39
@@ -65,8 +65,10 @@
 void	sched_fork_thread(struct thread *td, struct thread *child);
 fixpt_t	sched_pctcpu(struct thread *td);
 void	sched_prio(struct thread *td, u_char prio);
+void	sched_lend_prio(struct thread *td, u_char prio);
 void	sched_sleep(struct thread *td);
 void	sched_switch(struct thread *td, struct thread *newtd, int flags);
+void	sched_unlend_prio(struct thread *td, u_char prio);
 void	sched_userret(struct thread *td);
 void	sched_wakeup(struct thread *td);
--- //depot/projects/smpng/sys/sys/turnstile.h 2004/10/12 19:09:56
+++ //depot/user/jhb/needresched/sys/turnstile.h 2004/10/13 17:28:21
@@ -74,6 +74,7 @@
 #ifdef _KERNEL
 void	init_turnstiles(void);
+void	turnstile_adjust(struct thread *, u_char);
 struct turnstile *turnstile_alloc(void);
 void	turnstile_broadcast(struct turnstile *);
 void	turnstile_claim(struct lock_object *);
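
The standalone sketch below is not part of the patch; it only models the lend/unlend interface the patch introduces, so the behavior can be followed outside the kernel.  The thread structure is reduced to three fields, the ksegrp user-priority case in sched_unlend_prio() and all run-queue and turnstile bookkeeping are omitted, and the priorities used in main() are made up.  As in FreeBSD, a numerically lower value is a stronger priority.

/*
 * Illustrative model only (assumptions noted above), not kernel code.
 */
#include <stdio.h>

#define	TDF_BORROWING	0x00000001	/* thread is using a lent priority */

struct thread {
	int	td_flags;	/* TDF_BORROWING while a priority is lent */
	int	td_base_pri;	/* priority the thread owns itself */
	int	td_priority;	/* priority actually used for scheduling */
};

/* Common helper: change only the scheduling priority, never the base. */
static void
sched_priority(struct thread *td, int prio)
{

	if (td->td_priority == prio)
		return;
	td->td_priority = prio;	/* the real code also fixes up run queues */
}

/* A more important waiter lends its priority to the lock owner. */
static void
sched_lend_prio(struct thread *td, int prio)
{

	td->td_flags |= TDF_BORROWING;
	sched_priority(td, prio);
}

/* Normal base-priority change; never undercuts a lent priority. */
static void
sched_prio(struct thread *td, int prio)
{

	td->td_base_pri = prio;
	if ((td->td_flags & TDF_BORROWING) && td->td_priority < prio)
		return;
	sched_priority(td, prio);
}

/*
 * Called when a contested lock is released: prio is the strongest priority
 * still being lent by the remaining waiters (a worst-case value if none).
 */
static void
sched_unlend_prio(struct thread *td, int prio)
{

	if (prio >= td->td_base_pri) {
		td->td_flags &= ~TDF_BORROWING;
		sched_prio(td, td->td_base_pri);
	} else
		sched_lend_prio(td, prio);
}

int
main(void)
{
	struct thread owner = { 0, 120, 120 };

	sched_lend_prio(&owner, 80);	/* a waiter at 80 blocks on the owner */
	sched_prio(&owner, 130);	/* base drop is recorded but deferred */
	printf("borrowing: pri %d, base %d\n",
	    owner.td_priority, owner.td_base_pri);	/* 80, 130 */
	sched_unlend_prio(&owner, 255);	/* lock released, no waiters remain */
	printf("unlent:    pri %d, base %d\n",
	    owner.td_priority, owner.td_base_pri);	/* 130, 130 */
	return (0);
}

The point the model illustrates is the one the patch makes: an explicit TDF_BORROWING flag replaces the old habit of comparing td_priority against td_base_pri, so sched_prio() can record a base-priority change without undoing an active loan, and turnstile_unpend() hands the final decision to sched_unlend_prio() instead of writing td_priority directly.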