diff -ur src.old/sys/kern/kern_intr.c src/sys/kern/kern_intr.c
--- src.old/sys/kern/kern_intr.c	Sat Apr 8 12:14:56 2006
+++ src/sys/kern/kern_intr.c	Sat Apr 8 14:51:45 2006
@@ -81,6 +81,11 @@
 void	*softclock_ih;
 void	*vm_ih;
 
+static int intr_stolen = 0;
+int intr_unstolen = 0;
+SYSCTL_INT(_hw, OID_AUTO, intr_stolen, CTLFLAG_RW, &intr_stolen, 0, "");
+SYSCTL_INT(_hw, OID_AUTO, intr_unstolen, CTLFLAG_RW, &intr_unstolen, 0, "");
+
 static MALLOC_DEFINE(M_ITHREAD, "ithread", "Interrupt Threads");
 
 static int intr_storm_threshold = 500;
@@ -528,10 +533,21 @@
 	it->it_need = 1;
 	mtx_lock_spin(&sched_lock);
 	if (TD_AWAITING_INTR(td)) {
-		CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
-		    p->p_comm);
 		TD_CLR_IWAIT(td);
-		setrunqueue(td, SRQ_INTR);
+		if (!cold && td->td_priority < ctd->td_priority &&
+		    ctd->td_critnest <= 1 && td->td_interrupted == NULL) {
+			intr_stolen++;
+			td->td_interrupted = ctd;
+			TD_SET_RUNNING(td);
+			TD_SET_CAN_RUN(ctd);
+			cpu_switch(ctd, td);
+			TD_SET_RUNNING(ctd);
+			sched_lock.mtx_lock = (uintptr_t)ctd;
+		} else {
+			CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
+			    p->p_comm);
+			setrunqueue(td, SRQ_INTR);
+		}
 	} else {
 		CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
 		    __func__, p->p_pid, p->p_comm, it->it_need, td->td_state);
@@ -539,6 +555,26 @@
 	mtx_unlock_spin(&sched_lock);
 
 	return (0);
+}
+
+int
+ithread_switch(struct thread *td, struct thread *newtd)
+{
+	if (td->td_interrupted == NULL)
+		return (0);
+
+	if (newtd)
+		panic("unexpected newtd");
+
+	if (TD_IS_RUNNING(td))
+		setrunqueue(td, SRQ_OURSELF|SRQ_YIELDING);
+	if (!TD_AWAITING_INTR(td))
+		intr_unstolen++;
+	newtd = td->td_interrupted;
+	td->td_interrupted = NULL;
+	cpu_switch(td, newtd);
+	sched_lock.mtx_lock = (uintptr_t)td;
+	return (1);
 }
 
 /*
diff -ur src.old/sys/kern/kern_synch.c src/sys/kern/kern_synch.c
--- src.old/sys/kern/kern_synch.c	Sat Apr 8 12:14:57 2006
+++ src/sys/kern/kern_synch.c	Sat Apr 8 13:14:42 2006
@@ -61,6 +61,8 @@
 #include
 #include
 #endif
+#include
+#include
 
 #include
@@ -354,6 +356,10 @@
 	KASSERT((flags & (SW_INVOL | SW_VOL)) != 0,
 	    ("mi_switch: switch must be voluntary or involuntary"));
 	KASSERT(newtd != curthread, ("mi_switch: preempting back to ourself"));
+
+	if (td->td_pflags & TDP_ITHREAD)
+		if (ithread_switch(td, newtd))
+			return;
 
 	if (flags & SW_VOL)
 		p->p_stats->p_ru.ru_nvcsw++;
diff -ur src.old/sys/kern/subr_turnstile.c src/sys/kern/subr_turnstile.c
--- src.old/sys/kern/subr_turnstile.c	Sat Apr 8 12:14:58 2006
+++ src/sys/kern/subr_turnstile.c	Sat Apr 8 16:15:55 2006
@@ -218,7 +218,7 @@
 	 * If lock holder is actually running or on the run queue
	 * then we are done.
 	 */
-	if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
+	if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td) || TD_CAN_RUN(td)) {
 		MPASS(td->td_blocked == NULL);
 		return;
 	}
@@ -871,7 +871,7 @@
 			setrunqueue(td, SRQ_BORING);
 		} else {
 			td->td_flags |= TDF_TSNOBLOCK;
-			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td));
+			MPASS(TD_IS_RUNNING(td) || TD_ON_RUNQ(td) || TD_CAN_RUN(td));
 		}
 	}
 	critical_exit();
 }
diff -ur src.old/sys/sys/interrupt.h src/sys/sys/interrupt.h
--- src.old/sys/sys/interrupt.h	Sat Apr 8 12:15:29 2006
+++ src/sys/sys/interrupt.h	Sat Apr 8 13:18:16 2006
@@ -102,6 +102,8 @@
 extern void	*softclock_ih;
 extern void	*vm_ih;
 
+struct thread;
+
 /* Counts and names for statistics (defined in MD code).
 */
 extern u_long	eintrcnt[];	/* end of intrcnt[] */
 extern char	eintrnames[];	/* end of intrnames[] */
@@ -121,6 +123,7 @@
 int	intr_event_destroy(struct intr_event *ie);
 int	intr_event_remove_handler(void *cookie);
 int	intr_event_schedule_thread(struct intr_event *ie);
+int	ithread_switch(struct thread *td, struct thread *newtd);
 int	swi_add(struct intr_event **eventp, const char *name,
	    driver_intr_t handler, void *arg, int pri, enum intr_type flags,
	    void **cookiep);
diff -ur src.old/sys/sys/proc.h src/sys/sys/proc.h
--- src.old/sys/sys/proc.h	Sat Apr 8 12:15:31 2006
+++ src/sys/sys/proc.h	Sat Apr 8 12:29:20 2006
@@ -273,6 +273,7 @@
 	short		td_locks;	/* (k) DEBUG: lockmgr count of locks. */
 	u_char		td_tsqueue;	/* (j) Turnstile queue blocked on. */
 	struct turnstile *td_blocked;	/* (j) Lock thread is blocked on. */
+	struct thread	*td_interrupted;/* (k*) Thread we interrupted */
 	const char	*td_lockname;	/* (j) Name of lock blocked on. */
 	LIST_HEAD(, turnstile) td_contested;	/* (q) Contested locks. */
 	struct lock_list_entry *td_sleeplocks;	/* (k) Held sleep locks. */