--- //depot/projects/smpng/sys/kern/kern_intr.c	2009/05/19 13:40:43
+++ //depot/user/jhb/intr_fast/kern/kern_intr.c	2009/05/20 17:51:14
@@ -67,6 +67,7 @@
 struct intr_thread {
        struct intr_event *it_event;
        struct thread *it_thread;       /* Kernel thread. */
+       struct thread *it_preempted;    /* Thread we preempted. */
        int     it_flags;               /* (j) IT_* flags. */
        int     it_need;                /* Needs service. */
 };
@@ -768,8 +769,11 @@
         * If no ithread or no handlers, then we have a stray interrupt.
         */
        if (ie == NULL || TAILQ_EMPTY(&ie->ie_handlers) ||
-           ie->ie_thread == NULL)
+           ie->ie_thread == NULL) {
+               if (ie != NULL && ie->ie_disable != NULL)
+                       ie->ie_disable(ie->ie_source);
                return (EINVAL);
+       }
 
        ctd = curthread;
        it = ie->ie_thread;
@@ -802,8 +806,32 @@
                CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid,
                    td->td_name);
                TD_CLR_IWAIT(td);
+#if 1
+               if (it->it_preempted)
+                       kdb_backtrace();
+               if (!cold && td->td_priority < ctd->td_priority &&
+                   ctd->td_critnest <= 1 && it->it_preempted == NULL) {
+                       it->it_preempted = ctd;
+                       TD_SET_RUNNING(td);
+                       TD_SET_CAN_RUN(ctd);
+                       cpu_switch(ctd, td);
+                       TD_SET_RUNNING(ctd);
+                       sched_lock.mtx_lock = (uintptr_t)ctd;
+               } else {
+                       if (ie->ie_disable != NULL)
+                               ie->ie_disable(ie->ie_source);
+                       setrunqueue(td, SRQ_INTR);
+               }
+#else
+               if (ie->ie_disable != NULL)
+                       ie->ie_disable(ie->ie_source);
                sched_add(td, SRQ_INTR);
+#endif
        } else {
+               if (it->it_preempted && ie->ie_disable)
+                       panic("eh?");
+               if (ie->ie_disable != NULL)
+                       ie->ie_disable(ie->ie_source);
                CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d",
                    __func__, p->p_pid, td->td_name, it->it_need, td->td_state);
        }
@@ -1186,7 +1214,7 @@
         * Now that all the handlers have had a chance to run, reenable
         * the interrupt source.
         */
-       if (ie->ie_post_ithread != NULL)
+       if (ie->ie_thread->it_preempted == NULL && ie->ie_post_ithread != NULL)
                ie->ie_post_ithread(ie->ie_source);
 }
 
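[Editorial note, not part of the patch.]  The intr_event_schedule_thread() hunk above is the heart of the change: when an interrupt arrives and the ithread has better priority than the interrupted thread (and we are not nested in a critical section), the CPU is handed to the ithread directly with cpu_switch(), and the interrupted thread is remembered in it_preempted so it can get the CPU straight back later (see the ithread_loop() hunk below).  The sketch that follows is a userspace analogy only, not kernel code: it uses POSIX ucontext(3) and made-up names (main_ctx, ithread_ctx, preempted, ithread_body) to illustrate the same borrow-and-return idea.

/*
 * Userspace analogy of the it_preempted hand-off; illustrative only.
 * "main" plays the interrupted thread, ithread_body plays the ithread.
 */
#include <stdio.h>
#include <stdlib.h>
#include <ucontext.h>

static ucontext_t main_ctx;             /* the thread that gets "preempted" */
static ucontext_t ithread_ctx;          /* the "interrupt thread" */
static ucontext_t *preempted;           /* analog of it_preempted */

static void
ithread_body(void)
{
        ucontext_t *ctd;

        printf("ithread: running handlers\n");
        /* Done: hand the CPU straight back to whoever we preempted. */
        ctd = preempted;
        preempted = NULL;
        swapcontext(&ithread_ctx, ctd); /* like cpu_switch(td, ctd) in ithread_loop() */
}

int
main(void)
{
        void *stack;

        stack = malloc(64 * 1024);
        if (stack == NULL)
                return (1);
        getcontext(&ithread_ctx);
        ithread_ctx.uc_stack.ss_sp = stack;
        ithread_ctx.uc_stack.ss_size = 64 * 1024;
        ithread_ctx.uc_link = NULL;
        makecontext(&ithread_ctx, ithread_body, 0);

        printf("main: 'interrupt' arrives, handing the CPU to the ithread\n");
        preempted = &main_ctx;                  /* like it_preempted = ctd */
        swapcontext(&main_ctx, &ithread_ctx);   /* like cpu_switch(ctd, td) */
        printf("main: resumed after the ithread finished\n");
        free(stack);
        return (0);
}

The difference in the kernel is that the preempted thread is parked in the TD_CAN_RUN state rather than simply suspended, which is why the turnstile and mi_switch() changes further down are needed.
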
@@ -1217,6 +1245,8 @@
        for (;;) {
                /*
                 * If we are an orphaned thread, then just die.
+                *
+                * XXX: Jeff had this #if 0'd out, why?
                 */
                if (ithd->it_flags & IT_DEAD) {
                        CTR3(KTR_INTR, "%s: pid %d (%s) exiting", __func__,
@@ -1252,7 +1282,17 @@
                if (!ithd->it_need && !(ithd->it_flags & IT_DEAD)) {
                        TD_SET_IWAIT(td);
                        ie->ie_count = 0;
-                       mi_switch(SW_VOL | SWT_IWAIT, NULL);
+                       if (ithd->it_preempted) {
+                               struct thread *ctd;
+
+                               if (ie->ie_ack)
+                                       ie->ie_ack(ie->ie_source);
+                               ctd = ithd->it_preempted;
+                               ithd->it_preempted = NULL;
+                               cpu_switch(td, ctd);
+                               sched_lock.mtx_lock = (uintptr_t)td;
+                       } else
+                               mi_switch(SW_VOL | SWT_IWAIT, NULL);
                }
                thread_unlock(td);
        }
@@ -1325,8 +1365,7 @@
        }
 
        if (thread) {
-               if (ie->ie_pre_ithread != NULL)
-                       ie->ie_pre_ithread(ie->ie_source);
+               td->td_owepreempt = 0;  /* XXX */
        } else {
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
@@ -1525,8 +1564,7 @@
                if (ie->ie_post_filter != NULL)
                        ie->ie_post_filter(ie->ie_source);
        } else {
-               if (ie->ie_pre_ithread != NULL)
-                       ie->ie_pre_ithread(ie->ie_source);
+               td->td_owepreempt = 0;  /* XXX */
        }
        critical_exit();
 
@@ -1547,6 +1585,29 @@
 }
 #endif
 
+int
+ithread_switch(struct thread *td, struct thread *newtd)
+{
+       struct intr_thread *it;
+       struct intr_event *ie;
+
+       it = td->td_ithread;    /* XXX */
+       if (it->it_preempted == NULL)
+               return (0);
+       if (newtd)
+               panic("unexpected newtd");
+       ie = it->it_event;
+       if (ie->ie_disable != NULL)
+               ie->ie_disable(ie->ie_source);
+       if (TD_IS_RUNNING(td))
+               setrunqueue(td, SRQ_OURSELF | SRQ_YIELDING);
+       newtd = it->it_preempted;
+       it->it_preempted = NULL;
+       cpu_switch(td, newtd);
+       sched_lock.mtx_lock = (uintptr_t)td;
+       return (1);
+}
+
 #ifdef DDB
 /*
  * Dump details about an interrupt handler
--- //depot/projects/smpng/sys/kern/kern_synch.c	2009/02/27 15:49:22
+++ //depot/user/jhb/intr_fast/kern/kern_synch.c	2009/02/27 16:32:01
@@ -408,6 +408,14 @@
         */
        if (kdb_active)
                kdb_switch();
+       /*
+        * XXX: Should become a fixup and move to turnstile code so
+        * choosethread can choose it_preempted or some other thread.
+        */
+       if (td->td_pflags & TDP_ITHREAD)
+               if (ithread_switch(td, newtd))
+                       return;
+
        if (flags & SW_VOL)
                td->td_ru.ru_nvcsw++;
        else
--- //depot/projects/smpng/sys/kern/subr_turnstile.c	2008/09/17 20:27:47
+++ //depot/user/jhb/intr_fast/kern/subr_turnstile.c	2008/09/18 17:18:53
@@ -238,9 +238,14 @@
 
        /*
         * If lock holder is actually running or on the run queue
-        * then we are done.
+        * then we are done.  Also, if the lock holder has been
+        * preempted by an ithread, treat it as on the run queue.
         */
-       if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td)) {
+       if (TD_IS_RUNNING(td) || TD_ON_RUNQ(td) || TD_CAN_RUN(td)) {
+               /*
+                * XXX: Assert for TD_CAN_RUN() that thread is
+                * ithread preempted?
+                */
                MPASS(td->td_blocked == NULL);
                thread_unlock(td);
                return;
--- //depot/projects/smpng/sys/notes	2009/02/18 22:05:55
+++ //depot/user/jhb/intr_fast/notes	2009/02/20 16:15:34
@@ -73,3 +73,13 @@
 - jhb_socket - socket hacking
 
 Space reserved for child branches:
+- Import jeff's original changes
+  + sys/interrupt.h
+  + kern/kern_intr.c
+    - td_ithread doesn't exist anymore
+  + kern/kern_synch.c
+  + kern/subr_turnstile.c
+  - kern/sched_4bsd.c?
+  + i386/i386/intr_machdep.c
+- Rework ithread_switch() to just be a fixup, not force a switch back to
+  the original thread in case the ithread wakes up someone more important
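[Editorial note, not part of the patch.]  The last item in the notes hunk above, together with the XXX comment added to mi_switch(), points at a planned rework: ithread_switch() should become a pure fixup so that when the ithread has woken up something more important than the thread it preempted, the scheduler gets to choose.  Purely as a sketch of that TODO, reusing only names that already appear in this diff (it_preempted, ie_disable, TD_CAN_RUN(), setrunqueue(), and the same td_ithread caveat), such a fixup might look roughly like the following.  The function name ithread_fixup and the SRQ_BORING flag choice are assumptions, not code from the branch, and the sketch is untested.

/*
 * Speculative sketch of the "fixup" rework described in the notes.
 * Instead of forcing a cpu_switch() back to the preempted thread, just
 * quiet the source, hand the preempted thread back to the scheduler,
 * and let mi_switch()/choosethread() pick the best runnable thread.
 */
static void
ithread_fixup(struct thread *td)
{
        struct intr_thread *it;
        struct intr_event *ie;
        struct thread *ptd;

        it = td->td_ithread;    /* XXX: same caveat as ithread_switch() */
        if (it->it_preempted == NULL)
                return;
        ie = it->it_event;
        if (ie->ie_disable != NULL)
                ie->ie_disable(ie->ie_source);
        ptd = it->it_preempted;
        it->it_preempted = NULL;
        MPASS(TD_CAN_RUN(ptd));
        setrunqueue(ptd, SRQ_BORING);   /* scheduler decides who runs next */
}

The subr_turnstile.c change above already treats TD_CAN_RUN() as "effectively on the run queue", so priority propagation would keep working on the preempted thread while it waits to be rescheduled.
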
--- //depot/projects/smpng/sys/sys/interrupt.h	2008/09/17 20:27:47
+++ //depot/user/jhb/intr_fast/sys/interrupt.h	2008/09/18 17:18:53
@@ -109,6 +109,8 @@
        struct mtx ie_lock;
        void    *ie_source;     /* Cookie used by MD code. */
        struct intr_thread *ie_thread;  /* Thread we are connected to. */
+       void    (*ie_ack)(void *);
+       void    (*ie_disable)(void *);
        void    (*ie_pre_ithread)(void *);
        void    (*ie_post_ithread)(void *);
        void    (*ie_post_filter)(void *);
@@ -175,6 +176,8 @@
 int    intr_getaffinity(int irq, void *mask);
 void   *intr_handler_source(void *cookie);
 int    intr_setaffinity(int irq, void *mask);
+/* XXX: Should really be fixup. */
+int    ithread_switch(struct thread *td, struct thread *newtd);
 int    swi_add(struct intr_event **eventp, const char *name,
            driver_intr_t handler, void *arg, int pri, enum intr_type flags,
            void **cookiep);