==== //depot/projects/bike_sched/sys/i386/i386/intr_machdep.c#1 - /home/peter/fbp4/bike_sched/sys/i386/i386/intr_machdep.c ==== @@ -60,9 +60,10 @@ typedef void (*mask_fn)(void *); static int intrcnt_index; -static struct intsrc *interrupt_sources[NUM_IO_INTS]; static struct mtx intr_table_lock; +struct intsrc *interrupt_sources[NUM_IO_INTS]; + #ifdef SMP static int assign_cpu; @@ -166,12 +167,12 @@ } void -intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame) +intr_execute_handlers(struct intsrc *isrc, u_int vector, struct trapframe *frame) { struct thread *td; struct intr_event *ie; struct intr_handler *ih; - int error, vector, thread; + int error, thread; td = curthread; @@ -190,7 +191,6 @@ * XXX: We assume that IRQ 0 is only used for the ISA timer * device (clk). */ - vector = isrc->is_pic->pic_vector(isrc); if (vector == 0) clkintr_pending = 1; @@ -238,16 +238,14 @@ * mask the source as well as sending it an EOI. Otherwise, * just send it an EOI but leave it unmasked. */ - if (thread) + if (thread) { isrc->is_pic->pic_disable_source(isrc, PIC_EOI); - else - isrc->is_pic->pic_eoi_source(isrc); - critical_exit(); - - /* Schedule the ithread if needed. 
*/ - if (thread) { + critical_exit(); error = intr_event_schedule_thread(ie); KASSERT(error == 0, ("bad stray interrupt")); + } else { + isrc->is_pic->pic_eoi_source(isrc); + critical_exit(); } td->td_intr_nesting_level--; } ==== //depot/projects/bike_sched/sys/i386/i386/local_apic.c#1 - /home/peter/fbp4/bike_sched/sys/i386/i386/local_apic.c ==== @@ -601,10 +601,11 @@ { struct intsrc *isrc; - if (vector == -1) - panic("Couldn't get vector from ISR!"); - isrc = intr_lookup_source(apic_idt_to_irq(vector)); - intr_execute_handlers(isrc, &frame); + KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL && + vector <= APIC_IO_INTS + APIC_NUM_IOINTS, + ("Vector %u does not map to an IRQ line", vector)); + isrc = interrupt_sources[ioint_irqs[vector - APIC_IO_INTS]]; + intr_execute_handlers(isrc, vector, &frame); } void @@ -740,17 +741,6 @@ mtx_unlock_spin(&icu_lock); } -/* Map an IDT vector (APIC) to an IRQ (interrupt source). */ -u_int -apic_idt_to_irq(u_int vector) -{ - - KASSERT(vector >= APIC_IO_INTS && vector != IDT_SYSCALL && - vector <= APIC_IO_INTS + APIC_NUM_IOINTS, - ("Vector %u does not map to an IRQ line", vector)); - return (ioint_irqs[vector - APIC_IO_INTS]); -} - #ifdef DDB /* * Dump data about APIC IDT vector mappings. 
==== //depot/projects/bike_sched/sys/i386/include/apicvar.h#1 - /home/peter/fbp4/bike_sched/sys/i386/include/apicvar.h ==== @@ -176,7 +176,6 @@ u_int apic_alloc_vector(u_int irq); void apic_enable_vector(u_int vector); void apic_free_vector(u_int vector, u_int irq); -u_int apic_idt_to_irq(u_int vector); void apic_register_enumerator(struct apic_enumerator *enumerator); void *ioapic_create(uintptr_t addr, int32_t id, int intbase); int ioapic_disable_pin(void *cookie, u_int pin); ==== //depot/projects/bike_sched/sys/i386/include/intr_machdep.h#1 - /home/peter/fbp4/bike_sched/sys/i386/include/intr_machdep.h ==== @@ -109,6 +109,8 @@ u_int is_enabled:1; }; +extern struct intsrc *interrupt_sources[NUM_IO_INTS]; + struct trapframe; extern struct mtx icu_lock; @@ -128,7 +130,8 @@ void *arg, enum intr_type flags, void **cookiep); int intr_config_intr(int vector, enum intr_trigger trig, enum intr_polarity pol); -void intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame); +void intr_execute_handlers(struct intsrc *isrc, u_int vector, + struct trapframe *frame); struct intsrc *intr_lookup_source(int vector); int intr_register_source(struct intsrc *isrc); int intr_remove_handler(void *cookie); ==== //depot/projects/bike_sched/sys/i386/isa/atpic.c#1 - /home/peter/fbp4/bike_sched/sys/i386/isa/atpic.c ==== @@ -580,7 +580,7 @@ if ((isr & IRQ_MASK(7)) == 0) return; } - intr_execute_handlers(isrc, &frame); + intr_execute_handlers(isrc, vector, &frame); } #ifdef DEV_ISA ==== //depot/projects/bike_sched/sys/kern/kern_intr.c#2 - /home/peter/fbp4/bike_sched/sys/kern/kern_intr.c ==== @@ -531,7 +531,7 @@ CTR3(KTR_INTR, "%s: schedule pid %d (%s)", __func__, p->p_pid, p->p_comm); TD_CLR_IWAIT(td); - setrunqueue(td, SRQ_INTR); + sched_run_ithread(td); } else { CTR5(KTR_INTR, "%s: pid %d (%s): it_need %d, state %d", __func__, p->p_pid, p->p_comm, it->it_need, td->td_state); ==== //depot/projects/bike_sched/sys/kern/sched_4bsd.c#2 - 
/home/peter/fbp4/bike_sched/sys/kern/sched_4bsd.c ==== @@ -1083,6 +1083,45 @@ #endif /* SMP */ void +sched_run_ithread(struct thread *td) +{ + struct kse *ke = td->td_kse; + + /* Inline of setrunqueue */ + CTR2(KTR_RUNQ, "sched_run_ithread: td:%p pid:%d", + td, td->td_proc->p_pid); + CTR5(KTR_SCHED, "sched_run_ithread: %p(%s) prio %d by %p(%s)", + td, td->td_proc->p_comm, td->td_priority, curthread, + curthread->td_proc->p_comm); + mtx_assert(&sched_lock, MA_OWNED); + KASSERT((td->td_inhibitors == 0), + ("sched_run_ithread: trying to run inhibitted thread")); + KASSERT((TD_CAN_RUN(td) || TD_IS_RUNNING(td)), + ("sched_run_ithread: bad thread state")); + KASSERT(ke->ke_state != KES_ONRUNQ, + ("sched_run_ithread: kse %p (%s) already in run queue", ke, + td->td_proc->p_comm)); + KASSERT(td->td_proc->p_sflag & PS_INMEM, + ("sched_run_ithread: process swapped out")); + CTR5(KTR_SCHED, "sched_run_ithread: %p(%s) prio %d by %p(%s)", + td, td->td_proc->p_comm, td->td_priority, curthread, + curthread->td_proc->p_comm); + CTR2(KTR_RUNQ, "sched_run_ithread: adding kse:%p (td:%p) to runq", ke, td); + + TD_SET_RUNQ(td); + ke->ke_runq = &runq; + /* Preempt if we can. If we did, we're finished */ + if (maybe_preempt(td)) + return; + /* We didn't preempt. 
Place on runq */ + if ((td->td_proc->p_flag & P_NOLOAD) == 0) + sched_load_add(); + runq_add(ke->ke_runq, ke, SRQ_INTR); + ke->ke_state = KES_ONRUNQ; + maybe_resched(td); +} + +void sched_rem(struct thread *td) { struct kse *ke; @@ -1148,14 +1187,6 @@ } void -sched_userret(struct thread *td) -{ - - KASSERT((td->td_flags & TDF_BORROWING) == 0, - ("thread with borrowed priority returning to userland")); -} - -void sched_bind(struct thread *td, int cpu) { struct kse *ke; ==== //depot/projects/bike_sched/sys/kern/sched_ule.c#2 - /home/peter/fbp4/bike_sched/sys/kern/sched_ule.c ==== @@ -1641,14 +1641,6 @@ return (load); } -void -sched_userret(struct thread *td) -{ - - KASSERT((td->td_flags & TDF_BORROWING) == 0, - ("thread with borrowed priority returning to userland")); -} - struct kse * sched_choose(void) { ==== //depot/projects/bike_sched/sys/kern/subr_trap.c#2 - /home/peter/fbp4/bike_sched/sys/kern/subr_trap.c ==== @@ -120,10 +120,8 @@ if (p->p_flag & P_PROFIL) addupc_task(td, TRAPF_PC(frame), td->td_pticks * psratio); - /* - * Let the scheduler adjust our priority etc. - */ - sched_userret(td); + KASSERT((td->td_flags & TDF_BORROWING) == 0, + ("userret: thread with borrowed priority returning to userland")); KASSERT(td->td_locks == 0, ("userret: Returning with %d locks held.", td->td_locks)); } ==== //depot/projects/bike_sched/sys/sys/sched.h#2 - /home/peter/fbp4/bike_sched/sys/sys/sched.h ==== @@ -65,7 +65,6 @@ void sched_sleep(struct thread *td); void sched_switch(struct thread *td, struct thread *newtd, int flags); void sched_unlend_prio(struct thread *td, u_char prio); -void sched_userret(struct thread *td); void sched_wakeup(struct thread *td); /* @@ -74,6 +73,7 @@ void sched_add(struct thread *td, int flags); void sched_clock(struct thread *td); void sched_rem(struct thread *td); +void sched_run_ithread(struct thread *td); /* * Binding makes cpu affinity permanent while pinning is used to temporarily