Index: i386/i386/exception.s
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/exception.s,v
retrieving revision 1.73
diff -u -r1.73 exception.s
--- i386/i386/exception.s	2000/12/01 02:09:41	1.73
+++ i386/i386/exception.s	2000/12/07 05:34:27
@@ -335,6 +335,186 @@
 	MEXITCOUNT
 	jmp	_doreti
 
+/*
+ * void ithd_trampoline(void)
+ *
+ * Executed by a process that was pre-empted by an interrupt thread and then
+ * fixed up due to the thread blocking on a mutex.  Release sched_lock, which
+ * was forced on the process by cpu_switch, and return to whatever it was
+ * doing.  Note that we don't have to worry about clobbering the caller-saved
+ * registers.  The process either scheduled a soft interrupt, which has
+ * function call semantics, or is in the hardware interrupt stub code where
+ * the registers have already been saved.
+ */
+ENTRY(ithd_trampoline)
+	MTX_EXIT(_sched_lock, %ecx)	/* release sched_lock */
+	popfl				/* restore interrupt state */
+	ret				/* return to caller */
+
+/*
+ * void ithd_schedule(struct ithd *)
+ *
+ * Schedule an interrupt thread.  If possible, do a light-weight context
+ * switch and run the handlers immediately; the interrupted process remains
+ * pinned until the ithd blocks or finishes.  This is called from C to
+ * schedule soft interrupts, so the callee-saved registers must be saved in
+ * the pcb where cpu_switch will look for them, in case the thread blocks
+ * and the process needs to be fixed up.  May be called with interrupts
+ * enabled or disabled and will return with the same state as when called.
+ * However, the interrupt thread will run with interrupts enabled, so the
+ * caller must not depend on interrupts being disabled for the duration.
+ * In addition, this must not be called with any spin locks held; in
+ * particular, the scheduler lock must not be held by the caller.
+ */
+ENTRY(ithd_schedule)
+	movl	4(%esp),%edx		/* point to ithd */
+	movl	$1,%eax			/* non-zero value */
+
+	MPLOCKED			/* mp-safe */
+	movl	%eax,IT_NEED(%edx)	/* indicate ithd needs service */
+
+	pushfl				/* save interrupt state */
+	cli				/* disable interrupts */
+	xchgl	%eax,IT_RUN(%edx)	/* get ithd runlock */
+	testl	%eax,%eax		/* did we get it? */
+	jnz	ithd_runlock_fail	/* no, check for races */
+
+	/*
+	 * Got the runlock, now check priorities.
+	 *
+	 * edx - ithd
+	 */
+ithd_runlock_succeed:
+	movl	_curproc,%eax		/* point to current process */
+	movb	P_PRIORITY(%eax),%al	/* load its priority */
+	movl	IT_PROC(%edx),%ecx	/* point to ithd's process */
+	cmpb	%al,P_PRIORITY(%ecx)	/* is ithd higher priority? */
+	ja	ithd_prio_fail		/* no, put on run queue */
+
+#ifdef INVARIANTS
+	cmpl	$0,IT_INTERRUPTED(%edx)	/* already hold interruptee? */
+	jne	ithd_ass_fail1		/* yes, bad, panic */
+#endif
+
+	/*
+	 * Do a light-weight switch to the interrupt thread.
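+	 * Only the callee-saved registers and the stack pointer are saved
+	 * in the interruptee's pcb, with ithd_trampoline above as the
+	 * resume point; the handlers then run on the ithd's own stack
+	 * while borrowing the interruptee's VM context.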
+	 *
+	 * ecx - ithd.it_proc
+	 * edx - ithd
+	 */
+	movl	_curproc,%eax		/* point to current process again */
+	movl	%eax,IT_INTERRUPTED(%edx)	/* save interruptee */
+	movl	P_ADDR(%eax),%eax	/* point to interruptee's u area */
+	movl	%ebp,PCB_EBP(%eax)	/* save context */
+	movl	%edi,PCB_EDI(%eax)
+	movl	%esi,PCB_ESI(%eax)
+	movl	%ebx,PCB_EBX(%eax)
+	subl	$4,%esp			/* leave room for program counter */
+	movl	%esp,PCB_ESP(%eax)	/* save stack */
+	movl	$ithd_trampoline,PCB_EIP(%eax)	/* come back above */
+	movl	P_ADDR(%ecx),%eax	/* point to ithd's u area */
+	leal	(UPAGES * PAGE_SIZE - 16)(%eax),%esp	/* load clean stack */
+	movb	$SRUN,P_STAT(%ecx)	/* now running, blow off locking */
+#ifdef SMP
+	movl	_cpuid,%eax		/* note which cpu it's running on */
+	movl	%eax,P_ONCPU(%ecx)
+#endif
+	movl	%ecx,_curproc		/* switch to new process */
+	sti				/* now can take interrupts again */
+	pushl	%edx			/* pass ithd */
+	call	*IT_LOOP(%edx)		/* call the handlers, clobber regs */
+	popl	%edx			/* point to ithd again */
+
+#ifdef INVARIANTS
+	cmpl	$0,IT_INTERRUPTED(%edx)	/* still hold interruptee? */
+	je	ithd_ass_fail2		/* no, bad, panic */
+#endif
+
+	/*
+	 * Finished calling the handlers; do a light-weight switch back to
+	 * the interruptee.
+	 *
+	 * edx - ithd
+	 */
+	cli				/* disable interrupts */
+	movl	IT_INTERRUPTED(%edx),%ecx	/* point to interruptee */
+	movl	P_ADDR(%ecx),%eax	/* point to interruptee's u area */
+	movl	PCB_ESP(%eax),%esp	/* reload stack */
+	addl	$4,%esp			/* remove space for program counter */
+	xorl	%eax,%eax		/* zero */
+	movl	%eax,IT_INTERRUPTED(%edx)	/* release interruptee */
+	movl	%ecx,_curproc		/* switch back */
+
+	MPLOCKED			/* mp-safe */
+	movl	%eax,IT_RUN(%edx)	/* release runlock */
+	MPLOCKED			/* mp-safe */
+	movl	IT_NEED(%edx),%eax	/* check for more interrupts */
+	testl	%eax,%eax		/* more? */
+	jnz	ithd_runlock_fail	/* yes, check for races and go again */
+
+	popfl				/* restore interrupt state */
+	ret				/* return to caller */
+
+	/*
+	 * Failed to obtain the runlock; take sched_lock and check again.
+	 *
+	 * edx - ithd
+	 */
+ithd_runlock_fail:
+	MTX_ENTER(_sched_lock, %ecx)	/* race with ithd_loop */
+	movl	$1,%eax			/* non-zero value */
+	xchgl	%eax,IT_RUN(%edx)	/* try again for runlock */
+	testl	%eax,%eax		/* did we get it? */
+	jnz	1f			/* no, just return */
+	MTX_EXIT(_sched_lock, %ecx)	/* release sched_lock */
+	jmp	ithd_runlock_succeed	/* run the thread again */
+1:
+	MTX_EXIT(_sched_lock, %ecx)	/* release sched_lock */
+	popfl				/* restore interrupt state */
+	ret				/* return to caller */
+
+	/*
+	 * Current process is higher priority; put the ithd on the run queue.
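+	 * The ithd's pcb is set up so that when the scheduler eventually
+	 * runs it, fork_trampoline will call it_loop(ithd), picking up the
+	 * arguments staged in pcb_ebx and pcb_esi below.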
+	 *
+	 * ecx - ithd.it_proc
+	 * edx - ithd
+	 */
+ithd_prio_fail:
+	movl	P_ADDR(%ecx),%eax	/* point to ithd's u area */
+	movl	%edx,PCB_EBX(%eax)	/* pass ithd to fork_trampoline */
+	movl	IT_LOOP(%edx),%ecx	/* load ithd's loop function */
+	movl	%ecx,PCB_ESI(%eax)	/* pass to fork_trampoline */
+	movl	$fork_trampoline,PCB_EIP(%eax)	/* start in fork_trampoline */
+	leal	(UPAGES * PAGE_SIZE - 16 - 4)(%eax),%ecx	/* load clean stack */
+	movl	%ecx,PCB_ESP(%eax)	/* save stack */
+	MTX_ENTER(_sched_lock, %ecx)	/* protect run queue */
+	movl	IT_PROC(%edx),%ecx	/* point to ithd's proc */
+	movb	$SRUN,P_STAT(%ecx)	/* set runnable */
+	pushl	%ecx			/* pass proc */
+	call	setrunqueue		/* put on run queue */
+	MTX_EXIT(_sched_lock, %ecx)	/* release sched_lock */
+	addl	$4,%esp			/* discard proc */
+	xorl	%eax,%eax		/* zero */
+	movl	%eax,IT_RUN(%edx)	/* release runlock */
+	orl	$AST_PENDING|AST_RESCHED,_astpending	/* need reschedule */
+	popfl				/* restore interrupt state */
+	ret				/* return to caller */
+
+#ifdef INVARIANTS
+	.text
+ithd_ass_fail1:
+	pushl	%edx			/* pass ithd */
+	pushl	$1f
+	call	panic
+1:
+	.asciz	"ithd %p: already holding interruptee"
+
+	.text
+ithd_ass_fail2:
+	pushl	%edx			/* pass ithd */
+	pushl	$1f
+	call	panic
+1:
+	.asciz	"ithd %p: no longer holding interruptee"
+#endif
+
 /*
  * Include vm86 call routines, which want to call _doreti.
Index: i386/i386/genassym.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/genassym.c,v
retrieving revision 1.94
diff -u -r1.94 genassym.c
--- i386/i386/genassym.c	2000/10/29 16:57:40	1.94
+++ i386/i386/genassym.c	2000/12/05 09:04:01
@@ -83,6 +83,14 @@
 ASSYM(P_STAT, offsetof(struct proc, p_stat));
 ASSYM(P_WCHAN, offsetof(struct proc, p_wchan));
 
+ASSYM(IT_NEED, offsetof(struct ithd, it_need));
+ASSYM(IT_RUN, offsetof(struct ithd, it_run));
+ASSYM(IT_PROC, offsetof(struct ithd, it_proc));
+ASSYM(IT_INTERRUPTED, offsetof(struct ithd, it_interrupted));
+ASSYM(IT_LOOP, offsetof(struct ithd, it_loop));
+
+ASSYM(P_PRIORITY, offsetof(struct proc, p_priority));
+
 #ifdef SMP
 ASSYM(P_ONCPU, offsetof(struct proc, p_oncpu));
 ASSYM(P_LASTCPU, offsetof(struct proc, p_lastcpu));
Index: i386/i386/swtch.s
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/swtch.s,v
retrieving revision 1.100
diff -u -r1.100 swtch.s
--- i386/i386/swtch.s	2000/12/03 01:09:59	1.100
+++ i386/i386/swtch.s	2000/12/05 07:52:17
@@ -103,7 +103,8 @@
 
 	movl	P_ADDR(%ecx),%edx
 
-	popl	PCB_EIP(%edx)		/* Hardware registers */
+	movl	(%esp),%eax		/* Hardware registers */
+	movl	%eax,PCB_EIP(%edx)
 	movl	%ebx,PCB_EBX(%edx)
 	movl	%esp,PCB_ESP(%edx)
 	movl	%ebp,PCB_EBP(%edx)
@@ -253,7 +254,8 @@
 	movl	PCB_EBP(%edx),%ebp
 	movl	PCB_ESI(%edx),%esi
 	movl	PCB_EDI(%edx),%edi
-	pushl	PCB_EIP(%edx)
+	movl	PCB_EIP(%edx),%eax
+	movl	%eax,(%esp)
 
 #ifdef SMP
 #ifdef GRAB_LOPRIO				/* hold LOPRIO for INTs */
Index: i386/i386/vm_machdep.c
===================================================================
RCS file: /home/ncvs/src/sys/i386/i386/vm_machdep.c,v
retrieving revision 1.143
diff -u -r1.143 vm_machdep.c
--- i386/i386/vm_machdep.c	2000/12/03 01:09:59	1.143
+++ i386/i386/vm_machdep.c	2000/12/05 07:52:17
@@ -168,7 +168,7 @@
 	pcb2->pcb_edi = 0;
 	pcb2->pcb_esi = (int)fork_return;	/* fork_trampoline argument */
 	pcb2->pcb_ebp = 0;
-	pcb2->pcb_esp = (int)p2->p_md.md_regs;
+	pcb2->pcb_esp = (int)p2->p_md.md_regs - sizeof(void *);
 	pcb2->pcb_ebx = (int)p2;	/* fork_trampoline argument */
 	pcb2->pcb_eip = (int)fork_trampoline;
 	/*
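
The swtch.s and vm_machdep.c hunks above go together: cpu_switch now moves
the saved program counter through the word at (%esp) instead of pushing and
popping it, so anything that builds a pcb by hand must reserve that word
itself, which is why cpu_fork now subtracts sizeof(void *).  A user-land
sketch of the invariant (all names here are illustrative, not the kernel's):

	/*
	 * fake_cpu_fork() models only the stack layout: pcb_esp must point
	 * at a slot that can hold the resume address, because the patched
	 * cpu_switch stores to (%esp) rather than pushing.
	 */
	struct fake_pcb {
		char	*pcb_esp;	/* points at the saved eip slot */
		void	*pcb_eip;	/* resume address */
	};

	static void
	fake_cpu_fork(struct fake_pcb *pcb, char *stack_top)
	{
		pcb->pcb_esp = stack_top - sizeof(void *); /* reserve slot */
		pcb->pcb_eip = 0;	/* e.g. fork_trampoline */
	}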
Index: i386/include/mutex.h
===================================================================
RCS file: /home/ncvs/src/sys/i386/include/mutex.h,v
retrieving revision 1.14
diff -u -r1.14 mutex.h
--- i386/include/mutex.h	2000/12/04 12:38:03	1.14
+++ i386/include/mutex.h	2000/12/07 04:20:37
@@ -257,43 +257,40 @@
 #else	/* !LOCORE */
 
 /*
- * Simple assembly macros to get and release non-recursive spin locks
+ * Simple assembly macros to get and release spin locks
+ *
+ * Note that the non-recursive versions do not disable interrupts; code
+ * that uses them must disable interrupts itself and re-enable them when
+ * appropriate.  The light-weight context switcher depends on this.
+ * When in doubt, use the recursive versions.
  */
 
 #if defined(I386_CPU)
 
-#define MTX_ENTER(reg, lck)		\
-	pushf;				\
-	cli;				\
-	movl	reg,lck+MTX_LOCK;	\
-	popl	lck+MTX_SAVEINTR
+#define MTX_ENTER(lck, reg)		\
+	movl	_curproc,reg;		\
+	movl	reg,lck+MTX_LOCK
 
 #define MTX_EXIT(lck, reg)		\
-	pushl	lck+MTX_SAVEINTR;	\
-	movl	$ MTX_UNOWNED,lck+MTX_LOCK;	\
-	popf
+	movl	$ MTX_UNOWNED,lck+MTX_LOCK
 
 #else	/* I386_CPU */
 
-#define MTX_ENTER(reg, lck)		\
-	pushf;				\
-	cli;				\
+#define MTX_ENTER(lck, reg)		\
+	movl	_curproc,reg;		\
 9:	movl	$ MTX_UNOWNED,%eax;	\
 	MPLOCKED			\
 	cmpxchgl	reg,lck+MTX_LOCK;	\
-	jnz	9b;			\
-	popl	lck+MTX_SAVEINTR;
+	jnz	9b
 
 /* Must use locked bus op (cmpxchg) when setting to unowned (barrier) */
-#define MTX_EXIT(lck,reg)		\
-	pushl	lck+MTX_SAVEINTR;	\
+#define MTX_EXIT(lck, reg)		\
 	movl	lck+MTX_LOCK,%eax;	\
 	movl	$ MTX_UNOWNED,reg;	\
 	MPLOCKED			\
-	cmpxchgl	reg,lck+MTX_LOCK;	\
-	popf
+	cmpxchgl	reg,lck+MTX_LOCK
 
-#define MTX_ENTER_WITH_RECURSION(reg, lck)	\
+#define MTX_ENTER_WITH_RECURSION(lck, reg)	\
 	pushf;				\
 	cli;				\
 	movl	lck+MTX_LOCK,%eax;	\
@@ -310,7 +307,7 @@
 8:	add	$4,%esp;		\
 9:
 
-#define MTX_EXIT_WITH_RECURSION(lck,reg)	\
+#define MTX_EXIT_WITH_RECURSION(lck, reg)	\
 	movl	lck+MTX_RECURSE,%eax;	\
 	decl	%eax;			\
 	js	8f;			\
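
Note that the non-recursive MTX_ENTER/MTX_EXIT above also swap their
argument order from (reg, lck) to (lck, reg) while dropping the implicit
pushf/cli/popf, so existing assembly callers need both fixes.  A stubbed C
sketch of the new caller-side contract (every name below is a stand-in,
not a kernel symbol):

	typedef unsigned int u_int;

	static volatile u_int fake_lock;	/* 0 == unowned */
	static u_int fake_eflags = 1;		/* 1 == interrupts enabled */

	static u_int read_flags_stub(void) { return (fake_eflags); }
	static void write_flags_stub(u_int fl) { fake_eflags = fl; }
	static void disable_intr_stub(void) { fake_eflags = 0; }
	static void mtx_enter_stub(volatile u_int *l) { *l = 1; }
	static void mtx_exit_stub(volatile u_int *l) { *l = 0; }

	static void
	caller_sketch(void)
	{
		u_int savefl;

		savefl = read_flags_stub();	/* pushfl */
		disable_intr_stub();		/* cli */
		mtx_enter_stub(&fake_lock);	/* lock no longer saves flags */
		/* ... critical section ... */
		mtx_exit_stub(&fake_lock);
		write_flags_stub(savefl);	/* popfl */
	}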
Index: kern/kern_fork.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_fork.c,v
retrieving revision 1.89
diff -u -r1.89 kern_fork.c
--- kern/kern_fork.c	2000/12/04 10:23:29	1.89
+++ kern/kern_fork.c	2000/12/05 20:24:38
@@ -369,6 +369,7 @@
 
 	mtx_init(&p2->p_mtx, "process lock", MTX_DEF);
 	p2->p_aioinfo = NULL;
+	p2->p_ithd = NULL;
 
 	/*
 	 * Duplicate sub-structures as needed.
Index: kern/kern_mutex.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_mutex.c,v
retrieving revision 1.26
diff -u -r1.26 kern_mutex.c
--- kern/kern_mutex.c	2000/12/01 00:10:59	1.26
+++ kern/kern_mutex.c	2000/12/07 03:30:32
@@ -103,6 +103,7 @@
 void _mtx_enter_giant_def(void);
 void _mtx_exit_giant_def(void);
 static void propagate_priority(struct proc *);
+static void ithd_fixup(struct proc *);
 
 #define mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
 #define mtx_owner(m)	(mtx_unowned(m) ? NULL \
@@ -248,10 +249,28 @@
 	}
 }
 
+static void
+ithd_fixup(struct proc *p)
+{
+	struct ithd *it;
+
+	mtx_assert(&sched_lock, MA_OWNED);
+	while ((it = p->p_ithd) != NULL) {
+		p = it->it_interrupted;
+		it->it_interrupted = NULL;
+		KASSERT(p != NULL, ("%s: no interruptee", __FUNCTION__));
+
+		setrunqueue(p);
+	}
+
+	/* XXX save floating point state if interruptee is using it */
+}
+
 void
 mtx_enter_hard(struct mtx *m, int type, int saveintr)
 {
 	struct proc *p = CURPROC;
+	struct ithd *it;
 
 	KASSERT(p != NULL, ("curproc is NULL in mutex"));
@@ -321,22 +340,17 @@
 		/* We definitely have to sleep for this lock */
 		mtx_assert(m, MA_NOTOWNED);
 
-#ifdef notyet
 		/*
 		 * If we're borrowing an interrupted thread's VM
 		 * context must clean up before going to sleep.
 		 */
-		if (p->p_flag & (P_ITHD | P_SITHD)) {
-			ithd_t *it = (ithd_t *)p;
-
-			if (it->it_interrupted) {
-				CTR2(KTR_LOCK,
-				    "mtx_enter: 0x%x interrupted 0x%x",
-				    it, it->it_interrupted);
-				intr_thd_fixup(it);
-			}
+		it = p->p_ithd;
+		if (it != NULL && it->it_interrupted != NULL) {
+			CTR2(KTR_LOCK,
+			    "mtx_enter: 0x%x interrupted 0x%x",
+			    it, it->it_interrupted);
+			ithd_fixup(p);
 		}
-#endif
 
 		/* Put us on the list of procs blocked on this mutex */
 		if (TAILQ_EMPTY(&m->mtx_blocked)) {
Index: sys/proc.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/proc.h,v
retrieving revision 1.131
diff -u -r1.131 proc.h
--- sys/proc.h	2000/12/05 20:23:43	1.131
+++ sys/proc.h	2000/12/06 08:18:32
@@ -327,8 +327,10 @@
 	LIST_ENTRY(ithd) it_list;	/* All interrupt threads. */
 	int	it_need;		/* Needs service. */
 	int	irq;			/* Vector. */
+	int	it_run;			/* Runlock; non-zero while running. */
+	void	(*it_loop)(void *);	/* Main loop; runs the handlers. */
 	struct	intrhand *it_ih;	/* Interrupt handlers. */
-	struct	ithd *it_interrupted;	/* Who we interrupted. */
+	struct	proc *it_interrupted;	/* Who we interrupted. */
 	void	*it_md;			/* Hook for MD interrupt code. */
 };
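
As a cross-check of the assembly, the control flow of ithd_schedule() can
be summarized in C.  This is an illustrative sketch only: swap_int models
the xchgl instruction (and is not actually atomic here), the sched_lock and
handler helpers are stubs, and the priority-based choice between the
light-weight switch and setrunqueue is collapsed into run_handlers().

	struct ithd_sketch {
		volatile int	it_need;	/* handlers need service */
		volatile int	it_run;		/* runlock */
	};

	static int
	swap_int(volatile int *p, int v)	/* stand-in for xchgl */
	{
		int old = *p;

		*p = v;
		return (old);
	}

	static void sched_lock_enter(void) { }	/* MTX_ENTER(_sched_lock) */
	static void sched_lock_exit(void) { }	/* MTX_EXIT(_sched_lock) */

	static void
	run_handlers(struct ithd_sketch *it)
	{
		it->it_need = 0;	/* it_loop consumes the request */
	}

	static void
	ithd_schedule_sketch(struct ithd_sketch *it)
	{
		it->it_need = 1;	/* interrupts are disabled here */
		for (;;) {
			if (swap_int(&it->it_run, 1) != 0) {
				/*
				 * Runlock held: serialize with ithd_loop
				 * under sched_lock and try once more; if it
				 * is still held the owner will see it_need.
				 */
				sched_lock_enter();
				if (swap_int(&it->it_run, 1) != 0) {
					sched_lock_exit();
					return;
				}
				sched_lock_exit();
			}
			run_handlers(it);	/* switch to ithd, or queue it */
			it->it_run = 0;		/* release the runlock */
			if (it->it_need == 0)	/* no new request? done */
				return;
		}
	}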