A mapping of places manipulating the mp_lock in the SMP kernel

===============================================================================

This mapping is relative to the 3.0-current src/sys tree as of 970430.

It is meant to give an idea of where we currently use the "giant lock"
to enforce kernel integrity.  It is from this point that we will start
to "push down" the lock into the various sub-systems within the kernel.
It is anticipated that this push-down will begin at the syscall, trap
and interrupt handler entry points.

===============================================================================

get_mplock occurs in:

 /usr/SMP/src/sys/i386/i386/exception.s
 /usr/SMP/src/sys/i386/i386/mplock.s
 /usr/SMP/src/sys/i386/include/smp.h
 /usr/SMP/src/sys/i386/isa/vector.s
 /usr/SMP/src/sys/kern/init_smp.c

===============================================================================

rel_mplock occurs in:

 /usr/SMP/src/sys/i386/i386/mplock.s
 /usr/SMP/src/sys/i386/include/smp.h
 /usr/SMP/src/sys/i386/isa/icu.s
 /usr/SMP/src/sys/i386/isa/vector.s
 /usr/SMP/src/sys/kern/init_smp.c

===============================================================================

_MPgetlock and _MPrellock occur in:

 /usr/SMP/src/sys/i386/i386/mplock.s

===============================================================================

mp_lock occurs in:

 /usr/SMP/src/sys/i386/i386/mp_machdep.c
 /usr/SMP/src/sys/i386/i386/mplock.s
 /usr/SMP/src/sys/i386/i386/swtch.s
 /usr/SMP/src/sys/i386/include/smp.h
 /usr/SMP/src/sys/kern/init_smp.c

===============================================================================

merged list of occurrences:

 /usr/SMP/src/sys/i386/include/smp.h
 /usr/SMP/src/sys/kern/init_smp.c
 /usr/SMP/src/sys/i386/isa/icu.s
 /usr/SMP/src/sys/i386/isa/vector.s
 /usr/SMP/src/sys/i386/i386/exception.s
 /usr/SMP/src/sys/i386/i386/mp_machdep.c
 /usr/SMP/src/sys/i386/i386/mplock.s
 /usr/SMP/src/sys/i386/i386/swtch.s

===============================================================================

/usr/SMP/src/sys/i386/include/smp.h

extern u_int	mp_lock;

void	get_mplock	__P((void));
void	rel_mplock	__P((void));
void	try_mplock	__P((void));

===============================================================================

/usr/SMP/src/sys/kern/init_smp.c
----------------

secondary_main()
{
	get_mplock();
	...
	curproc = NULL;			/* ensure no context to save */
	cpu_switch(curproc);		/* start first process */
	panic("switch returned!");
}

---

smp_idleloop()
{
	...
	spl0();
	rel_mplock();

	while (1) {
		if (smp_cpus == 0 && smp_active != 0) {
			get_mplock();
			...
			rel_mplock();
		}

		if (smp_active && smp_active <= cpunumber()) {
			get_mplock();
			... [ FREEZE ]
			rel_mplock();

			... [ cpu spins ]

			get_mplock();
			... [ UNFREEZE ]
			rel_mplock();
		}

		if (whichqs || whichrtqs ||
		    (!ignore_idleprocs && whichidqs)) {
			get_mplock();
			if (whichqs || whichrtqs ||
			    (!ignore_idleprocs && whichidqs)) {
				splhigh();
				if (curproc)
					setrunqueue(curproc);
				cnt.v_swtch++;
				cpu_switch(curproc);
				microtime(&runtime);
				(void)spl0();
			}
			rel_mplock();
		} else {
			dcnt++;
			if (idle_debug && (dcnt % idle_debug) == 0) {
				get_mplock();
				printf( ... );
				rel_mplock();
			}
		}
	}
}

===============================================================================

/usr/SMP/src/sys/i386/isa/icu.s
--------

_doreti:
	...
doreti_next:
	...
	cli
	andl	_ipending,%ecx
	jne	doreti_unpend
doreti_exit:
	movl	%eax,_cpl
	decb	_intr_nesting_level
	MEXITCOUNT
#ifdef SMP
	call	_rel_mplock
#endif /* SMP */
	...
doreti_iret:
	iret
	...
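/*
 * Note: the doreti_exit path above drops the giant lock (_rel_mplock)
 * just before the iret, so a CPU holds the lock for the whole of its
 * stay in the kernel.  doreti_unpend below dispatches pending hardware
 * and software interrupts while the lock is still held.
 */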
doreti_unpend:
	sti
	bsfl	%ecx,%ecx		/* slow, but not worth optimizing */
#ifdef SMP
	lock
#endif
	btrl	%ecx,_ipending
	jnc	doreti_next		/* some intr cleared memory copy */
	movl	ihandlers(,%ecx,4),%edx
	testl	%edx,%edx
	je	doreti_next		/* "can't happen" */
	cmpl	$NHWI,%ecx
	jae	doreti_swi
	cli
	movl	%eax,_cpl
	MEXITCOUNT
	jmp	%edx

doreti_swi:
	...
	jmp	doreti_next

swi_ast:
	addl	$8,%esp		/* discard raddr & cpl to get trap frame */
	testb	$SEL_RPL_MASK,TRAPF_CS_OFF(%esp)
	je	swi_ast_phantom
	...
	jmp	doreti_next

swi_ast_phantom:
	...
	cli
#ifdef SMP
	lock
#endif
	...
	jmp	doreti_exit	/* SWI_AST is highest so we must be done */

===============================================================================

/usr/SMP/src/sys/i386/isa/vector.s
---

#define GET_MPLOCK	call _get_mplock
#define REL_MPLOCK	call _rel_mplock

---

#define MAYBE_MASK_IRQ(irq_num,icu) \
	testl	$IRQ_BIT(irq_num),iactive ; /* lazy masking */ \
	je	1f ;			/* NOT currently active */ \
	... \
	REL_MPLOCK ;			/* SMP release global lock */ \
	... \
	iret ; \
1: ; \
	orl	$IRQ_BIT(irq_num),iactive

---

#define FAST_INTR(irq_num, vec_name, enable_icus) \
IDTVEC(vec_name) ; \
	... \
	GET_MPLOCK ;			/* SMP Spin lock */ \
	pushl	_intr_unit + (irq_num) * 4 ; \
	call	*_intr_handler + (irq_num) * 4 ; /* do the work ASAP */ \
	... \
	andl	_ipending,%eax ; \
	jne	2f ;			/* yes, maybe handle them */ \
1: ; \
	REL_MPLOCK ;			/* SMP release global lock */ \
	... \
	iret ; \
2: ; \
	... \
	jmp	_doreti

---

#define INTR(irq_num, vec_name, icu, enable_icus, reg) \
IDTVEC(vec_name) ; \
	... \
	GET_MPLOCK ;			/* SMP Spin lock */ \
	MAYBE_MASK_IRQ(irq_num,icu) ; \
	enable_icus ; \
	movl	_cpl,%eax ; \
	TEST_IRQ(irq_num,reg) ; \
	jne	2f ; \
	incb	_intr_nesting_level ; \
__CONCAT(Xresume,irq_num): ; \
	... \
	movl	%eax,_cpl ; \
	sti ; \
	call	*_intr_handler + (irq_num) * 4 ; \
	cli ;		/* must unmask _imen and icu atomically */ \
	MAYBE_UNMASK_IRQ(irq_num,icu) ; \
	sti ;		/* XXX _doreti repeats the cli/sti */ \
	... \
	jmp	_doreti ; \
2: ; \
	... \
	REL_MPLOCK ;			/* SMP release global lock */ \
	... \
	iret

===============================================================================

/usr/SMP/src/sys/i386/i386/exception.s
-----------

IDTVEC(fpu)
	...
	pushl	$0		/* dummy unit to finish building intr frame */
#ifdef SMP
	call	_get_mplock
#endif /* SMP */
	incl	_cnt+V_TRAP
	orl	$SWI_AST_MASK,%eax
	movl	%eax,_cpl
	call	_npxintr
	incb	_intr_nesting_level
	MEXITCOUNT
	jmp	_doreti

-------------

IDTVEC(align)
	...
calltrap:
#ifdef SMP
	call	_get_mplock
#endif /* SMP */
	FAKE_MCOUNT(_btrap)	/* init "from" _btrap -> calltrap */
	incl	_cnt+V_TRAP
	orl	$SWI_AST_MASK,_cpl
	call	_trap
	...
	pushl	%eax
	subl	$4,%esp
	incb	_intr_nesting_level
	MEXITCOUNT
	jmp	_doreti

---------------

IDTVEC(syscall)
	...
#ifdef SMP
	call	_get_mplock
#endif /* SMP */
	incl	_cnt+V_SYSCALL
	movl	$SWI_AST_MASK,_cpl
	call	_syscall
	/*
	 * Return via _doreti to handle ASTs.
	 */
	pushl	$0		/* cpl to restore */
	subl	$4,%esp
	movb	$1,_intr_nesting_level
	MEXITCOUNT
	jmp	_doreti

-----------------------

IDTVEC(int0x80_syscall)
	...
#ifdef SMP
	call	_get_mplock
#endif /* SMP */
	incl	_cnt+V_SYSCALL
	movl	$SWI_AST_MASK,_cpl
	call	_syscall
	/*
	 * Return via _doreti to handle ASTs.
	 */
	pushl	$0		/* cpl to restore */
	subl	$4,%esp
	movb	$1,_intr_nesting_level
	MEXITCOUNT
	jmp	_doreti

===============================================================================

/usr/SMP/src/sys/i386/i386/machdep.c

init386(first)
	int first;
{
	...
	/* setup proc 0's pcb */
	...
	proc0.p_addr->u_pcb.pcb_mpnest = 1;
}

===============================================================================

/usr/SMP/src/sys/i386/i386/mp_machdep.c

startAllAPs( u_int bootAddr )
{
	...
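	/*
	 * (Annotation: the masks here and in cpu_switch suggest that
	 * mp_lock packs the owning CPU's APIC ID into its high byte and
	 * a recursion count into the low 24 bits, with 0xffffffff meaning
	 * "free".  This layout is inferred from the code, not documented.)
	 */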
	/*
	 * get the initial mp_lock with a count of 1 for the BSP
	 */
	mp_lock = (apic_base[ APIC_ID ] & APIC_ID_MASK) + 1;
	...
}

===============================================================================

/usr/SMP/src/sys/i386/i386/mplock.s

NON_GPROF_ENTRY(MPgetlock)
	...
NON_GPROF_ENTRY(MPrellock)
	...
NON_GPROF_ENTRY(get_mplock)
	...
NON_GPROF_ENTRY(rel_mplock)
	...

===============================================================================

/usr/SMP/src/sys/i386/i386/swtch.s

ENTRY(cpu_switch)
	...
	testl	%ecx,%ecx
	je	sw1
#ifdef SMP
	movb	P_ONCPU(%ecx), %al	/* save "last" cpu */
	movb	%al, P_LASTCPU(%ecx)
	movb	$0xff, P_ONCPU(%ecx)	/* "leave" the cpu */
#endif
	...
#ifdef SMP
	movl	_mp_lock, %eax
	cmpl	$0xffffffff, %eax	/* is it free? */
	je	badsw			/* yes, bad medicine! */
	andl	$0x00ffffff, %eax	/* clear CPU portion */
	movl	%eax,PCB_MPNEST(%ecx)	/* store it */
#endif /* SMP */
	...
	movb	$1,_intr_nesting_level	/* charge Intr, not Sys/Idle */
	SETCURPROC($0, %edi)
sw1:
	cli
	...
	movb	$0,_intr_nesting_level
#ifdef SMP
	movl	_apic_base, %eax	/* base addr of LOCAL APIC */
	...
	movl	APIC_ID(%eax), %eax	/* APIC ID register */
	andl	$APIC_ID_MASK, %eax	/* extract ID portion */
	orl	PCB_MPNEST(%edx), %eax	/* add count from PROC */
	movl	%eax, _mp_lock		/* load the mp_lock */
#endif /* SMP */
	...
	sti
	ret

===============================================================================
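For illustration only, here is a minimal C sketch of the semantics that
MPgetlock/MPrellock appear to implement, given the lock-word layout
inferred above (owner CPU's APIC ID in the high byte, nest count in the
low 24 bits, 0xffffffff when free).  This is not the mplock.s code:
cpu_id() is a hypothetical stand-in for reading the local APIC ID, and
C11 atomics stand in for the lock-prefixed instructions.

#include <stdatomic.h>
#include <stdint.h>

#define MP_FREE		0xffffffffu	/* "is it free?" value (swtch.s) */
#define MP_CPU_SHIFT	24		/* owner id lives in the high byte */
#define MP_COUNT_MASK	0x00ffffffu	/* "clear CPU portion" mask */

static _Atomic uint32_t mp_lock_model = MP_FREE;

extern uint32_t cpu_id(void);		/* hypothetical: local APIC ID */

/* Spin until this CPU owns the lock; re-entry just bumps the count. */
static void
model_get_mplock(void)
{
	uint32_t self = cpu_id() << MP_CPU_SHIFT;
	uint32_t old, new;

	for (;;) {
		old = atomic_load(&mp_lock_model);
		if (old == MP_FREE)
			new = self + 1;		/* first acquisition */
		else if ((old & ~MP_COUNT_MASK) == self)
			new = old + 1;		/* nested acquisition */
		else
			continue;		/* another CPU owns it: spin */
		if (atomic_compare_exchange_weak(&mp_lock_model, &old, new))
			return;
	}
}

/* Drop one level; the outermost release marks the lock free. */
static void
model_rel_mplock(void)
{
	uint32_t old = atomic_load(&mp_lock_model);

	atomic_store(&mp_lock_model,
	    (old & MP_COUNT_MASK) == 1 ? MP_FREE : old - 1);
}

The nest count is what lets a CPU re-enter get_mplock from a nested trap
or interrupt without deadlocking; it is this count that cpu_switch saves
into pcb_mpnest on switch-out and ORs back together with the APIC ID on
switch-in, and that init386 seeds to 1 for proc0.

===============================================================================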