--- //depot/projects/smpng/sys/alpha/alpha/machdep.c	2005/01/05 22:38:38
+++ //depot/user/jhb/lock/alpha/alpha/machdep.c	2005/01/13 21:40:07
@@ -2406,3 +2406,27 @@
 	pcpu->pc_idlepcb.apcb_ptbr = thread0.td_pcb->pcb_hw.apcb_ptbr;
 	pcpu->pc_current_asngen = 1;
 }
+
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_ipl = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_ipl);
+}
--- //depot/projects/smpng/sys/alpha/alpha/mp_machdep.c	2004/12/15 21:41:34
+++ //depot/user/jhb/lock/alpha/alpha/mp_machdep.c	2005/01/13 22:04:55
@@ -144,6 +144,10 @@
 	/* Clear userland thread pointer. */
 	alpha_pal_wrunique(0);
 
+	/* Initialize curthread. */
+	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+	PCPU_SET(curthread, PCPU_GET(idlethread));
+
 	/*
 	 * Point interrupt/exception vectors to our own.
 	 */
@@ -205,11 +209,24 @@
 	while (smp_started == 0)
 		; /* nothing */
 
+	/* ok, now grab sched_lock and enter the scheduler */
+	mtx_lock_spin(&sched_lock);
+
+	/*
+	 * Correct spinlock nesting.  The idle thread context that we are
+	 * borrowing was created so that it would start out with a single
+	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
+	 * explicitly acquired locks in this function, the nesting count
+	 * is now 2 rather than 1.  Since we are nested, calling
+	 * spinlock_exit() will simply adjust the counts without allowing
+	 * spin lock using code to interrupt us.
+	 */
+	spinlock_exit();
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);
 
-	/* ok, now grab sched_lock and enter the scheduler */
-	mtx_lock_spin(&sched_lock);
 	cpu_throw(NULL, choosethread());	/* doesn't return */
 
 	panic("scheduler returned us to %s", __func__);
--- //depot/projects/smpng/sys/alpha/alpha/vm_machdep.c	2005/01/13 21:06:03
+++ //depot/user/jhb/lock/alpha/alpha/vm_machdep.c	2005/01/14 15:57:39
@@ -202,6 +202,10 @@
 	 */
 	td2->td_md.md_kernnest = 1;
 #endif
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_ipl = ALPHA_PSL_IPL_0;
 }
 
 /*
@@ -319,6 +323,10 @@
 	 */
 	td->td_md.md_kernnest = 1;
 #endif
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_ipl = ALPHA_PSL_IPL_0;
 }
 
 void
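The alpha implementation above is the template for every MD version in this patch:
the interrupt state is saved only on the 0 -> 1 transition of md_spinlock_count and
restored only on the 1 -> 0 transition, so nested spin locks never clobber the saved
IPL.  A stand-alone userland model of that counting discipline (the intr_disable()
and intr_restore() stubs below are illustrative placeholders, not the kernel
routines):

#include <assert.h>
#include <stdio.h>

static int md_spinlock_count;
static int md_saved_ipl;
static int cur_ipl;			/* models the CPU interrupt level */

static int  intr_disable(void) { int s = cur_ipl; cur_ipl = 7; return (s); }
static void intr_restore(int s) { cur_ipl = s; }

static void
spinlock_enter(void)
{
	if (md_spinlock_count == 0)
		md_saved_ipl = intr_disable();	/* save state on first lock */
	md_spinlock_count++;
}

static void
spinlock_exit(void)
{
	md_spinlock_count--;
	if (md_spinlock_count == 0)
		intr_restore(md_saved_ipl);	/* restore on last unlock */
}

int
main(void)
{
	spinlock_enter();	/* count 0 -> 1, IPL raised and saved */
	spinlock_enter();	/* count 1 -> 2, nested: saved IPL untouched */
	spinlock_exit();	/* count 2 -> 1, interrupts still blocked */
	assert(cur_ipl == 7);
	spinlock_exit();	/* count 1 -> 0, saved IPL restored */
	assert(cur_ipl == 0);
	printf("nesting model OK\n");
	return (0);
}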
--- //depot/projects/smpng/sys/alpha/include/proc.h	2005/01/05 22:38:38
+++ //depot/user/jhb/lock/alpha/include/proc.h	2005/01/06 18:27:35
@@ -52,7 +52,8 @@
 	u_int64_t	md_hae;		/* user HAE register value */
 	void		*osf_sigtramp;	/* user-level signal trampoline */
 	u_int		md_kernnest;	/* nesting level in the kernel */
-	register_t	md_savecrit;	/* save PSL for critical section */
+	register_t	md_saved_ipl;	/* save IPL for critical section */
+	u_int		md_spinlock_count;
 };
 
 #define	MDP_UAC_NOPRINT	0x0010	/* Don't print unaligned traps */
--- //depot/projects/smpng/sys/amd64/amd64/machdep.c	2004/12/15 21:41:34
+++ //depot/user/jhb/lock/amd64/amd64/machdep.c	2005/01/13 21:40:07
@@ -1287,6 +1287,30 @@
 	pcpu->pc_acpi_id = 0xffffffff;
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_flags = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_flags);
+}
+
 /*
  * Construct a PCB from a trapframe. This is called from kdb_trap() where
  * we want to start a backtrace from the function that caused us to enter
--- //depot/projects/smpng/sys/amd64/amd64/mp_machdep.c	2004/11/05 19:22:55
+++ //depot/user/jhb/lock/amd64/amd64/mp_machdep.c	2005/01/13 22:04:55
@@ -452,6 +452,10 @@
 		panic("cpuid mismatch! boom!!");
 	}
 
+	/* Initialize curthread. */
+	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+	PCPU_SET(curthread, PCPU_GET(idlethread));
+
 	mtx_lock_spin(&ap_boot_mtx);
 
 	/* Init local apic for irq's */
@@ -490,6 +494,18 @@
 	/* ok, now grab sched_lock and enter the scheduler */
 	mtx_lock_spin(&sched_lock);
 
+	/*
+	 * Correct spinlock nesting.  The idle thread context that we are
+	 * borrowing was created so that it would start out with a single
+	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
+	 * explicitly acquired locks in this function, the nesting count
+	 * is now 2 rather than 1.  Since we are nested, calling
+	 * spinlock_exit() will simply adjust the counts without allowing
+	 * spin lock using code to interrupt us.
+	 */
+	spinlock_exit();
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);
 
--- //depot/projects/smpng/sys/amd64/amd64/vm_machdep.c	2005/01/13 21:06:03
+++ //depot/user/jhb/lock/amd64/amd64/vm_machdep.c	2005/01/14 15:57:39
@@ -142,7 +142,7 @@
 	pcb2->pcb_rsp = (register_t)td2->td_frame - sizeof(void *);
 	pcb2->pcb_rbx = (register_t)td2;	/* fork_trampoline argument */
 	pcb2->pcb_rip = (register_t)fork_trampoline;
-	pcb2->pcb_rflags = td2->td_frame->tf_rflags & ~PSL_I; /* ints disabled */
+	pcb2->pcb_rflags = PSL_KERNEL;	/* ints disabled */
 	/*-
 	 * pcb2->pcb_dr*: cloned above.
 	 * pcb2->pcb_savefpu: cloned above.
@@ -151,6 +151,10 @@
 	 * pcb2->pcb_[fg]sbase: cloned above
 	 */
 
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
+
 	/*
 	 * Now, cpu_switch() can schedule the new process.
 	 * pcb_rsp is loaded pointing to the cpu_switch() stack frame
@@ -282,6 +286,10 @@
 	 * pcb2->pcb_onfault: cloned above (always NULL here?).
 	 * pcb2->pcb_[fg]sbase: cloned above
 	 */
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
 }
 
 /*
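On amd64 (and likewise i386/pc98 below), cpu_fork() now starts the child with a
bare PSL_KERNEL, so PSL_I is clear and interrupts stay off until fork_exit()
releases sched_lock: that mtx_unlock_spin() ends in spinlock_exit(), which sees
md_spinlock_count drop to 0 and restores the pre-seeded md_saved_flags of
PSL_KERNEL | PSL_I.  A toy check of that flag flow (the constant values here are
assumed to match machine/psl.h and are not taken from this patch):

#include <assert.h>

#define PSL_MBO		0x00000002	/* must-be-one bit (assumed value) */
#define PSL_KERNEL	PSL_MBO		/* assumed kernel eflags value */
#define PSL_I		0x00000200	/* interrupt enable flag (assumed) */

int
main(void)
{
	unsigned long rflags = PSL_KERNEL;		/* as set by cpu_fork() */
	unsigned long saved = PSL_KERNEL | PSL_I;	/* md_saved_flags */

	assert((rflags & PSL_I) == 0);	/* child starts with interrupts off */
	rflags = saved;		/* intr_restore() in the final spinlock_exit() */
	assert(rflags & PSL_I);	/* fork_exit()'s unlock enables interrupts */
	return (0);
}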
--- //depot/projects/smpng/sys/amd64/include/proc.h	2005/01/05 22:38:38
+++ //depot/user/jhb/lock/amd64/include/proc.h	2005/01/06 18:27:35
@@ -37,7 +37,8 @@
  * Machine-dependent part of the proc structure for AMD64.
  */
 struct mdthread {
-	register_t md_savecrit;
+	int	md_spinlock_count;	/* (k) */
+	register_t md_saved_flags;	/* (k) */
 };
 
 struct mdproc {
--- //depot/projects/smpng/sys/arm/arm/machdep.c	2005/01/14 15:30:06
+++ //depot/user/jhb/lock/arm/arm/machdep.c	2005/01/14 15:57:39
@@ -368,6 +368,30 @@
 {
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_cspr = disable_interrupts(I32_bit | F32_bit);
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		restore_interrupts(td->td_md.md_saved_cspr);
+}
+
 /*
  * Clear registers on exec
  */
--- //depot/projects/smpng/sys/arm/arm/vm_machdep.c	2005/01/05 22:38:38
+++ //depot/user/jhb/lock/arm/arm/vm_machdep.c	2005/01/06 18:27:35
@@ -129,6 +129,10 @@
 	tf->tf_r0 = 0;
 	tf->tf_r1 = 0;
 	pcb2->un_32.pcb32_sp = (u_int)sf;
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_cspr = 0;
 }
 
 void
@@ -263,6 +267,10 @@
 	td->td_pcb->un_32.pcb32_sp = (u_int)sf;
 	td->td_pcb->un_32.pcb32_und_sp = td->td_kstack + td->td_kstack_pages
 	    * PAGE_SIZE + USPACE_UNDEF_STACK_TOP;
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_cspr = 0;
 }
 
 /*
--- //depot/projects/smpng/sys/arm/include/proc.h	2005/01/14 15:30:06
+++ //depot/user/jhb/lock/arm/include/proc.h	2005/01/14 15:57:39
@@ -46,7 +46,8 @@
 };
 
 struct mdthread {
-	register_t md_savecrit;
+	int	md_spinlock_count;	/* (k) */
+	register_t md_saved_cspr;	/* (k) */
 	int	md_ptrace_instr;
 	int	md_ptrace_addr;
 };
--- //depot/projects/smpng/sys/conf/files.alpha	2004/11/23 23:10:26
+++ //depot/user/jhb/lock/conf/files.alpha	2004/11/23 23:15:03
@@ -43,7 +43,6 @@
 alpha/alpha/clock.c		standard
 alpha/alpha/clock_if.m		standard
 alpha/alpha/cpuconf.c		standard
-alpha/alpha/critical.c		standard
 alpha/alpha/db_disasm.c		optional	ddb
 alpha/alpha/db_interface.c	optional	ddb
 alpha/alpha/db_trace.c		optional	ddb
--- //depot/projects/smpng/sys/conf/files.amd64	2004/12/23 19:56:09
+++ //depot/user/jhb/lock/conf/files.amd64	2004/12/27 16:17:42
@@ -68,7 +68,6 @@
 amd64/amd64/bios.c		standard
 amd64/amd64/busdma_machdep.c	standard
 amd64/amd64/cpu_switch.S	standard
-amd64/amd64/critical.c		standard
 amd64/amd64/db_disasm.c		optional	ddb
 amd64/amd64/db_interface.c	optional	ddb
 amd64/amd64/db_trace.c		optional	ddb
--- //depot/projects/smpng/sys/conf/files.arm	2004/11/23 23:10:26
+++ //depot/user/jhb/lock/conf/files.arm	2004/11/23 23:15:03
@@ -12,7 +12,6 @@
 arm/arm/cpufunc_asm_sa1.S	standard
 arm/arm/cpufunc_asm_armv4.S	standard
 arm/arm/cpufunc_asm_sa11x0.S	standard
-arm/arm/critical.c		standard
 arm/arm/db_disasm.c		optional	ddb
 arm/arm/db_interface.c		optional	ddb
 arm/arm/db_trace.c		optional	ddb
--- //depot/projects/smpng/sys/conf/files.i386	2005/01/14 15:30:06
+++ //depot/user/jhb/lock/conf/files.i386	2005/01/14 15:57:39
@@ -218,7 +218,6 @@
 i386/i386/bios.c		standard
 i386/i386/bioscall.s		standard
 i386/i386/busdma_machdep.c	standard
-i386/i386/critical.c		standard
 i386/i386/db_disasm.c		optional	ddb
 i386/i386/db_interface.c	optional	ddb
 i386/i386/db_trace.c		optional	ddb
--- //depot/projects/smpng/sys/conf/files.ia64	2004/11/23 23:10:26
+++ //depot/user/jhb/lock/conf/files.ia64	2004/11/23 23:15:03
@@ -90,7 +90,6 @@
 ia64/ia64/clock.c		standard
 ia64/ia64/clock_if.m		standard
 ia64/ia64/context.S		standard
-ia64/ia64/critical.c		standard
 ia64/ia64/db_interface.c	optional	ddb
 ia64/ia64/db_trace.c		optional	ddb
 ia64/ia64/dump_machdep.c	standard
--- //depot/projects/smpng/sys/conf/files.pc98	2005/01/14 15:30:06
+++ //depot/user/jhb/lock/conf/files.pc98	2005/01/14 15:57:39
@@ -160,7 +160,6 @@
 i386/i386/bios.c		standard
 i386/i386/bioscall.s		standard
 i386/i386/busdma_machdep.c	standard
-i386/i386/critical.c		standard
 i386/i386/db_disasm.c		optional	ddb
 i386/i386/db_interface.c	optional	ddb
 i386/i386/db_trace.c		optional	ddb
--- //depot/projects/smpng/sys/conf/files.powerpc	2004/08/20 17:10:02
+++ //depot/user/jhb/lock/conf/files.powerpc	2004/11/05 18:37:00
@@ -36,7 +36,6 @@
 powerpc/powerpc/copyinout.c	standard
 powerpc/powerpc/copystr.c	standard
 powerpc/powerpc/cpu.c		standard
-powerpc/powerpc/critical.c	standard
 powerpc/powerpc/elf_machdep.c	standard
 powerpc/powerpc/fpu.c		standard
 powerpc/powerpc/fuswintr.c	standard
--- //depot/projects/smpng/sys/conf/files.sparc64	2004/11/22 20:01:11
+++ //depot/user/jhb/lock/conf/files.sparc64	2004/11/22 20:16:39
@@ -74,7 +74,6 @@
 sparc64/sparc64/cheetah.c	standard
 sparc64/sparc64/clock.c		standard
 sparc64/sparc64/counter.c	standard
-sparc64/sparc64/critical.c	standard
 sparc64/sparc64/db_disasm.c	optional	ddb
 sparc64/sparc64/db_interface.c	optional	ddb
 sparc64/sparc64/db_trace.c	optional	ddb
--- //depot/projects/smpng/sys/i386/i386/machdep.c	2005/01/04 15:43:29
+++ //depot/user/jhb/lock/i386/i386/machdep.c	2005/01/13 20:47:09
@@ -2211,6 +2211,30 @@
 	pcpu->pc_acpi_id = 0xffffffff;
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_flags = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_flags);
+}
+
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 static void f00f_hack(void *unused);
 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
--- //depot/projects/smpng/sys/i386/i386/mp_machdep.c	2004/12/23 20:48:47
+++ //depot/user/jhb/lock/i386/i386/mp_machdep.c	2005/01/13 20:43:58
@@ -432,6 +432,7 @@
 void
 init_secondary(void)
 {
+	vm_offset_t addr;
 	int	gsel_tss;
 	int	x, myid;
 	u_int	cr0;
@@ -489,7 +490,8 @@
 
 	/* BSP may have changed PTD while we were waiting */
 	invltlb();
-	pmap_invalidate_range(kernel_pmap, 0, NKPT * NBPDR - 1);
+	for (addr = 0; addr < NKPT * NBPDR - 1; addr += PAGE_SIZE)
+		invlpg(addr);
 
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 	lidt(&r_idt);
@@ -513,6 +515,10 @@
 		panic("cpuid mismatch! boom!!");
 	}
 
+	/* Initialize curthread. */
+	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+	PCPU_SET(curthread, PCPU_GET(idlethread));
+
 	mtx_lock_spin(&ap_boot_mtx);
 
 	/* Init local apic for irq's */
@@ -551,6 +557,18 @@
 	/* ok, now grab sched_lock and enter the scheduler */
 	mtx_lock_spin(&sched_lock);
 
+	/*
+	 * Correct spinlock nesting.  The idle thread context that we are
+	 * borrowing was created so that it would start out with a single
+	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
+	 * explicitly acquired locks in this function, the nesting count
+	 * is now 2 rather than 1.  Since we are nested, calling
+	 * spinlock_exit() will simply adjust the counts without allowing
+	 * spin lock using code to interrupt us.
+	 */
+	spinlock_exit();
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);
 
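The nesting comment above can be made concrete: the borrowed idle thread enters
init_secondary() with md_spinlock_count preset to 1 (as if fork_trampoline() had
handed it sched_lock), the explicit mtx_lock_spin() pushes the count to 2, and the
lone spinlock_exit() brings it back to 1 without unblocking interrupts.  A
compilable model of that sequence (the stubs stand in for the real lock and
interrupt primitives):

#include <assert.h>

static int md_spinlock_count = 1;	/* borrowed idle thread context */
static int intr_blocked = 1;

static void spinlock_enter(void) { if (md_spinlock_count++ == 0) intr_blocked = 1; }
static void spinlock_exit(void)  { if (--md_spinlock_count == 0) intr_blocked = 0; }

int
main(void)
{
	/* mtx_lock_spin(&sched_lock) in init_secondary(): count 1 -> 2. */
	spinlock_enter();
	assert(md_spinlock_count == 2);

	/* The correction: count 2 -> 1, interrupts stay blocked. */
	spinlock_exit();
	assert(md_spinlock_count == 1 && intr_blocked);
	return (0);
}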
--- //depot/projects/smpng/sys/i386/i386/vm_machdep.c	2005/01/13 21:06:03
+++ //depot/user/jhb/lock/i386/i386/vm_machdep.c	2005/01/14 15:57:39
@@ -260,6 +260,10 @@
 	}
 	mtx_unlock_spin(&sched_lock);
 
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
+
 	/*
 	 * Now, cpu_switch() can schedule the new process.
 	 * pcb_esp is loaded pointing to the cpu_switch() stack frame
@@ -416,7 +420,7 @@
 	pcb2->pcb_esp = (int)td->td_frame - sizeof(void *); /* trampoline arg */
 	pcb2->pcb_ebx = (int)td;	/* trampoline arg */
 	pcb2->pcb_eip = (int)fork_trampoline;
-	pcb2->pcb_psl &= ~(PSL_I);	/* interrupts must be disabled */
+	pcb2->pcb_psl = PSL_KERNEL;	/* ints disabled */
 	pcb2->pcb_gs = rgs();
 	/*
 	 * If we didn't copy the pcb, we'd need to do the following registers:
@@ -427,7 +431,11 @@
 	 * pcb2->pcb_gs: cloned above.  XXXKSE ???
 	 * pcb2->pcb_ext: cleared below.
 	 */
 	pcb2->pcb_ext = NULL;
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_flags = PSL_KERNEL | PSL_I;
 }
 
 /*
--- //depot/projects/smpng/sys/i386/include/proc.h	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/i386/include/proc.h	2005/01/07 18:43:27
@@ -47,7 +47,8 @@
  * Machine-dependent part of the proc structure for i386.
  */
 struct mdthread {
-	register_t md_savecrit;
+	int	md_spinlock_count;	/* (k) */
+	register_t md_saved_flags;	/* (k) */
 };
 
 struct mdproc {
--- //depot/projects/smpng/sys/ia64/ia64/machdep.c	2004/12/15 21:41:34
+++ //depot/user/jhb/lock/ia64/ia64/machdep.c	2005/01/13 21:40:07
@@ -381,6 +381,30 @@
 }
 
 void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_intr = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_intr);
+}
+
+void
 map_pal_code(void)
 {
 	pt_entry_t pte;
--- //depot/projects/smpng/sys/ia64/ia64/mp_machdep.c	2004/02/24 18:59:26
+++ //depot/user/jhb/lock/ia64/ia64/mp_machdep.c	2005/01/13 22:04:55
@@ -111,17 +111,33 @@
 	ia64_mca_save_state(SAL_INFO_MCA);
 	ia64_mca_save_state(SAL_INFO_CMC);
 
+	/* Initialize curthread. */
+	KASSERT(PCPU_GET(idlethread) != NULL, ("no idle thread"));
+	PCPU_SET(curthread, PCPU_GET(idlethread));
+
 	ap_awake++;
 	while (!smp_started)
 		/* spin */;
 
 	CTR1(KTR_SMP, "SMP: cpu%d launched", PCPU_GET(cpuid));
 
+	mtx_lock_spin(&sched_lock);
+
+	/*
+	 * Correct spinlock nesting.  The idle thread context that we are
+	 * borrowing was created so that it would start out with a single
+	 * spin lock (sched_lock) held in fork_trampoline().  Since we've
+	 * explicitly acquired locks in this function, the nesting count
+	 * is now 2 rather than 1.  Since we are nested, calling
+	 * spinlock_exit() will simply adjust the counts without allowing
+	 * spin lock using code to interrupt us.
+	 */
+	spinlock_exit();
+	KASSERT(curthread->td_md.md_spinlock_count == 1, ("invalid count"));
+
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);
 
-	mtx_lock_spin(&sched_lock);
-
 	ia64_set_tpr(0);
 
 	/* kick off the clock on this AP */
--- //depot/projects/smpng/sys/ia64/ia64/vm_machdep.c	2005/01/13 21:06:03
+++ //depot/user/jhb/lock/ia64/ia64/vm_machdep.c	2005/01/14 15:57:39
@@ -158,6 +158,10 @@
 	pcb->pcb_special.sp = (uintptr_t)tf - 16;
 	pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
 	cpu_set_fork_handler(td, (void (*)(void*))fork_return, td);
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_intr = 1;
 }
 
 void
@@ -271,6 +275,10 @@
 	td2->td_pcb->pcb_special.sp = (uintptr_t)stackp - 16;
 	td2->td_pcb->pcb_special.rp = FDESC_FUNC(fork_trampoline);
 	cpu_set_fork_handler(td2, (void (*)(void*))fork_return, td2);
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_intr = 1;
 }
 
 /*
--- //depot/projects/smpng/sys/ia64/include/proc.h	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/ia64/include/proc.h	2005/01/07 18:43:27
@@ -30,7 +30,8 @@
 #define	_MACHINE_PROC_H_
 
 struct mdthread {
-	register_t	md_savecrit;
+	int		md_spinlock_count;	/* (k) */
+	register_t	md_saved_intr;		/* (k) */
 };
 
 struct mdproc {
--- //depot/projects/smpng/sys/kern/kern_fork.c	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/kern/kern_fork.c	2005/01/07 18:43:27
@@ -72,7 +72,6 @@
 #include
 #include
 
-#include <machine/critical.h>
 
 #ifndef _SYS_SYSPROTO_H_
 struct fork_args {
@@ -764,7 +763,6 @@
 
 	sched_lock.mtx_lock = (uintptr_t)td;
 	mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);
-	cpu_critical_fork_exit();
 	CTR4(KTR_PROC, "fork_exit: new thread %p (kse %p, pid %d, %s)",
 	    td, td->td_sched, p->p_pid, p->p_comm);
 
--- //depot/projects/smpng/sys/kern/kern_idle.c	2004/09/03 14:14:21
+++ //depot/user/jhb/lock/kern/kern_idle.c	2005/01/13 15:57:13
@@ -65,10 +65,6 @@
 		error = kthread_create(idle_proc, NULL, &p,
 		    RFSTOPPED | RFHIGHPID, 0, "idle: cpu%d", pc->pc_cpuid);
 		pc->pc_idlethread = FIRST_THREAD_IN_PROC(p);
-		if (pc->pc_curthread == NULL) {
-			pc->pc_curthread = pc->pc_idlethread;
-			pc->pc_idlethread->td_critnest = 0;
-		}
 #else
 		error = kthread_create(idle_proc, NULL, &p,
 		    RFSTOPPED | RFHIGHPID, 0, "idle");
--- //depot/projects/smpng/sys/kern/kern_mutex.c	2005/01/05 22:38:38
+++ //depot/user/jhb/lock/kern/kern_mutex.c	2005/01/06 18:27:35
@@ -594,7 +602,7 @@
 			break;
 
 		/* Give interrupts a chance while we spin. */
-		critical_exit();
+		spinlock_exit();
 #ifdef __i386__
 		apic_hack = (read_eflags() & PSL_I) == 0;
 		if (apic_hack)
@@ -622,7 +630,7 @@
 		if (apic_hack)
 			APIC_IPI_SPINWAIT_EXIT();
 #endif
-		critical_enter();
+		spinlock_enter();
 	}
 
 	if (LOCK_LOG_TEST(&m->mtx_object, opts))
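Taken together, the two kern_mutex.c fragments mean the spin-wait path no longer
sits with interrupts blocked: it drops the blockade via spinlock_exit() while the
lock is owned by another CPU and re-establishes it before retrying the acquire.
A stand-alone paraphrase of that loop shape (single-threaded model: the stub
"owner" releases the lock on the first poll, and the i386 APIC workaround from the
real code is elided):

#include <stdio.h>

#define MTX_UNOWNED	4

static volatile unsigned long mtx_lock = 1;	/* model: starts owned elsewhere */

static int
_obtain_lock(void)
{
	if (mtx_lock != MTX_UNOWNED)
		return (0);
	mtx_lock = 2;			/* model: we now own it */
	return (1);
}

static void spinlock_enter(void) { /* block interrupts (MD, as above) */ }
static void spinlock_exit(void)  { /* unblock if outermost (MD, as above) */ }

static void
cpu_spinwait(void)
{
	mtx_lock = MTX_UNOWNED;		/* model: the owner drops the lock */
}

int
main(void)
{
	for (;;) {
		if (_obtain_lock())
			break;
		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (mtx_lock != MTX_UNOWNED)
			cpu_spinwait();
		spinlock_enter();
	}
	printf("lock acquired after spinning with interrupts allowed\n");
	return (0);
}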
--- //depot/projects/smpng/sys/kern/kern_proc.c	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/kern/kern_proc.c	2005/01/07 18:43:27
@@ -63,7 +63,6 @@
 #include
 #include
 #include
-#include <machine/critical.h>
 
 MALLOC_DEFINE(M_PGRP, "pgrp", "process group header");
 MALLOC_DEFINE(M_SESSION, "session", "session header");
--- //depot/projects/smpng/sys/kern/kern_switch.c	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/kern/kern_switch.c	2005/01/07 18:43:27
@@ -105,7 +105,6 @@
 #if defined(SMP) && (defined(__i386__) || defined(__amd64__))
 #include
 #endif
-#include <machine/critical.h>
 #if defined(SMP) && defined(SCHED_4BSD)
 #include
 #endif
@@ -568,8 +567,6 @@
 	struct thread *td;
 
 	td = curthread;
-	if (td->td_critnest == 0)
-		cpu_critical_enter(td);
 	td->td_critnest++;
 	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
 	    (long)td->td_proc->p_pid, td->td_proc->p_comm, td->td_critnest);
@@ -597,7 +594,6 @@
 		}
 #endif
 		td->td_critnest = 0;
-		cpu_critical_exit(td);
 	} else {
 		td->td_critnest--;
 	}
--- //depot/projects/smpng/sys/pc98/i386/machdep.c	2004/12/15 21:41:34
+++ //depot/user/jhb/lock/pc98/i386/machdep.c	2005/01/13 21:40:07
@@ -2257,6 +2257,30 @@
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_flags = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_flags);
+}
+
 #if defined(I586_CPU) && !defined(NO_F00F_HACK)
 static void f00f_hack(void *unused);
 SYSINIT(f00f_hack, SI_SUB_INTRINSIC, SI_ORDER_FIRST, f00f_hack, NULL)
--- //depot/projects/smpng/sys/powerpc/include/proc.h	2002/04/01 17:58:24
+++ //depot/user/jhb/lock/powerpc/include/proc.h	2004/11/23 19:46:21
@@ -39,7 +39,8 @@
  * Machine-dependent part of the proc structure
  */
 struct mdthread {
-	register_t	md_savecrit;
+	int		md_spinlock_count;	/* (k) */
+	register_t	md_saved_msr;		/* (k) */
 };
 
 struct mdproc {
--- //depot/projects/smpng/sys/powerpc/powerpc/machdep.c	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/powerpc/powerpc/machdep.c	2005/01/13 21:40:07
@@ -875,6 +875,30 @@
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0)
+		td->td_md.md_saved_msr = intr_disable();
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		intr_restore(td->td_md.md_saved_msr);
+}
+
 /*
  * kcopy(const void *src, void *dst, size_t len);
 *
--- //depot/projects/smpng/sys/powerpc/powerpc/vm_machdep.c	2005/01/13 21:06:03
+++ //depot/user/jhb/lock/powerpc/powerpc/vm_machdep.c	2005/01/14 15:57:39
@@ -154,6 +154,10 @@
 	pcb->pcb_lr = (register_t)fork_trampoline;
 	pcb->pcb_usr = kernel_pmap->pm_sr[USER_SR];
 
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_msr = PSL_KERNSET;
+
 	/*
 	 * Now cpu_switch() can schedule the new process.
 	 */
@@ -322,6 +326,10 @@
 	pcb2->pcb_sp = (register_t)cf;
 	pcb2->pcb_lr = (register_t)fork_trampoline;
 	pcb2->pcb_usr = kernel_pmap->pm_sr[USER_SR];
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_msr = PSL_KERNSET;
 }
 
 void
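With the cpu_critical_enter()/cpu_critical_exit() hooks gone, the kern_switch.c
routines above become pure nesting-count bookkeeping.  Reconstructed from the
surviving context lines (tracing and the deferred-preemption path are elided, so
treat this as a sketch rather than the verbatim result):

void
critical_enter(void)
{
	struct thread *td;

	td = curthread;
	td->td_critnest++;	/* no MD hook on the 0 -> 1 transition anymore */
}

void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		/* ...deferred preemption handling elided... */
		td->td_critnest = 0;	/* no more cpu_critical_exit() */
	} else
		td->td_critnest--;
}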
--- //depot/projects/smpng/sys/sparc64/include/proc.h	2005/01/07 18:05:05
+++ //depot/user/jhb/lock/sparc64/include/proc.h	2005/01/07 18:43:27
@@ -42,7 +42,8 @@
 };
 
 struct mdthread {
-	register_t	md_savecrit;
+	int		md_spinlock_count;	/* (k) */
+	register_t	md_saved_pil;		/* (k) */
 };
 
 struct mdproc {
--- //depot/projects/smpng/sys/sparc64/sparc64/machdep.c	2005/01/13 21:22:16
+++ //depot/user/jhb/lock/sparc64/sparc64/machdep.c	2005/01/14 15:57:39
@@ -230,6 +230,32 @@
 	}
 }
 
+void
+spinlock_enter(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	if (td->td_md.md_spinlock_count == 0) {
+		td->td_md.md_saved_pil = rdpr(pil);
+		wrpr(pil, 0, 14);
+	}
+	td->td_md.md_spinlock_count++;
+	critical_enter();
+}
+
+void
+spinlock_exit(void)
+{
+	struct thread *td;
+
+	td = curthread;
+	critical_exit();
+	td->td_md.md_spinlock_count--;
+	if (td->td_md.md_spinlock_count == 0)
+		wrpr(pil, td->td_md.md_saved_pil, 0);
+}
+
 unsigned
 tick_get_timecount(struct timecounter *tc)
 {
--- //depot/projects/smpng/sys/sparc64/sparc64/mp_machdep.c	2004/10/01 14:25:19
+++ //depot/user/jhb/lock/sparc64/sparc64/mp_machdep.c	2005/01/14 18:01:45
@@ -309,9 +309,7 @@
 			continue;
 		KASSERT(pc->pc_idlethread != NULL,
 		    ("cpu_mp_unleash: idlethread"));
-		KASSERT(pc->pc_curthread == pc->pc_idlethread,
-		    ("cpu_mp_unleash: curthread"));
-
+		pc->pc_curthread = pc->pc_idlethread;
 		pc->pc_curpcb = pc->pc_curthread->td_pcb;
 		for (i = 0; i < PCPU_PAGES; i++) {
 			va = pc->pc_addr + i * PAGE_SIZE;
@@ -347,6 +345,7 @@
 	tick_start_ap();
 
 	smp_cpus++;
+	KASSERT(curthread != NULL, ("cpu_mp_bootstrap: curthread"));
 	PCPU_SET(other_cpus, all_cpus & ~(1 << PCPU_GET(cpuid)));
 	printf("SMP: AP CPU #%d Launched!\n", PCPU_GET(cpuid));
 
@@ -356,11 +355,11 @@
 	while (csa->csa_count != 0)
 		;
 
+	/* ok, now grab sched_lock and enter the scheduler */
+	mtx_lock_spin(&sched_lock);
+	spinlock_exit();
 	binuptime(PCPU_PTR(switchtime));
 	PCPU_SET(switchticks, ticks);
-
-	/* ok, now grab sched_lock and enter the scheduler */
-	mtx_lock_spin(&sched_lock);
 	cpu_throw(NULL, choosethread());	/* doesn't return */
 
 }
--- //depot/projects/smpng/sys/sparc64/sparc64/vm_machdep.c	2005/01/13 21:39:00
+++ //depot/user/jhb/lock/sparc64/sparc64/vm_machdep.c	2005/01/14 15:57:39
@@ -170,6 +170,10 @@
 	fr->fr_local[2] = (u_long)tf;
 	pcb->pcb_pc = (u_long)fork_trampoline - 8;
 	pcb->pcb_sp = (u_long)fr - SPOFF;
+
+	/* Setup to release sched_lock in fork_exit(). */
+	td->td_md.md_spinlock_count = 1;
+	td->td_md.md_saved_pil = 0;
 }
 
 void
@@ -281,6 +285,10 @@
 	pcb2->pcb_sp = (u_long)fp - SPOFF;
 	pcb2->pcb_pc = (u_long)fork_trampoline - 8;
 
+	/* Setup to release sched_lock in fork_exit(). */
+	td2->td_md.md_spinlock_count = 1;
+	td2->td_md.md_saved_pil = 0;
+
 	/*
 	 * Now, cpu_switch() can schedule the new process.
 	 */
--- //depot/projects/smpng/sys/sys/lock.h	2004/01/28 21:25:50
+++ //depot/user/jhb/lock/sys/lock.h	2004/11/05 00:22:59
@@ -196,6 +196,8 @@
 extern struct lock_class lock_class_mtx_spin;
 extern struct lock_class lock_class_sx;
 
+void	spinlock_enter(void);
+void	spinlock_exit(void);
 void	witness_init(struct lock_object *);
 void	witness_destroy(struct lock_object *);
 int	witness_defineorder(struct lock_object *, struct lock_object *);
--- //depot/projects/smpng/sys/sys/mutex.h	2005/01/05 22:38:38
+++ //depot/user/jhb/lock/sys/mutex.h	2005/01/06 18:27:35
@@ -167,7 +167,7 @@
 #define _get_spin_lock(mp, tid, opts, file, line) do {			\
 	struct thread *_tid = (tid);					\
 									\
-	critical_enter();						\
+	spinlock_enter();						\
 	if (!_obtain_lock((mp), _tid)) {				\
 		if ((mp)->mtx_lock == (uintptr_t)_tid)			\
 			(mp)->mtx_recurse++;				\
@@ -179,7 +179,7 @@
 #define _get_spin_lock(mp, tid, opts, file, line) do {			\
 	struct thread *_tid = (tid);					\
 									\
-	critical_enter();						\
+	spinlock_enter();						\
 	if ((mp)->mtx_lock == (uintptr_t)_tid)				\
 		(mp)->mtx_recurse++;					\
 	else {								\
@@ -207,8 +207,8 @@
  * Since spin locks are not _too_ common, inlining this code is not too big
  * a deal.
 *
- * Since we always perform a critical_enter() when attempting to acquire a
- * spin lock, we need to always perform a matching critical_exit() when
+ * Since we always perform a spinlock_enter() when attempting to acquire a
+ * spin lock, we need to always perform a matching spinlock_exit() when
 * releasing a spin lock.  This includes the recursion cases.
 */
 #ifndef _rel_spin_lock
@@ -218,7 +218,7 @@
 		(mp)->mtx_recurse--;					\
 	else								\
 		_release_lock_quick((mp));				\
-	critical_exit();						\
+	spinlock_exit();						\
 } while (0)
 #else /* SMP */
 #define _rel_spin_lock(mp) do {						\
@@ -226,7 +226,7 @@
 		(mp)->mtx_recurse--;					\
 	else								\
 		(mp)->mtx_lock = MTX_UNOWNED;				\
-	critical_exit();						\
+	spinlock_exit();						\
 } while (0)
 #endif /* SMP */
 #endif
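With these macro changes, mtx_lock_spin()/mtx_unlock_spin() on a recursed lock
expand (SMP case) along these lines; note that every acquire still bumps the
per-thread count via spinlock_enter(), so only the outermost release can
re-enable interrupts:

	/* mtx_lock_spin(&m) when curthread already owns m: */
	spinlock_enter();			/* md_spinlock_count++ */
	if (!_obtain_lock(&m, curthread)) {
		if (m.mtx_lock == (uintptr_t)curthread)
			m.mtx_recurse++;	/* recursion: no spinning */
		/* else: contested path, see kern_mutex.c above */
	}

	/* matching mtx_unlock_spin(&m): */
	if (m.mtx_recurse != 0)
		m.mtx_recurse--;		/* still held; count stays > 0 */
	else
		_release_lock_quick(&m);
	spinlock_exit();		/* outermost exit restores interrupt state */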