--- //depot/projects/smpng/sys/kern/kern_shutdown.c 2009/04/07 17:48:51
+++ //depot/user/jhb/preemption/kern/kern_shutdown.c 2009/04/07 19:19:09
@@ -56,6 +56,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -297,6 +298,10 @@
 	 */
 	EVENTHANDLER_INVOKE(shutdown_pre_sync, howto);
 
+	KASSERT(curthread->td_critnest == 0, ("boot called from a critical section"));
+#ifdef __i386__
+	KASSERT((read_eflags() & PSL_I), ("boot called with interrupts disabled"));
+#endif
 	/*
 	 * Now sync filesystems
 	 */
--- //depot/projects/smpng/sys/kern/kern_synch.c 2009/02/27 15:49:22
+++ //depot/user/jhb/preemption/kern/kern_synch.c 2009/02/27 16:32:01
@@ -377,6 +377,14 @@
 	panic("%s: did not reenter debugger", __func__);
 }
 
+u_long preempt_switches, total_switches;
+
+SYSCTL_NODE(_debug, OID_AUTO, switches, CTLFLAG_RD, NULL, "switch counts");
+SYSCTL_ULONG(_debug_switches, OID_AUTO, total, CTLFLAG_RD, &total_switches, 0,
+    "");
+SYSCTL_ULONG(_debug_switches, OID_AUTO, preempt, CTLFLAG_RD, &preempt_switches,
+    0, "");
+
 /*
  * The machine independent parts of context switching.
  */
@@ -408,10 +416,13 @@
 	 */
 	if (kdb_active)
 		kdb_switch();
+	total_switches++;
 	if (flags & SW_VOL)
 		td->td_ru.ru_nvcsw++;
-	else
+	else {
 		td->td_ru.ru_nivcsw++;
+		preempt_switches++;
+	}
 #ifdef SCHED_STATS
 	SCHED_STAT_INC(sched_switch_stats[flags & SW_TYPE_MASK]);
 #endif
--- //depot/projects/smpng/sys/notes 2009/02/18 22:05:55
+++ //depot/user/jhb/preemption/notes 2009/02/20 16:15:34
@@ -73,3 +73,22 @@
 - jhb_socket - socket hacking
 
 Space reserved for child branches:
++ Disable preemption when waking up several threads until they are all woken
+  up.
+  - Testing
++ Fixup some things that are really curthread only
+  - Testing
+- Testing
+  - x86 UP
+    - corrupted eip's on returns from faults (seem to be off by one? for
+      both userland and kernel)
+  - x86 SMP
+  - alpha
+  - sparc64
+  - amd64
+  - ia64
+- possibly stash the thread we should preempt to in a per-cpu variable while
+  in a critical section so that critical_exit() can avoid calling
+  choosethread()?
+  - we would have to defer the setrunqueue for this to work, that may not be
+    a good idea
--- //depot/projects/smpng/sys/vm/vm_glue.c 2008/08/05 21:26:01
+++ //depot/user/jhb/preemption/vm/vm_glue.c 2008/08/06 17:00:06
@@ -642,6 +642,7 @@
 		FOREACH_THREAD_IN_PROC(p, td)
 			vm_thread_swapin(td);
 		PROC_LOCK(p);
+		critical_enter();
 		swapclear(p);
 		p->p_swtick = ticks;
 
@@ -649,6 +650,7 @@
 
 		/* Allow other threads to swap p out now. */
 		--p->p_lock;
+		critical_exit();
 	}
 #endif /* NO_SWAPPING */
 }
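
A quick way to sanity-check the new counters from userland is the sketch below. It is not part of the diff: it only assumes the debug.switches.total and debug.switches.preempt OIDs declared by the SYSCTL_ULONG() lines in the kern_synch.c hunk, and the read_counter() helper name is purely illustrative. It fetches both values with sysctlbyname(3).

/*
 * Hypothetical userland helper: dump the context-switch counters exported
 * by the debug.switches sysctl node added in kern_synch.c above.
 */
#include <sys/types.h>
#include <sys/sysctl.h>

#include <stdio.h>
#include <stdlib.h>

static u_long
read_counter(const char *oid)
{
	u_long val;
	size_t len = sizeof(val);

	/* Read-only fetch; the OIDs are CTLFLAG_RD, so no new value is written. */
	if (sysctlbyname(oid, &val, &len, NULL, 0) == -1) {
		perror(oid);
		exit(1);
	}
	return (val);
}

int
main(void)
{
	u_long total, preempt;

	total = read_counter("debug.switches.total");
	preempt = read_counter("debug.switches.preempt");
	printf("total switches:       %lu\n", total);
	printf("involuntary switches: %lu\n", preempt);
	return (0);
}

The same numbers should also be visible from the shell with "sysctl debug.switches".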