Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC	(revision 240513)
+++ sys/amd64/conf/GENERIC	(working copy)
@@ -79,8 +79,8 @@
 options 	KDB			# Enable kernel debugger support.
 options 	DDB			# Support DDB.
 options 	GDB			# Support remote GDB.
 options 	DEADLKRES		# Enable the deadlock resolver
-options 	INVARIANTS		# Enable calls of extra sanity checking
-options 	INVARIANT_SUPPORT	# Extra sanity checks of internal structures, required by INVARIANTS
+#options 	INVARIANTS		# Enable calls of extra sanity checking
+#options 	INVARIANT_SUPPORT	# Extra sanity checks of internal structures, required by INVARIANTS
 options 	WITNESS			# Enable checks to detect deadlocks and cycles
 options 	WITNESS_SKIPSPIN	# Don't run witness on spinlocks for speed
 options 	MALLOC_DEBUG_MAXZONES=8	# Separate malloc(9) zones
Index: sys/sys/systm.h
===================================================================
--- sys/sys/systm.h	(revision 240513)
+++ sys/sys/systm.h	(working copy)
@@ -171,11 +171,37 @@
 void	g_waitidle(void);
 
 void	panic(const char *, ...) __dead2 __printflike(1, 2);
 
+void	_critical_enter(const char *, int);
+void	_critical_exit(const char *, int);
+void	_critical_exit_hard(struct thread *);
+
+#define	_critical_enter_fast() do {					\
+	curthread->td_critnest++;					\
+	__asm __volatile("" : : : "memory");				\
+} while (0)
+
+#define	_critical_exit_fast() do {					\
+	struct thread *__td;						\
+									\
+	__td = curthread;						\
+	__asm __volatile("" : : : "memory");				\
+	if (__td->td_critnest != 1)					\
+		__td->td_critnest--;					\
+	else								\
+		_critical_exit_hard(__td);				\
+} while (0)
+
+#if defined(INVARIANT_SUPPORT) || defined(INVARIANTS) || defined(KTR)
+#define	critical_enter()	_critical_enter(__FILE__, __LINE__)
+#define	critical_exit()		_critical_exit(__FILE__, __LINE__)
+#else
+#define	critical_enter()	_critical_enter_fast()
+#define	critical_exit()		_critical_exit_fast()
+#endif
+
 void	cpu_boot(int);
 void	cpu_flush_dcache(void *, size_t);
 void	cpu_rootconf(void);
-void	critical_enter(void);
-void	critical_exit(void);
 void	init_param1(void);
 void	init_param2(long physpages);
 void	init_static_kenv(char *, size_t);
Index: sys/kern/kern_switch.c
===================================================================
--- sys/kern/kern_switch.c	(revision 240513)
+++ sys/kern/kern_switch.c	(working copy)
@@ -178,47 +178,56 @@ retry:
  * regions of code in which preemptions are not allowed.
  */
 void
-critical_enter(void)
+_critical_enter(const char *file, int line)
 {
 	struct thread *td;
 
 	td = curthread;
 	td->td_critnest++;
-	CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td,
-	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
+	CTR6(KTR_CRITICAL, "critical_enter: thread %p (%ld, %s) to %d @ %s:%d",
+	    td, (long)td->td_proc->p_pid, td->td_name, td->td_critnest, file,
+	    line);
 }
 
 void
-critical_exit(void)
+_critical_exit(const char *file, int line)
 {
 	struct thread *td;
-	int flags;
 
 	td = curthread;
 	KASSERT(td->td_critnest != 0, ("critical_exit: td_critnest == 0"));
 
-	if (td->td_critnest == 1) {
-		td->td_critnest = 0;
-		if (td->td_owepreempt && !kdb_active) {
-			td->td_critnest = 1;
-			thread_lock(td);
-			td->td_critnest--;
-			flags = SW_INVOL | SW_PREEMPT;
-			if (TD_IS_IDLETHREAD(td))
-				flags |= SWT_IDLE;
-			else
-				flags |= SWT_OWEPREEMPT;
-			mi_switch(flags, NULL);
-			thread_unlock(td);
-		}
-	} else
+	if (td->td_critnest != 1)
 		td->td_critnest--;
+	else
+		_critical_exit_hard(td);
 
-	CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td,
-	    (long)td->td_proc->p_pid, td->td_name, td->td_critnest);
+	CTR6(KTR_CRITICAL, "critical_exit: thread %p (%ld, %s) to %d @ %s:%d",
+	    td, (long)td->td_proc->p_pid, td->td_name, td->td_critnest,
+	    file, line);
 }
 
+void
+_critical_exit_hard(struct thread *td)
+{
+	int flags;
+
+	td->td_critnest = 0;
+	if (td->td_owepreempt && !kdb_active) {
+		td->td_critnest = 1;
+		thread_lock(td);
+		td->td_critnest--;
+		flags = SW_INVOL | SW_PREEMPT;
+		if (TD_IS_IDLETHREAD(td))
+			flags |= SWT_IDLE;
+		else
+			flags |= SWT_OWEPREEMPT;
+		mi_switch(flags, NULL);
+		thread_unlock(td);
+	}
+}
+
 /************************************************************************
  * SYSTEM RUN QUEUE manipulations and tests				*
  ************************************************************************/
Index: sys/kern/subr_smp.c
===================================================================
--- sys/kern/subr_smp.c	(revision 240513)
+++ sys/kern/subr_smp.c	(working copy)
@@ -368,7 +368,7 @@ smp_rendezvous_action(void)
 	 * during this routine.
 	 */
 	td = curthread;
-	td->td_critnest++;
+	critical_enter();
 #ifdef INVARIANTS
 	owepreempt = td->td_owepreempt;
 #endif
Index: sys/kern/kern_rmlock.c
===================================================================
--- sys/kern/kern_rmlock.c	(revision 240513)
+++ sys/kern/kern_rmlock.c	(working copy)
@@ -351,10 +351,8 @@ _rm_rlock(struct rmlock *rm, struct rm_priotracker
 	tracker->rmp_thread = td;
 	tracker->rmp_rmlock = rm;
 
-	td->td_critnest++;	/* critical_enter(); */
+	critical_enter();
 
-	compiler_memory_barrier();
-
 	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
 
 	rm_tracker_add(pc, tracker);
@@ -419,7 +417,7 @@ _rm_runlock(struct rmlock *rm, struct rm_priotrack
 	if (SCHEDULER_STOPPED())
 		return;
 
-	td->td_critnest++;	/* critical_enter(); */
+	critical_enter();
 	pc = cpuid_to_pcpu[td->td_oncpu]; /* pcpu_find(td->td_oncpu); */
 	rm_tracker_remove(pc, tracker);
 	td->td_critnest--;
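
For readers who want to see the control flow in isolation, here is a minimal
user-space sketch of the fast-path/slow-path split the patch introduces: the
common enter/exit is just a nesting-counter update behind a compiler barrier,
and only the outermost exit (count 1 -> 0) falls through to an out-of-line
routine that handles any deferred preemption. This is not kernel code; the
toy_* names and the printf() standing in for the mi_switch() call are invented
purely for illustration.

	/* cc -Wall -o toy_critical toy_critical.c */
	#include <stdio.h>

	struct toy_thread {
		int	nest;		/* models td_critnest */
		int	owepreempt;	/* models td_owepreempt */
	};

	static struct toy_thread toy_curthread;

	/* Out-of-line slow path: runs only when leaving the outermost section. */
	static void
	toy_exit_hard(struct toy_thread *td)
	{

		td->nest = 0;
		if (td->owepreempt) {
			td->owepreempt = 0;
			printf("deferred preemption would be handled here\n");
		}
	}

	/* Fast paths: a counter bump plus a compiler barrier, as in the macros. */
	#define	toy_enter() do {					\
		toy_curthread.nest++;					\
		__asm __volatile("" : : : "memory");			\
	} while (0)

	#define	toy_exit() do {						\
		struct toy_thread *__td = &toy_curthread;		\
		__asm __volatile("" : : : "memory");			\
		if (__td->nest != 1)					\
			__td->nest--;					\
		else							\
			toy_exit_hard(__td);				\
	} while (0)

	int
	main(void)
	{

		toy_enter();
		toy_enter();		/* nested enter: counter goes to 2 */
		toy_curthread.owepreempt = 1;
		toy_exit();		/* inner exit: fast path, 2 -> 1 */
		toy_exit();		/* outer exit: slow path runs */
		printf("nest=%d owepreempt=%d\n", toy_curthread.nest,
		    toy_curthread.owepreempt);
		return (0);
	}

The point of keeping the hard exit out of line is that the inlined macro
bodies stay small, so critical_enter()/critical_exit() remain cheap in
non-debug kernels while the INVARIANTS/KTR build keeps the file/line-tracking
functions in kern_switch.c.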