Index: sys/amd64/amd64/machdep.c =================================================================== --- sys/amd64/amd64/machdep.c (revision 331013) +++ sys/amd64/amd64/machdep.c (working copy) @@ -1897,37 +1897,6 @@ efi_map_sysctl_handler(SYSCTL_HANDLER_ARGS) SYSCTL_PROC(_machdep, OID_AUTO, efi_map, CTLTYPE_OPAQUE|CTLFLAG_RD, NULL, 0, efi_map_sysctl_handler, "S,efi_map_header", "Raw EFI Memory Map"); -void -spinlock_enter(void) -{ - struct thread *td; - register_t flags; - - td = curthread; - if (td->td_md.md_spinlock_count == 0) { - flags = intr_disable(); - td->td_md.md_spinlock_count = 1; - td->td_md.md_saved_flags = flags; - critical_enter(); - } else - td->td_md.md_spinlock_count++; -} - -void -spinlock_exit(void) -{ - struct thread *td; - register_t flags; - - td = curthread; - flags = td->td_md.md_saved_flags; - td->td_md.md_spinlock_count--; - if (td->td_md.md_spinlock_count == 0) { - critical_exit(); - intr_restore(flags); - } -} - /* * Construct a PCB from a trapframe. This is called from kdb_trap() where * we want to start a backtrace from the function that caused us to enter Index: sys/amd64/include/spinlock.h =================================================================== --- sys/amd64/include/spinlock.h (nonexistent) +++ sys/amd64/include/spinlock.h (working copy) @@ -0,0 +1,75 @@ +/*- + * SPDX-License-Identifier: BSD-3-Clause + * + * Copyright (c) 2003 Peter Wemm. + * Copyright (c) 1993 The Regents of the University of California. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +/* + * Machine-dependent inline implementations of spinlock_enter() and + * spinlock_exit(), moved here from machdep.c so that the spin lock + * fast path can be inlined by its callers.
+ */ + +#ifndef _MACHINE_SPINLOCK_H_ +#define _MACHINE_SPINLOCK_H_ + +static __inline void +spinlock_enter(void) +{ + struct thread *td; + register_t flags; + + td = curthread; + if (td->td_md.md_spinlock_count == 0) { + flags = intr_disable(); + td->td_md.md_spinlock_count = 1; + td->td_md.md_saved_flags = flags; + critical_enter(); + } else + td->td_md.md_spinlock_count++; +} + +static __inline void +spinlock_exit(void) +{ + struct thread *td; + register_t flags; + + td = curthread; + flags = td->td_md.md_saved_flags; + td->td_md.md_spinlock_count--; + if (td->td_md.md_spinlock_count == 0) { + critical_exit(); + intr_restore(flags); + } +} + +#endif /* !_MACHINE_SPINLOCK_H_ */ Property changes on: sys/amd64/include/spinlock.h ___________________________________________________________________ Added: svn:eol-style ## -0,0 +1 ## +native \ No newline at end of property Added: svn:keywords ## -0,0 +1 ## +FreeBSD=%H \ No newline at end of property Added: svn:mime-type ## -0,0 +1 ## +text/plain \ No newline at end of property Index: sys/amd64/vmm/intel/ept.c =================================================================== --- sys/amd64/vmm/intel/ept.c (revision 331013) +++ sys/amd64/vmm/intel/ept.c (working copy) @@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include #include Index: sys/amd64/vmm/intel/vmcs.c =================================================================== --- sys/amd64/vmm/intel/vmcs.c (revision 331013) +++ sys/amd64/vmm/intel/vmcs.c (working copy) @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include #include Index: sys/amd64/vmm/intel/vmx_genassym.c =================================================================== --- sys/amd64/vmm/intel/vmx_genassym.c (revision 331013) +++ sys/amd64/vmm/intel/vmx_genassym.c (working copy) @@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include #include Index: sys/cddl/dev/dtrace/dtrace_debug.c =================================================================== --- sys/cddl/dev/dtrace/dtrace_debug.c (revision 331013) +++ sys/cddl/dev/dtrace/dtrace_debug.c (working copy) @@ -32,6 +32,7 @@ #ifdef DEBUG #include +#include #define DTRACE_DEBUG_BUFR_SIZE (32 * 1024) Index: sys/compat/x86bios/x86bios.c =================================================================== --- sys/compat/x86bios/x86bios.c (revision 331013) +++ sys/compat/x86bios/x86bios.c (working copy) @@ -37,6 +37,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include @@ -46,6 +47,8 @@ __FBSDID("$FreeBSD$"); #include #include +#include + #include #include Index: sys/dev/cxgbe/t4_mp_ring.c =================================================================== --- sys/dev/cxgbe/t4_mp_ring.c (revision 331013) +++ sys/dev/cxgbe/t4_mp_ring.c (working copy) @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include "t4_mp_ring.h" Index: sys/dev/hyperv/vmbus/vmbus.c =================================================================== --- sys/dev/hyperv/vmbus/vmbus.c (revision 331013) +++ sys/dev/hyperv/vmbus/vmbus.c (working copy) @@ -39,6 +39,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include Index: sys/dev/ow/owc_gpiobus.c =================================================================== --- sys/dev/ow/owc_gpiobus.c (revision 331013) +++ sys/dev/ow/owc_gpiobus.c (working copy) @@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #ifdef FDT #include Index: 
sys/kern/kern_clocksource.c =================================================================== --- sys/kern/kern_clocksource.c (revision 331013) +++ sys/kern/kern_clocksource.c (working copy) @@ -55,6 +55,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include int cpu_disable_c2_sleep = 0; /* Timer dies in C2. */ int cpu_disable_c3_sleep = 0; /* Timer dies in C3. */ Index: sys/kern/kern_mutex.c =================================================================== --- sys/kern/kern_mutex.c (revision 331013) +++ sys/kern/kern_mutex.c (working copy) @@ -66,6 +66,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include @@ -288,6 +289,98 @@ __mtx_unlock_flags(volatile uintptr_t *c, int opts TD_LOCKS_DEC(curthread); } +/* + * Lock a spin mutex. For spinlocks, we handle recursion inline (it + * turns out that function calls can be significantly expensive on + * some architectures). Since spin locks are not _too_ common, + * inlining this code is not too big a deal. + */ +#ifdef SMP +#define __mtx_lock_spin(mp, tid, opts, file, line) do { \ + uintptr_t _tid = (uintptr_t)(tid); \ + uintptr_t _v = MTX_UNOWNED; \ + \ + spinlock_enter(); \ + if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \ + !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \ + _mtx_lock_spin((mp), _v, (opts), (file), (line)); \ +} while (0) +#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \ + uintptr_t _tid = (uintptr_t)(tid); \ + int _ret; \ + \ + spinlock_enter(); \ + if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\ + spinlock_exit(); \ + _ret = 0; \ + } else { \ + LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \ + mp, 0, 0, file, line); \ + _ret = 1; \ + } \ + _ret; \ +}) +#else /* SMP */ +#define __mtx_lock_spin(mp, tid, opts, file, line) do { \ + uintptr_t _tid = (uintptr_t)(tid); \ + \ + spinlock_enter(); \ + if ((mp)->mtx_lock == _tid) \ + (mp)->mtx_recurse++; \ + else { \ + KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \ + (mp)->mtx_lock = _tid; \ + } \ +} while (0) +#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \ + uintptr_t _tid = (uintptr_t)(tid); \ + int _ret; \ + \ + spinlock_enter(); \ + if ((mp)->mtx_lock != MTX_UNOWNED) { \ + spinlock_exit(); \ + _ret = 0; \ + } else { \ + (mp)->mtx_lock = _tid; \ + _ret = 1; \ + } \ + _ret; \ +}) +#endif /* SMP */ + +/* + * Unlock a spin mutex. For spinlocks, we can handle everything + * inline, as it's pretty simple and a function call would be too + * expensive (at least on some architectures). Since spin locks are + * not _too_ common, inlining this code is not too big a deal. + * + * Since we always perform a spinlock_enter() when attempting to acquire a + * spin lock, we need to always perform a matching spinlock_exit() when + * releasing a spin lock. This includes the recursion cases. 
+ */ +#ifdef SMP +#define __mtx_unlock_spin(mp) do { \ + if (mtx_recursed((mp))) \ + (mp)->mtx_recurse--; \ + else { \ + LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \ + _mtx_release_lock_quick((mp)); \ + } \ + spinlock_exit(); \ +} while (0) +#else /* SMP */ +#define __mtx_unlock_spin(mp) do { \ + if (mtx_recursed((mp))) \ + (mp)->mtx_recurse--; \ + else { \ + LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \ + (mp)->mtx_lock = MTX_UNOWNED; \ + } \ + spinlock_exit(); \ +} while (0) +#endif /* SMP */ + + void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line) Index: sys/kern/kern_shutdown.c =================================================================== --- sys/kern/kern_shutdown.c (revision 331013) +++ sys/kern/kern_shutdown.c (working copy) @@ -84,6 +84,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include Index: sys/kern/kern_switch.c =================================================================== --- sys/kern/kern_switch.c (revision 331013) +++ sys/kern/kern_switch.c (working copy) @@ -188,71 +188,29 @@ choosethread(void) return (td); } -/* - * Kernel thread preemption implementation. Critical sections mark - * regions of code in which preemptions are not allowed. - * - * It might seem a good idea to inline critical_enter() but, in order - * to prevent instructions reordering by the compiler, a __compiler_membar() - * would have to be used here (the same as sched_pin()). The performance - * penalty imposed by the membar could, then, produce slower code than - * the function call itself, for most cases. - */ void -critical_enter(void) +critical_exit_owepreempt(struct thread *td) { - struct thread *td; - - td = curthread; - td->td_critnest++; - CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td, - (long)td->td_proc->p_pid, td->td_name, td->td_critnest); -} - -void -critical_exit(void) -{ - struct thread *td; int flags; - td = curthread; - KASSERT(td->td_critnest != 0, - ("critical_exit: td_critnest == 0")); + if (kdb_active) + return; - if (td->td_critnest == 1) { - td->td_critnest = 0; - - /* - * Interrupt handlers execute critical_exit() on - * leave, and td_owepreempt may be left set by an - * interrupt handler only when td_critnest > 0. If we - * are decrementing td_critnest from 1 to 0, read - * td_owepreempt after decrementing, to not miss the - * preempt. Disallow compiler to reorder operations. - */ - __compiler_membar(); - if (td->td_owepreempt && !kdb_active) { - /* - * Microoptimization: we committed to switch, - * disable preemption in interrupt handlers - * while spinning for the thread lock. - */ - td->td_critnest = 1; - thread_lock(td); - td->td_critnest--; - flags = SW_INVOL | SW_PREEMPT; - if (TD_IS_IDLETHREAD(td)) - flags |= SWT_IDLE; - else - flags |= SWT_OWEPREEMPT; - mi_switch(flags, NULL); - thread_unlock(td); - } - } else - td->td_critnest--; - - CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td, - (long)td->td_proc->p_pid, td->td_name, td->td_critnest); + /* + * Microoptimization: we committed to switch, + * disable preemption in interrupt handlers + * while spinning for the thread lock. 
+ */ + td->td_critnest = 1; + thread_lock(td); + td->td_critnest--; + flags = SW_INVOL | SW_PREEMPT; + if (TD_IS_IDLETHREAD(td)) + flags |= SWT_IDLE; + else + flags |= SWT_OWEPREEMPT; + mi_switch(flags, NULL); + thread_unlock(td); } /************************************************************************ Index: sys/kern/kern_timeout.c =================================================================== --- sys/kern/kern_timeout.c (revision 331013) +++ sys/kern/kern_timeout.c (working copy) @@ -72,6 +72,8 @@ __FBSDID("$FreeBSD$"); #include #endif +#include + #ifndef NO_EVENTTIMERS DPCPU_DECLARE(sbintime_t, hardclocktime); #endif Index: sys/kern/sched_ule.c =================================================================== --- sys/kern/sched_ule.c (revision 331013) +++ sys/kern/sched_ule.c (working copy) @@ -78,6 +78,7 @@ dtrace_vtime_switch_func_t dtrace_vtime_switch_fun #include #include +#include #define KTR_ULE 0 Index: sys/kern/subr_smp.c =================================================================== --- sys/kern/subr_smp.c (revision 331013) +++ sys/kern/subr_smp.c (working copy) @@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include "opt_sched.h" Index: sys/net/if_var.h =================================================================== --- sys/net/if_var.h (revision 331013) +++ sys/net/if_var.h (working copy) @@ -73,6 +73,7 @@ struct netmap_adapter; #ifdef _KERNEL #include /* ifqueue only? */ +#include #include #include #endif /* _KERNEL */ Index: sys/net/mp_ring.c =================================================================== --- sys/net/mp_ring.c (revision 331013) +++ sys/net/mp_ring.c (working copy) @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #if defined(__powerpc__) || defined(__mips__) Index: sys/sys/buf_ring.h =================================================================== --- sys/sys/buf_ring.h (revision 331013) +++ sys/sys/buf_ring.h (working copy) @@ -33,6 +33,7 @@ #define _SYS_BUF_RING_H_ #include +#include #if defined(INVARIANTS) && !defined(DEBUG_BUFRING) #define DEBUG_BUFRING 1 Index: sys/sys/lock.h =================================================================== --- sys/sys/lock.h (revision 331013) +++ sys/sys/lock.h (working copy) @@ -251,8 +251,6 @@ void lock_init(struct lock_object *, struct lock_c void lock_destroy(struct lock_object *); void lock_delay(struct lock_delay_arg *); void lock_delay_default_init(struct lock_delay_config *); -void spinlock_enter(void); -void spinlock_exit(void); void witness_init(struct lock_object *, const char *); void witness_destroy(struct lock_object *); int witness_defineorder(struct lock_object *, struct lock_object *); Index: sys/sys/mutex.h =================================================================== --- sys/sys/mutex.h (revision 331013) +++ sys/sys/mutex.h (working copy) @@ -244,65 +244,6 @@ void _thread_lock(struct thread *); _mtx_lock_sleep((mp), _v, (opts), (file), (line)); \ } while (0) -/* - * Lock a spin mutex. For spinlocks, we handle recursion inline (it - * turns out that function calls can be significantly expensive on - * some architectures). Since spin locks are not _too_ common, - * inlining this code is not too big a deal. 
- */ -#ifdef SMP -#define __mtx_lock_spin(mp, tid, opts, file, line) do { \ - uintptr_t _tid = (uintptr_t)(tid); \ - uintptr_t _v = MTX_UNOWNED; \ - \ - spinlock_enter(); \ - if (__predict_false(LOCKSTAT_PROFILE_ENABLED(spin__acquire) || \ - !_mtx_obtain_lock_fetch((mp), &_v, _tid))) \ - _mtx_lock_spin((mp), _v, (opts), (file), (line)); \ -} while (0) -#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \ - uintptr_t _tid = (uintptr_t)(tid); \ - int _ret; \ - \ - spinlock_enter(); \ - if (((mp)->mtx_lock != MTX_UNOWNED || !_mtx_obtain_lock((mp), _tid))) {\ - spinlock_exit(); \ - _ret = 0; \ - } else { \ - LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(spin__acquire, \ - mp, 0, 0, file, line); \ - _ret = 1; \ - } \ - _ret; \ -}) -#else /* SMP */ -#define __mtx_lock_spin(mp, tid, opts, file, line) do { \ - uintptr_t _tid = (uintptr_t)(tid); \ - \ - spinlock_enter(); \ - if ((mp)->mtx_lock == _tid) \ - (mp)->mtx_recurse++; \ - else { \ - KASSERT((mp)->mtx_lock == MTX_UNOWNED, ("corrupt spinlock")); \ - (mp)->mtx_lock = _tid; \ - } \ -} while (0) -#define __mtx_trylock_spin(mp, tid, opts, file, line) __extension__ ({ \ - uintptr_t _tid = (uintptr_t)(tid); \ - int _ret; \ - \ - spinlock_enter(); \ - if ((mp)->mtx_lock != MTX_UNOWNED) { \ - spinlock_exit(); \ - _ret = 0; \ - } else { \ - (mp)->mtx_lock = _tid; \ - _ret = 1; \ - } \ - _ret; \ -}) -#endif /* SMP */ - /* Unlock a normal mutex. */ #define __mtx_unlock(mp, tid, opts, file, line) do { \ uintptr_t _v = (uintptr_t)(tid); \ @@ -313,38 +254,6 @@ void _thread_lock(struct thread *); } while (0) /* - * Unlock a spin mutex. For spinlocks, we can handle everything - * inline, as it's pretty simple and a function call would be too - * expensive (at least on some architectures). Since spin locks are - * not _too_ common, inlining this code is not too big a deal. - * - * Since we always perform a spinlock_enter() when attempting to acquire a - * spin lock, we need to always perform a matching spinlock_exit() when - * releasing a spin lock. This includes the recursion cases. - */ -#ifdef SMP -#define __mtx_unlock_spin(mp) do { \ - if (mtx_recursed((mp))) \ - (mp)->mtx_recurse--; \ - else { \ - LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \ - _mtx_release_lock_quick((mp)); \ - } \ - spinlock_exit(); \ -} while (0) -#else /* SMP */ -#define __mtx_unlock_spin(mp) do { \ - if (mtx_recursed((mp))) \ - (mp)->mtx_recurse--; \ - else { \ - LOCKSTAT_PROFILE_RELEASE_LOCK(spin__release, mp); \ - (mp)->mtx_lock = MTX_UNOWNED; \ - } \ - spinlock_exit(); \ -} while (0) -#endif /* SMP */ - -/* * Exported lock manipulation interface. 
* * mtx_lock(m) locks MTX_DEF mutex `m' @@ -414,24 +323,18 @@ extern struct mtx_pool *mtxpool_sleep; _mtx_lock_flags((m), (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ _mtx_unlock_flags((m), (opts), (file), (line)) -#define mtx_lock_spin_flags_(m, opts, file, line) \ - _mtx_lock_spin_flags((m), (opts), (file), (line)) -#define mtx_trylock_spin_flags_(m, opts, file, line) \ - _mtx_trylock_spin_flags((m), (opts), (file), (line)) -#define mtx_unlock_spin_flags_(m, opts, file, line) \ - _mtx_unlock_spin_flags((m), (opts), (file), (line)) #else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */ #define mtx_lock_flags_(m, opts, file, line) \ __mtx_lock((m), curthread, (opts), (file), (line)) #define mtx_unlock_flags_(m, opts, file, line) \ __mtx_unlock((m), curthread, (opts), (file), (line)) +#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ #define mtx_lock_spin_flags_(m, opts, file, line) \ - __mtx_lock_spin((m), curthread, (opts), (file), (line)) + _mtx_lock_spin_flags((m), (opts), (file), (line)) #define mtx_trylock_spin_flags_(m, opts, file, line) \ - __mtx_trylock_spin((m), curthread, (opts), (file), (line)) + _mtx_trylock_spin_flags((m), (opts), (file), (line)) #define mtx_unlock_spin_flags_(m, opts, file, line) \ - __mtx_unlock_spin((m)) -#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ + _mtx_unlock_spin_flags((m), (opts), (file), (line)) #ifdef INVARIANTS #define mtx_assert_(m, what, file, line) \ Index: sys/sys/proc.h =================================================================== --- sys/sys/proc.h (revision 331013) +++ sys/sys/proc.h (working copy) @@ -48,6 +48,7 @@ #endif #include #include +#include #include #include #include @@ -1107,7 +1108,6 @@ void thread_unlink(struct thread *td); void thread_unsuspend(struct proc *p); void thread_wait(struct proc *p); struct thread *thread_find(struct proc *p, lwpid_t tid); - void stop_all_proc(void); void resume_all_proc(void); @@ -1146,6 +1146,64 @@ td_softdep_cleanup(struct thread *td) softdep_ast_cleanup(td); } +/* + * Kernel thread preemption implementation. Critical sections mark + * regions of code in which preemptions are not allowed. + */ + +void critical_exit_owepreempt(struct thread *td); + +static __inline void +critical_enter(void) +{ + struct thread *td; + + td = curthread; + td->td_critnest++; + __compiler_membar(); + + CTR4(KTR_CRITICAL, "critical_enter by thread %p (%ld, %s) to %d", td, + (long)td->td_proc->p_pid, td->td_name, td->td_critnest); +} + +static __inline void +critical_exit(void) +{ + struct thread *td; + + td = curthread; + KASSERT(td->td_critnest != 0, + ("critical_exit: td_critnest == 0")); + + if (td->td_critnest == 1) { + td->td_critnest = 0; + + /* + * Interrupt handlers execute critical_exit() on + * leave, and td_owepreempt may be left set by an + * interrupt handler only when td_critnest > 0. If we + * are decrementing td_critnest from 1 to 0, read + * td_owepreempt after decrementing, to not miss the + * preempt. Disallow compiler to reorder operations. + */ + __compiler_membar(); +#if 1 + if (td->td_owepreempt) + critical_exit_owepreempt(td); +#endif + } else + td->td_critnest--; + + CTR4(KTR_CRITICAL, "critical_exit by thread %p (%ld, %s) to %d", td, + (long)td->td_proc->p_pid, td->td_name, td->td_critnest); +} + +/* + * Assert that a thread is in critical(9) section. 
+ */ +#define CRITICAL_ASSERT(td) \ + KASSERT((td)->td_critnest >= 1, ("Not in critical section")); + #endif /* _KERNEL */ #endif /* !_SYS_PROC_H_ */ Index: sys/sys/systm.h =================================================================== --- sys/sys/systm.h (revision 331013) +++ sys/sys/systm.h (working copy) @@ -119,12 +119,6 @@ void kassert_panic(const char *fmt, ...) __printf ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg) /* - * Assert that a thread is in critical(9) section. - */ -#define CRITICAL_ASSERT(td) \ - KASSERT((td)->td_critnest >= 1, ("Not in critical section")); - -/* * If we have already panic'd and this is the thread that called * panic(), then don't block on any mutexes but silently succeed. * Otherwise, the kernel will deadlock since the scheduler isn't @@ -214,8 +208,6 @@ void vpanic(const char *, __va_list) __dead2 __pri void cpu_boot(int); void cpu_flush_dcache(void *, size_t); void cpu_rootconf(void); -void critical_enter(void); -void critical_exit(void); void init_param1(void); void init_param2(long physpages); void init_static_kenv(char *, size_t); Index: sys/x86/x86/x86_mem.c =================================================================== --- sys/x86/x86/x86_mem.c (revision 331013) +++ sys/x86/x86/x86_mem.c (working copy) @@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include Index: sys/x86/xen/xen_intr.c =================================================================== --- sys/x86/xen/xen_intr.c (revision 331013) +++ sys/x86/xen/xen_intr.c (working copy) @@ -44,6 +44,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include
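For readers skimming the diff, here is a minimal, userland-compilable sketch of the fast-path/slow-path split the patch applies to critical_enter()/critical_exit() (and, by the same reasoning, to spinlock_enter()/spinlock_exit()): the nesting-count bookkeeping is inlined at every call site, and only the rare case where an interrupt has left td_owepreempt set pays for an out-of-line call (critical_exit_owepreempt() in the patch). The struct thread, curthread, and critical_exit_slow() below are simplified stand-ins for illustration, not the kernel definitions; the empty asm statement plays the role of __compiler_membar().

#include <stdio.h>

struct thread {
	int	td_critnest;	/* critical section nesting level */
	int	td_owepreempt;	/* an interrupt deferred a preemption */
};

static struct thread thread0;
#define	curthread	(&thread0)

/* Out-of-line slow path; stands in for critical_exit_owepreempt(). */
static void
critical_exit_slow(struct thread *td)
{

	td->td_owepreempt = 0;
	printf("deferred preemption handled out of line\n");
}

/* Inline fast path, mirroring the versions added to sys/sys/proc.h. */
static inline void
critical_enter(void)
{

	curthread->td_critnest++;
	__asm__ volatile("" ::: "memory");	/* compiler barrier */
}

static inline void
critical_exit(void)
{
	struct thread *td;

	td = curthread;
	if (td->td_critnest == 1) {
		td->td_critnest = 0;
		/* Read td_owepreempt only after dropping the count. */
		__asm__ volatile("" ::: "memory");
		if (td->td_owepreempt)
			critical_exit_slow(td);
	} else
		td->td_critnest--;
}

int
main(void)
{

	critical_enter();
	thread0.td_owepreempt = 1;	/* pretend an interrupt set this */
	critical_exit();		/* slow path runs exactly once */
	critical_enter();
	critical_exit();		/* fast path only */
	return (0);
}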