Index: sys/kern/kern_mutex.c =================================================================== --- sys/kern/kern_mutex.c (revision 227533) +++ sys/kern/kern_mutex.c (working copy) @@ -188,7 +188,7 @@ * modules and can also be called from assembly language if needed. */ void -_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line) +mtx_lock_flags_shim(struct mtx *m, int opts, const char *file, int line) { MPASS(curthread != NULL); @@ -208,7 +208,7 @@ } void -_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line) +mtx_unlock_flags_shim(struct mtx *m, int opts, const char *file, int line) { MPASS(curthread != NULL); KASSERT(m->mtx_lock != MTX_DESTROYED, @@ -228,7 +228,7 @@ } void -_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line) +mtx_lock_spin_flags_shim(struct mtx *m, int opts, const char *file, int line) { MPASS(curthread != NULL); @@ -250,7 +250,7 @@ } void -_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line) +mtx_unlock_spin_flags_shim(struct mtx *m, int opts, const char *file, int line) { MPASS(curthread != NULL); @@ -273,7 +273,7 @@ * is already owned, it will recursively acquire the lock. */ int -_mtx_trylock(struct mtx *m, int opts, const char *file, int line) +mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line) { #ifdef LOCK_PROFILING uint64_t waittime = 0; @@ -310,13 +310,13 @@ } /* - * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. + * mtx_lock_sleep_hard: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. 
*/ void -_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file, +mtx_lock_sleep_hard(struct mtx *m, uintptr_t tid, int opts, const char *file, int line) { struct turnstile *ts; @@ -339,12 +339,12 @@ if (mtx_owned(m)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, - ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", + ("mtx_lock_sleep_hard: recursed on non-recursive mutex %s @ %s:%d\n", m->lock_object.lo_name, file, line)); m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) - CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recursing", m); + CTR1(KTR_LOCK, "mtx_lock_sleep_hard: %p recursing", m); return; } @@ -352,7 +352,7 @@ &contested, &waittime); if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR4(KTR_LOCK, - "_mtx_lock_sleep: %s contested (lock=%p) at %s:%d", + "mtx_lock_sleep_hard: %s contested (lock=%p) at %s:%d", m->lock_object.lo_name, (void *)m->mtx_lock, file, line); while (!_mtx_obtain_lock(m, tid)) { @@ -492,13 +492,13 @@ #ifdef SMP /* - * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock. + * mtx_lock_spin_hard: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. 
*/ void -_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file, +mtx_lock_spin_hard(struct mtx *m, uintptr_t tid, int opts, const char *file, int line) { int i = 0; @@ -508,7 +508,7 @@ #endif if (LOCK_LOG_TEST(&m->lock_object, opts)) - CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); + CTR1(KTR_LOCK, "mtx_lock_spin_hard: %p spinning", m); lock_profile_obtain_lock_failed(&m->lock_object, &contested, &waittime); while (!_mtx_obtain_lock(m, tid)) { @@ -530,7 +530,7 @@ } if (LOCK_LOG_TEST(&m->lock_object, opts)) - CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); + CTR1(KTR_LOCK, "mtx_lock_spin_hard: %p spin done", m); LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m, contested, waittime, (file), (line)); @@ -539,7 +539,7 @@ #endif /* SMP */ void -_thread_lock_flags(struct thread *td, int opts, const char *file, int line) +thread_lock_flags_(struct thread *td, int opts, const char *file, int line) { struct mtx *m; uintptr_t tid; @@ -645,13 +645,13 @@ } /* - * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. + * mtx_unlock_sleep_hard: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed or contested (i.e. we * need to wake up a blocked thread). 
*/ void -_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) +mtx_unlock_sleep_hard(struct mtx *m, int opts, const char *file, int line) { struct turnstile *ts; @@ -659,7 +659,8 @@ if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); if (LOCK_LOG_TEST(&m->lock_object, opts)) - CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); + CTR1(KTR_LOCK, + "mtx_unlock_sleep_hard: %p unrecurse", m); return; } @@ -670,7 +671,7 @@ turnstile_chain_lock(&m->lock_object); ts = turnstile_lookup(&m->lock_object); if (LOCK_LOG_TEST(&m->lock_object, opts)) - CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); + CTR1(KTR_LOCK, "mtx_unlock_sleep_hard: %p contested", m); MPASS(ts != NULL); turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE); _mtx_release_lock_quick(m); @@ -693,7 +694,7 @@ */ #ifdef INVARIANT_SUPPORT void -_mtx_assert(struct mtx *m, int what, const char *file, int line) +mtx_assert_hard(struct mtx *m, int what, const char *file, int line) { if (panicstr != NULL || dumping) Index: sys/dev/ppbus/ppb_base.c =================================================================== --- sys/dev/ppbus/ppb_base.c (revision 227533) +++ sys/dev/ppbus/ppb_base.c (working copy) @@ -236,11 +236,9 @@ void _ppb_assert_locked(device_t bus, const char *file, int line) { -#ifdef INVARIANTS struct ppb_data *ppb = DEVTOSOFTC(bus); - _mtx_assert(ppb->ppc_lock, MA_OWNED, file, line); -#endif + mtx_assert_(ppb->ppc_lock, MA_OWNED, file, line); } void Index: sys/vm/vm_map.c =================================================================== --- sys/vm/vm_map.c (revision 227533) +++ sys/vm/vm_map.c (working copy) @@ -464,7 +464,7 @@ { if (map->system_map) - _mtx_lock_flags(&map->system_mtx, 0, file, line); + mtx_lock_flags_(&map->system_mtx, 0, file, line); else (void)_sx_xlock(&map->lock, 0, file, line); map->timestamp++; @@ -489,7 +489,7 @@ { if (map->system_map) - _mtx_unlock_flags(&map->system_mtx, 0, file, line); + mtx_unlock_flags_(&map->system_mtx, 0, file, 
line); else { _sx_xunlock(&map->lock, file, line); vm_map_process_deferred(); @@ -501,7 +501,7 @@ { if (map->system_map) - _mtx_lock_flags(&map->system_mtx, 0, file, line); + mtx_lock_flags_(&map->system_mtx, 0, file, line); else (void)_sx_slock(&map->lock, 0, file, line); } @@ -511,7 +511,7 @@ { if (map->system_map) - _mtx_unlock_flags(&map->system_mtx, 0, file, line); + mtx_unlock_flags_(&map->system_mtx, 0, file, line); else { _sx_sunlock(&map->lock, file, line); vm_map_process_deferred(); @@ -524,7 +524,7 @@ int error; error = map->system_map ? - !_mtx_trylock(&map->system_mtx, 0, file, line) : + !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : !_sx_try_xlock(&map->lock, file, line); if (error == 0) map->timestamp++; @@ -537,7 +537,7 @@ int error; error = map->system_map ? - !_mtx_trylock(&map->system_mtx, 0, file, line) : + !mtx_trylock_flags_(&map->system_mtx, 0, file, line) : !_sx_try_slock(&map->lock, file, line); return (error == 0); } @@ -558,9 +558,7 @@ unsigned int last_timestamp; if (map->system_map) { -#ifdef INVARIANTS - _mtx_assert(&map->system_mtx, MA_OWNED, file, line); -#endif + mtx_assert_(&map->system_mtx, MA_OWNED, file, line); } else { if (!_sx_try_upgrade(&map->lock, file, line)) { last_timestamp = map->timestamp; @@ -586,9 +584,7 @@ { if (map->system_map) { -#ifdef INVARIANTS - _mtx_assert(&map->system_mtx, MA_OWNED, file, line); -#endif + mtx_assert_(&map->system_mtx, MA_OWNED, file, line); } else _sx_downgrade(&map->lock, file, line); } @@ -609,13 +605,14 @@ return (sx_xlocked(&map->lock)); } +/* XXX: INVARIANTS here is still necessary because of sx support. 
*/ #ifdef INVARIANTS static void _vm_map_assert_locked(vm_map_t map, const char *file, int line) { if (map->system_map) - _mtx_assert(&map->system_mtx, MA_OWNED, file, line); + mtx_assert_(&map->system_mtx, MA_OWNED, file, line); else _sx_assert(&map->lock, SA_XLOCKED, file, line); } @@ -626,7 +623,7 @@ { if (map->system_map) - _mtx_assert(&map->system_mtx, MA_OWNED, file, line); + mtx_assert_(&map->system_mtx, MA_OWNED, file, line); else _sx_assert(&map->lock, SA_SLOCKED, file, line); } @@ -661,7 +658,7 @@ mtx_lock(&map_sleep_mtx); if (map->system_map) - _mtx_unlock_flags(&map->system_mtx, 0, file, line); + mtx_unlock_flags_(&map->system_mtx, 0, file, line); else _sx_xunlock(&map->lock, file, line); return (msleep(&map->root, &map_sleep_mtx, PDROP | PVM, "vmmaps", Index: sys/sys/mutex.h =================================================================== --- sys/sys/mutex.h (revision 227533) +++ sys/sys/mutex.h (working copy) @@ -77,10 +77,13 @@ /* * Prototypes * - * NOTE: Functions prepended with `_' (underscore) are exported to other parts - * of the kernel via macros, thus allowing us to use the cpp LOCK_FILE - * and LOCK_LINE. These functions should not be called directly by any - * code using the API. Their macros cover their functionality. + * NOTES: Functions with a `_shim' suffix are used for debugging and + * compatibility with KLD. These functions should not be called + * directly by any code using the KPI. + * Functions with a `_' suffix are the entrypoint for the common + * KPI covering both compat shims and fast path case. These can be + * used by consumers willing to pass options, file and line + * information, in an option-independent way. 
* * [See below for descriptions] * @@ -89,32 +92,34 @@ void mtx_destroy(struct mtx *m); void mtx_sysinit(void *arg); void mutex_init(void); -void _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, +int mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line); +void mtx_lock_flags_shim(struct mtx *m, int opts, const char *file, + int line); +void mtx_unlock_flags_shim(struct mtx *m, int opts, const char *file, + int line); +void mtx_lock_spin_flags_shim(struct mtx *m, int opts, const char *file, + int line); +void mtx_unlock_spin_flags_shim(struct mtx *m, int opts, const char *file, + int line); +void mtx_lock_sleep_hard(struct mtx *m, uintptr_t tid, int opts, const char *file, int line); -void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line); +void mtx_unlock_sleep_hard(struct mtx *m, int opts, const char *file, + int line); #ifdef SMP -void _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, +void mtx_lock_spin_hard(struct mtx *m, uintptr_t tid, int opts, const char *file, int line); #endif -void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line); -int _mtx_trylock(struct mtx *m, int opts, const char *file, int line); -void _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line); -void _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line); -void _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, - int line); -void _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, - int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) -void _mtx_assert(struct mtx *m, int what, const char *file, int line); +void mtx_assert_hard(struct mtx *m, int what, const char *file, int line); #endif -void _thread_lock_flags(struct thread *, int, const char *, int); +void thread_lock_flags_(struct thread *, int, const char *, int); #define thread_lock(tdp) \ - _thread_lock_flags((tdp), 0, __FILE__, __LINE__) + thread_lock_flags_((tdp), 0, __FILE__, __LINE__) 
#define thread_lock_flags(tdp, opt) \ - _thread_lock_flags((tdp), (opt), __FILE__, __LINE__) + thread_lock_flags_((tdp), (opt), __FILE__, __LINE__) #define thread_unlock(tdp) \ - mtx_unlock_spin((tdp)->td_lock) + mtx_unlock_spin((tdp)->td_lock) #define mtx_recurse lock_object.lo_data @@ -143,7 +148,8 @@ uintptr_t _tid = (uintptr_t)(tid); \ \ if (!_mtx_obtain_lock((mp), _tid)) \ - _mtx_lock_sleep((mp), _tid, (opts), (file), (line)); \ + mtx_lock_sleep_hard((mp), _tid, (opts), (file), \ + (line)); \ else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_LOCK_ACQUIRE, \ mp, 0, 0, (file), (line)); \ @@ -164,7 +170,8 @@ if ((mp)->mtx_lock == _tid) \ (mp)->mtx_recurse++; \ else \ - _mtx_lock_spin((mp), _tid, (opts), (file), (line)); \ + mtx_lock_spin_hard((mp), _tid, (opts), (file), \ + (line)); \ } else \ LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, \ mp, 0, 0, (file), (line)); \ @@ -188,7 +195,7 @@ uintptr_t _tid = (uintptr_t)(tid); \ \ if (!_mtx_release_lock((mp), _tid)) \ - _mtx_unlock_sleep((mp), (opts), (file), (line)); \ + mtx_unlock_sleep_hard((mp), (opts), (file), (line)); \ } while (0) /* @@ -290,27 +297,48 @@ #error LOCK_DEBUG not defined, include before #endif #if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE) -#define mtx_lock_flags(m, opts) \ - _mtx_lock_flags((m), (opts), LOCK_FILE, LOCK_LINE) -#define mtx_unlock_flags(m, opts) \ - _mtx_unlock_flags((m), (opts), LOCK_FILE, LOCK_LINE) -#define mtx_lock_spin_flags(m, opts) \ - _mtx_lock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE) -#define mtx_unlock_spin_flags(m, opts) \ - _mtx_unlock_spin_flags((m), (opts), LOCK_FILE, LOCK_LINE) +#define mtx_lock_flags_(m, opts, file, line) \ + mtx_lock_flags_shim((m), (opts), (file), (line)) +#define mtx_unlock_flags_(m, opts, file, line) \ + mtx_unlock_flags_shim((m), (opts), (file), (line)) +#define mtx_lock_spin_flags_(m, opts, file, line) \ + mtx_lock_spin_flags_shim((m), (opts), (file), (line)) +#define mtx_unlock_spin_flags_(m, opts, file, line) \ + 
mtx_unlock_spin_flags_shim((m), (opts), (file), (line)) #else /* LOCK_DEBUG == 0 && !MUTEX_NOINLINE */ +#define mtx_lock_flags_(m, opts, file, line) \ + __mtx_lock((m), curthread, (opts), (file), (line)) +#define mtx_unlock_flags_(m, opts, file, line) \ + __mtx_unlock((m), curthread, (opts), (file), (line)) +#define mtx_lock_spin_flags_(m, opts, file, line) \ + __mtx_lock_spin((m), curthread, (opts), (file), (line)) +#define mtx_unlock_spin_flags_(m, opts, file, line) \ + __mtx_unlock_spin((m)) +#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ + +#ifdef INVARIANTS +#define mtx_assert_(m, what, file, line) \ + mtx_assert_hard((m), (what), (file), (line)) + +#define GIANT_REQUIRED mtx_assert_(&Giant, MA_OWNED, __FILE__, __LINE__) + +#else /* INVARIANTS */ +#define mtx_assert_(m, what, file, line) (void)0 +#define GIANT_REQUIRED +#endif /* INVARIANTS */ + #define mtx_lock_flags(m, opts) \ - __mtx_lock((m), curthread, (opts), LOCK_FILE, LOCK_LINE) + mtx_lock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_flags(m, opts) \ - __mtx_unlock((m), curthread, (opts), LOCK_FILE, LOCK_LINE) + mtx_unlock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_lock_spin_flags(m, opts) \ - __mtx_lock_spin((m), curthread, (opts), LOCK_FILE, LOCK_LINE) + mtx_lock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_unlock_spin_flags(m, opts) \ - __mtx_unlock_spin((m)) -#endif /* LOCK_DEBUG > 0 || MUTEX_NOINLINE */ - + mtx_unlock_spin_flags_((m), (opts), LOCK_FILE, LOCK_LINE) #define mtx_trylock_flags(m, opts) \ - _mtx_trylock((m), (opts), LOCK_FILE, LOCK_LINE) + mtx_trylock_flags_((m), (opts), LOCK_FILE, LOCK_LINE) +#define mtx_assert(m, what) \ + mtx_assert_((m), (what), __FILE__, __LINE__) #define mtx_sleep(chan, mtx, pri, wmesg, timo) \ _sleep((chan), &(mtx)->lock_object, (pri), (wmesg), (timo)) @@ -388,8 +416,8 @@ * The INVARIANTS-enabled mtx_assert() functionality. 
* * The constants need to be defined for INVARIANT_SUPPORT infrastructure - * support as _mtx_assert() itself uses them and the latter implies that - * _mtx_assert() must build. + * support as mtx_assert_hard() itself uses them and the latter implies that + * mtx_assert_hard() must build. */ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) #define MA_OWNED LA_XLOCKED @@ -398,17 +426,6 @@ #define MA_NOTRECURSED LA_NOTRECURSED #endif -#ifdef INVARIANTS -#define mtx_assert(m, what) \ - _mtx_assert((m), (what), __FILE__, __LINE__) - -#define GIANT_REQUIRED mtx_assert(&Giant, MA_OWNED) - -#else /* INVARIANTS */ -#define mtx_assert(m, what) (void)0 -#define GIANT_REQUIRED -#endif /* INVARIANTS */ - /* * Common lock type names. */