Index: sys/sys/_mutex.h
===================================================================
--- sys/sys/_mutex.h	(revision 242333)
+++ sys/sys/_mutex.h	(working copy)
@@ -33,6 +33,10 @@
 
 /*
  * Sleep/spin mutex.
+ *
+ * The layout of the first 2 members of struct mtx is considered fixed.
+ * If they need to change, the bits in the mutex implementation might need
+ * to be modified appropriately.
  */
 struct mtx {
 	struct lock_object	lock_object;	/* Common lock properties. */
Index: sys/sys/mutex.h
===================================================================
--- sys/sys/mutex.h	(revision 242333)
+++ sys/sys/mutex.h	(working copy)
@@ -79,7 +79,8 @@
  *
  * NOTE: Functions prepended with `_' (underscore) are exported to other parts
  *	 of the kernel via macros, thus allowing us to use the cpp LOCK_FILE
- *	 and LOCK_LINE. These functions should not be called directly by any
+ *	 and LOCK_LINE, or for hiding the lock cookie crunching from the
+ *	 consumers. These functions should not be called directly by any
  *	 code using the API. Their macros cover their functionality.
  *	 Functions with a `_' suffix are the entrypoint for the common
  *	 KPI covering both compat shims and fast path case. These can be
@@ -89,28 +90,32 @@
  *
  * [See below for descriptions]
  *
  */
-void	mtx_init(struct mtx *m, const char *name, const char *type, int opts);
-void	mtx_destroy(struct mtx *m);
+void	_mtx_init(volatile uintptr_t *c, const char *name, const char *type,
+	    int opts);
+void	_mtx_destroy(volatile uintptr_t *c);
 void	mtx_sysinit(void *arg);
-int	mtx_trylock_flags_(struct mtx *m, int opts, const char *file,
+int	_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
 void	mutex_init(void);
-void	_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts,
+void	__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
 	    const char *file, int line);
-void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
+void	__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
 #ifdef SMP
-void	_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts,
+void	_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
 	    const char *file, int line);
 #endif
-void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
+void	__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
+void	__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file,
+	    int line);
+void	__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
 	    int line);
-void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
-	    int line);
+void	__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts,
+	    const char *file, int line);
 #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
-void	_mtx_assert(const struct mtx *m, int what, const char *file, int line);
+void	__mtx_assert(const volatile uintptr_t *c, int what, const char *file,
+	    int line);
 #endif
 void	thread_lock_flags_(struct thread *, int, const char *, int);
@@ -121,6 +126,38 @@ void thread_lock_flags_(struct thread *, int, cons
 #define thread_unlock(tdp) \
 	mtx_unlock_spin((tdp)->td_lock)
 
+/*
+ * Top-level macros to provide the lock cookie once the actual mtx is passed.
+ * They will also prevent passing a malformed object to the mtx KPI by
+ * failing compilation.
+ */
+#define	mtx_init(m, n, t, o) \
+	_mtx_init(&(m)->mtx_lock, n, t, o)
+#define	mtx_destroy(m) \
+	_mtx_destroy(&(m)->mtx_lock)
+#define	mtx_trylock_flags_(m, o, f, l) \
+	_mtx_trylock_flags_(&(m)->mtx_lock, o, f, l)
+#define	_mtx_lock_sleep(m, t, o, f, l) \
+	__mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l)
+#define	_mtx_unlock_sleep(m, o, f, l) \
+	__mtx_unlock_sleep(&(m)->mtx_lock, o, f, l)
+#ifdef SMP
+#define	_mtx_lock_spin(m, t, o, f, l) \
+	_mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l)
+#endif
+#define	_mtx_lock_flags(m, o, f, l) \
+	__mtx_lock_flags(&(m)->mtx_lock, o, f, l)
+#define	_mtx_unlock_flags(m, o, f, l) \
+	__mtx_unlock_flags(&(m)->mtx_lock, o, f, l)
+#define	_mtx_lock_spin_flags(m, o, f, l) \
+	__mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l)
+#define	_mtx_unlock_spin_flags(m, o, f, l) \
+	__mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l)
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define	_mtx_assert(m, w, f, l) \
+	__mtx_assert(&(m)->mtx_lock, w, f, l)
+#endif
+
 #define	mtx_recurse	lock_object.lo_data
 
 /* Very simple operations on mtx_lock. */
@@ -409,7 +446,7 @@ struct mtx_args {
 	SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
 	    mtx_sysinit, &name##_args); \
 	SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \
-	    mtx_destroy, (mtx))
+	    _mtx_destroy, __DEVOLATILE(void *, &(mtx)->mtx_lock))
 
 /*
  * The INVARIANTS-enabled mtx_assert() functionality.
Index: sys/kern/kern_mutex.c
===================================================================
--- sys/kern/kern_mutex.c	(revision 242333)
+++ sys/kern/kern_mutex.c	(working copy)
@@ -83,6 +83,13 @@ PMC_SOFT_DEFINE( , , lock, failed);
 #endif
 
 /*
+ * Return the mutex address when the lock cookie address is provided.
+ * This functionality assumes that the lock cookie is the second member
+ * of the mtx struct after the first, mandatory, lock_object.
+ */
+#define	mtxlock2mtx(c)	(__containerof(c, struct mtx, mtx_lock))
+
+/*
  * Internal utility macros.
  */
 #define	mtx_unowned(m)	((m)->mtx_lock == MTX_UNOWNED)
@@ -195,11 +202,15 @@ owner_mtx(const struct lock_object *lock, struct t
  * modules and can also be called from assembly language if needed.
  */
 void
-_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d",
 	    curthread, m->lock_object.lo_name, file, line));
@@ -219,11 +230,15 @@ void
 }
 
 void
-_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep,
@@ -241,11 +256,16 @@ void
 }
 
 void
-_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+    int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -264,11 +284,16 @@ void
 }
 
 void
-_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file,
+    int line)
 {
+	struct mtx *m;
 
 	if (SCHEDULER_STOPPED())
 		return;
+
+	m = mtxlock2mtx(c);
+
 	KASSERT(m->mtx_lock != MTX_DESTROYED,
 	    ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line));
 	KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin,
@@ -288,8 +313,9 @@ void
  * is already owned, it will recursively acquire the lock.
  */
 int
-mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line)
+_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 #ifdef LOCK_PROFILING
 	uint64_t waittime = 0;
 	int contested = 0;
@@ -299,6 +325,8 @@ int
 	if (SCHEDULER_STOPPED())
 		return (1);
 
+	m = mtxlock2mtx(c);
+
 	KASSERT(!TD_IS_IDLETHREAD(curthread),
 	    ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d",
 	    curthread, m->lock_object.lo_name, file, line));
@@ -330,15 +358,16 @@ int
 }
 
 /*
- * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
+ * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock.
  *
  * We call this if the lock is either contested (i.e. we need to go to
  * sleep waiting for it), or if we need to recurse on it.
  */
 void
-_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file,
-    int line)
+__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts,
+    const char *file, int line)
 {
+	struct mtx *m;
 	struct turnstile *ts;
 	uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
@@ -360,6 +389,8 @@ void
 	if (SCHEDULER_STOPPED())
 		return;
 
+	m = mtxlock2mtx(c);
+
 	if (mtx_owned(m)) {
 		KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0,
 	    ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n",
@@ -518,15 +549,16 @@ _mtx_lock_spin_failed(struct mtx *m)
 
 #ifdef SMP
 /*
- * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
+ * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock.
  *
  * This is only called if we need to actually spin for the lock. Recursion
  * is handled inline.
  */
 void
-_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
-    int line)
+_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts,
+    const char *file, int line)
 {
+	struct mtx *m;
 	int i = 0;
 #ifdef LOCK_PROFILING
 	int contested = 0;
@@ -536,6 +568,8 @@ void
 	if (SCHEDULER_STOPPED())
 		return;
 
+	m = mtxlock2mtx(c);
+
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
@@ -684,19 +718,22 @@ thread_lock_set(struct thread *td, struct mtx *new
 }
 
 /*
- * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
+ * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock.
  *
 * We are only called here if the lock is recursed or contested (i.e. we
 * need to wake up a blocked thread).
  */
 void
-_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line)
+__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line)
 {
+	struct mtx *m;
 	struct turnstile *ts;
 
 	if (SCHEDULER_STOPPED())
 		return;
 
+	m = mtxlock2mtx(c);
+
 	if (mtx_recursed(m)) {
 		if (--(m->mtx_recurse) == 0)
 			atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED);
@@ -735,11 +772,15 @@ void
  */
 #ifdef INVARIANT_SUPPORT
 void
-_mtx_assert(const struct mtx *m, int what, const char *file, int line)
+__mtx_assert(const volatile uintptr_t *c, int what, const char *file, int line)
 {
+	const struct mtx *m;
 
 	if (panicstr != NULL || dumping)
 		return;
+
+	m = mtxlock2mtx(c);
+
 	switch (what) {
 	case MA_OWNED:
 	case MA_OWNED | MA_RECURSED:
@@ -816,11 +857,14 @@ mtx_sysinit(void *arg)
  * witness.
  */
 void
-mtx_init(struct mtx *m, const char *name, const char *type, int opts)
+_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
 {
+	struct mtx *m;
 	struct lock_class *class;
 	int flags;
 
+	m = mtxlock2mtx(c);
+
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS |
 	    MTX_DUPOK | MTX_NOPROFILE)) == 0);
 	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
@@ -863,9 +907,12 @@ void
  * flags.
  */
 void
-mtx_destroy(struct mtx *m)
+_mtx_destroy(volatile uintptr_t *c)
 {
+	struct mtx *m;
 
+	m = mtxlock2mtx(c);
+
 	if (!mtx_owned(m))
 		MPASS(mtx_unowned(m));
 	else {
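
Reviewer note, not part of the patch: below is a minimal consumer sketch (the foo_softc/foo_* names are hypothetical) showing that the KPI seen by existing code does not change. The top-level macros still take a struct mtx pointer and hand its &(m)->mtx_lock cookie to the renamed underscore functions, which recover the mutex again through mtxlock2mtx()/__containerof(); passing an object without an mtx_lock member now fails at compile time.

/* Hypothetical consumer code (kernel-module style); not part of the patch. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

struct foo_softc {
	struct mtx	sc_mtx;		/* struct mtx layout is unchanged */
	int		sc_count;
};

static void
foo_attach(struct foo_softc *sc)
{

	/* Expands to _mtx_init(&sc->sc_mtx.mtx_lock, "foo softc", NULL, MTX_DEF). */
	mtx_init(&sc->sc_mtx, "foo softc", NULL, MTX_DEF);
}

static void
foo_bump(struct foo_softc *sc)
{

	/* The contested path funnels into __mtx_lock_sleep() on the lock cookie. */
	mtx_lock(&sc->sc_mtx);
	sc->sc_count++;
	mtx_unlock(&sc->sc_mtx);
}

static void
foo_detach(struct foo_softc *sc)
{

	/* Expands to _mtx_destroy(&sc->sc_mtx.mtx_lock). */
	mtx_destroy(&sc->sc_mtx);
}

The MTX_SYSINIT() change follows the same idea: SYSUNINIT() passes its argument as a plain pointer, so the cookie address is handed to _mtx_destroy() directly, with __DEVOLATILE() used only to drop the volatile qualifier for the void * argument.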