Index: sys/kern/kern_mutex.c =================================================================== --- sys/kern/kern_mutex.c (revision 242084) +++ sys/kern/kern_mutex.c (working copy) @@ -144,7 +144,7 @@ void assert_mtx(const struct lock_object *lock, int what) { - mtx_assert((const struct mtx *)lock, what); + mtx_assert(__DECONST(struct mtx *, lock), what); } void @@ -195,11 +195,15 @@ owner_mtx(const struct lock_object *lock, struct t * modules and can also be called from assembly language if needed. */ void -_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(!TD_IS_IDLETHREAD(curthread), ("mtx_lock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); @@ -219,11 +223,15 @@ void } void -_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_sleep, @@ -241,11 +249,16 @@ void } void -_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, + int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m = mtxlock2mtx(c); + KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_lock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, @@ -264,11 +277,16 @@ void } void -_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, int line) +__mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, const char *file, + int line) { + struct mtx *m; if (SCHEDULER_STOPPED()) return; + + m 
= mtxlock2mtx(c); + KASSERT(m->mtx_lock != MTX_DESTROYED, ("mtx_unlock_spin() of destroyed mutex @ %s:%d", file, line)); KASSERT(LOCK_CLASS(&m->lock_object) == &lock_class_mtx_spin, @@ -288,8 +306,9 @@ void * is already owned, it will recursively acquire the lock. */ int -mtx_trylock_flags_(struct mtx *m, int opts, const char *file, int line) +_mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; #ifdef LOCK_PROFILING uint64_t waittime = 0; int contested = 0; @@ -299,6 +318,8 @@ int if (SCHEDULER_STOPPED()) return (1); + m = mtxlock2mtx(c); + KASSERT(!TD_IS_IDLETHREAD(curthread), ("mtx_trylock() by idle thread %p on sleep mutex %s @ %s:%d", curthread, m->lock_object.lo_name, file, line)); @@ -330,15 +351,16 @@ int } /* - * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. + * __mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. * * We call this if the lock is either contested (i.e. we need to go to * sleep waiting for it), or if we need to recurse on it. */ void -_mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, const char *file, - int line) +__mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts, + const char *file, int line) { + struct mtx *m; struct turnstile *ts; uintptr_t v; #ifdef ADAPTIVE_MUTEXES @@ -360,6 +382,8 @@ void if (SCHEDULER_STOPPED()) return; + m = mtxlock2mtx(c); + if (mtx_owned(m)) { KASSERT((m->lock_object.lo_flags & LO_RECURSABLE) != 0, ("_mtx_lock_sleep: recursed on non-recursive mutex %s @ %s:%d\n", @@ -518,15 +542,16 @@ _mtx_lock_spin_failed(struct mtx *m) #ifdef SMP /* - * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock. + * _mtx_lock_spin_cookie: the tougher part of acquiring an MTX_SPIN lock. * * This is only called if we need to actually spin for the lock. Recursion * is handled inline. 
*/ void -_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file, - int line) +_mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts, + const char *file, int line) { + struct mtx *m; int i = 0; #ifdef LOCK_PROFILING int contested = 0; @@ -536,6 +561,8 @@ void if (SCHEDULER_STOPPED()) return; + m = mtxlock2mtx(c); + if (LOCK_LOG_TEST(&m->lock_object, opts)) CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m); @@ -684,19 +711,22 @@ thread_lock_set(struct thread *td, struct mtx *new } /* - * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. + * __mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. * * We are only called here if the lock is recursed or contested (i.e. we * need to wake up a blocked thread). */ void -_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) +__mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, int line) { + struct mtx *m; struct turnstile *ts; if (SCHEDULER_STOPPED()) return; + m = mtxlock2mtx(c); + if (mtx_recursed(m)) { if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); @@ -735,11 +765,15 @@ void */ #ifdef INVARIANT_SUPPORT void -_mtx_assert(const struct mtx *m, int what, const char *file, int line) +__mtx_assert(volatile uintptr_t *c, int what, const char *file, int line) { + struct mtx *m; if (panicstr != NULL || dumping) return; + + m = mtxlock2mtx(c); + switch (what) { case MA_OWNED: case MA_OWNED | MA_RECURSED: @@ -809,6 +843,13 @@ mtx_sysinit(void *arg) mtx_init(margs->ma_mtx, margs->ma_desc, NULL, margs->ma_opts); } +void +mtx_sysuninit(struct mtx *m) +{ + + mtx_destroy(m); +} + /* * Mutex initialization routine; initialize lock `m' of type contained in * `opts' with options contained in `opts' and name `name.' The optional @@ -816,11 +857,14 @@ mtx_sysinit(void *arg) * witness. 
*/ void -mtx_init(struct mtx *m, const char *name, const char *type, int opts) +_mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts) { + struct mtx *m; struct lock_class *class; int flags; + m = mtxlock2mtx(c); + MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0); ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock, @@ -863,9 +907,12 @@ void * flags. */ void -mtx_destroy(struct mtx *m) +_mtx_destroy(volatile uintptr_t *c) { + struct mtx *m; + m = mtxlock2mtx(c); + if (!mtx_owned(m)) MPASS(mtx_unowned(m)); else { Index: sys/sys/_mutex.h =================================================================== --- sys/sys/_mutex.h (revision 242084) +++ sys/sys/_mutex.h (working copy) @@ -39,4 +39,12 @@ struct mtx { volatile uintptr_t mtx_lock; /* Owner and flags. */ }; +/* + * Return the mutex address when the lock cookie address is provided. + * This functionality assumes that the lock cookie is the second member + * of the mtx struct after the first, mandatory, lock_object. 
+ */ +#define mtxlock2mtx(c) \ + (__containerof((c), struct mtx, mtx_lock)) + #endif /* !_SYS__MUTEX_H_ */ Index: sys/sys/mutex.h =================================================================== --- sys/sys/mutex.h (revision 242084) +++ sys/sys/mutex.h (working copy) @@ -89,28 +89,35 @@ * [See below for descriptions] * */ -void mtx_init(struct mtx *m, const char *name, const char *type, int opts); -void mtx_destroy(struct mtx *m); +void _mtx_init(volatile uintptr_t *c, const char *name, const char *type, + int opts); +void _mtx_destroy(volatile uintptr_t *c); void mtx_sysinit(void *arg); -int mtx_trylock_flags_(struct mtx *m, int opts, const char *file, +void mtx_sysuninit(struct mtx *m); +int _mtx_trylock_flags_(volatile uintptr_t *c, int opts, const char *file, int line); void mutex_init(void); -void _mtx_lock_sleep(struct mtx *m, uintptr_t tid, int opts, +void __mtx_lock_sleep(volatile uintptr_t *c, uintptr_t tid, int opts, const char *file, int line); -void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line); +void __mtx_unlock_sleep(volatile uintptr_t *c, int opts, const char *file, + int line); #ifdef SMP -void _mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, +void _mtx_lock_spin_cookie(volatile uintptr_t *c, uintptr_t tid, int opts, const char *file, int line); #endif -void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line); -void _mtx_lock_flags(struct mtx *m, int opts, const char *file, int line); -void _mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line); -void _mtx_lock_spin_flags(struct mtx *m, int opts, const char *file, +void __mtx_lock_flags(volatile uintptr_t *c, int opts, const char *file, + int line); +void __mtx_unlock_flags(volatile uintptr_t *c, int opts, const char *file, + int line); +void __mtx_lock_spin_flags(volatile uintptr_t *c, int opts, const char *file, int line); -void _mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file, - int line); 
+void __mtx_unlock_spin_flags(volatile uintptr_t *c, int opts, + const char *file, int line); #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) -void _mtx_assert(const struct mtx *m, int what, const char *file, int line); +void __mtx_assert(volatile uintptr_t *c, int what, const char *file, + int line); + +#define _mtx_assert(m, w, f, l) __mtx_assert(&(m)->mtx_lock, w, f, l) #endif void thread_lock_flags_(struct thread *, int, const char *, int); @@ -121,6 +128,29 @@ void thread_lock_flags_(struct thread *, int, cons #define thread_unlock(tdp) \ mtx_unlock_spin((tdp)->td_lock) +#define mtx_init(m, n, t, o) \ + _mtx_init(&(m)->mtx_lock, n, t, o) +#define mtx_destroy(m) \ + _mtx_destroy(&(m)->mtx_lock) +#define mtx_trylock_flags_(m, o, f, l) \ + _mtx_trylock_flags_(&(m)->mtx_lock, o, f, l) +#define _mtx_lock_sleep(m, t, o, f, l) \ + __mtx_lock_sleep(&(m)->mtx_lock, t, o, f, l) +#define _mtx_unlock_sleep(m, o, f, l) \ + __mtx_unlock_sleep(&(m)->mtx_lock, o, f, l) +#ifdef SMP +#define _mtx_lock_spin(m, t, o, f, l) \ + _mtx_lock_spin_cookie(&(m)->mtx_lock, t, o, f, l) +#endif +#define _mtx_lock_flags(m, o, f, l) \ + __mtx_lock_flags(&(m)->mtx_lock, o, f, l) +#define _mtx_unlock_flags(m, o, f, l) \ + __mtx_unlock_flags(&(m)->mtx_lock, o, f, l) +#define _mtx_lock_spin_flags(m, o, f, l) \ + __mtx_lock_spin_flags(&(m)->mtx_lock, o, f, l) +#define _mtx_unlock_spin_flags(m, o, f, l) \ + __mtx_unlock_spin_flags(&(m)->mtx_lock, o, f, l) + #define mtx_recurse lock_object.lo_data /* Very simple operations on mtx_lock. */ @@ -409,7 +439,7 @@ struct mtx_args { SYSINIT(name##_mtx_sysinit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ mtx_sysinit, &name##_args); \ SYSUNINIT(name##_mtx_sysuninit, SI_SUB_LOCK, SI_ORDER_MIDDLE, \ - mtx_destroy, (mtx)) + mtx_sysuninit, (mtx)) /* * The INVARIANTS-enabled mtx_assert() functionality.