Index: kern/kern_lock.c
===================================================================
--- kern/kern_lock.c	(revision 196271)
+++ kern/kern_lock.c	(working copy)
@@ -334,6 +334,9 @@
 	int iflags;
 
 	MPASS((flags & ~LK_INIT_MASK) == 0);
+	ASSERT_ATOMIC_LOAD_PTR(lk->lk_lock,
+	    ("%s: lockmgr not aligned for %s: %p", __func__, wmesg,
+	    &lk->lk_lock));
 
 	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (flags & LK_CANRECURSE)
Index: kern/kern_mutex.c
===================================================================
--- kern/kern_mutex.c	(revision 196271)
+++ kern/kern_mutex.c	(working copy)
@@ -783,8 +783,9 @@
 
 	MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE | MTX_NOWITNESS |
 	    MTX_DUPOK | MTX_NOPROFILE)) == 0);
-	ASSERT_ATOMIC_LOAD(m->mtx_lock, ("%s: mtx_lock not aligned for %s: %p",
-	    __func__, name, &m->mtx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
+	    ("%s: mtx_lock not aligned for %s: %p", __func__, name,
+	    &m->mtx_lock));
 
 #ifdef MUTEX_DEBUG
 	/* Diagnostic and error correction */
Index: kern/kern_rwlock.c
===================================================================
--- kern/kern_rwlock.c	(revision 196271)
+++ kern/kern_rwlock.c	(working copy)
@@ -174,8 +174,9 @@
 
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
-	ASSERT_ATOMIC_LOAD(rw->rw_lock, ("%s: rw_lock not aligned for %s: %p",
-	    __func__, name, &rw->rw_lock));
+	ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
+	    ("%s: rw_lock not aligned for %s: %p", __func__, name,
+	    &rw->rw_lock));
 
 	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)
Index: kern/kern_sx.c
===================================================================
--- kern/kern_sx.c	(revision 196271)
+++ kern/kern_sx.c	(working copy)
@@ -205,8 +205,9 @@
 
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
-	ASSERT_ATOMIC_LOAD(sx->sx_lock, ("%s: sx_lock not aligned for %s: %p",
-	    __func__, description, &sx->sx_lock));
+	ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
+	    ("%s: sx_lock not aligned for %s: %p", __func__, description,
+	    &sx->sx_lock));
 
 	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
Index: sys/systm.h
===================================================================
--- sys/systm.h	(revision 196271)
+++ sys/systm.h	(working copy)
@@ -89,9 +89,18 @@
 #define	__CTASSERT(x, y)	typedef char __assert ## y[(x) ? 1 : -1]
 #endif
 
-#define	ASSERT_ATOMIC_LOAD(var,msg)				\
-	KASSERT(sizeof(var) <= sizeof(uintptr_t) &&		\
-	    ALIGN(&(var)) == (uintptr_t)&(var), msg)
+/*
+ * Assert that a pointer can be loaded atomically with a direct memory
+ * access, even if the value is stale.
+ *
+ * On some architectures this condition is stronger than necessary, as
+ * the atomic operation can be invalidated only by overflowing
+ * cache-line boundaries, but this is a more robust and portable
+ * definition.
+ */
+#define	ASSERT_ATOMIC_LOAD_PTR(var, msg)			\
+	KASSERT(sizeof(var) == sizeof(void *) &&		\
+	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0, msg)
 
 /*
  * XXX the hints declarations are even more misplaced than most declarations
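
For reference, the renamed macro only asserts that the lock word is pointer-sized and naturally aligned; it never dereferences or modifies it. The following standalone userland sketch (not part of the patch; the struct layouts and the CAN_LOAD_PTR_ATOMICALLY helper are hypothetical, made up purely for illustration) reproduces the same size/alignment test and shows it failing on a deliberately misaligned field:

/* Hypothetical illustration only -- mirrors the check added by the patch. */
#include <stdint.h>
#include <stdio.h>

/* Same test as the new ASSERT_ATOMIC_LOAD_PTR(), minus the KASSERT plumbing. */
#define	CAN_LOAD_PTR_ATOMICALLY(var)					\
	(sizeof(var) == sizeof(void *) &&				\
	    ((uintptr_t)&(var) & (sizeof(void *) - 1)) == 0)

/* A lock word preceded by a single byte in a packed struct: misaligned. */
struct bad_lock {
	char		pad;
	uintptr_t	lk_lock;
} __attribute__((packed, aligned(sizeof(void *))));

/* A normally laid out lock word: naturally aligned. */
struct good_lock {
	uintptr_t	lk_lock;
};

int
main(void)
{
	struct bad_lock bad;
	struct good_lock good;

	/* Compilers may warn about taking the address of a packed member. */
	printf("packed  lk_lock atomic-loadable: %d\n",
	    CAN_LOAD_PTR_ATOMICALLY(bad.lk_lock));
	printf("aligned lk_lock atomic-loadable: %d\n",
	    CAN_LOAD_PTR_ATOMICALLY(good.lk_lock));
	return (0);
}

On typical 32-bit and 64-bit machines the first printf would report 0 and the second 1, which is the situation the KASSERT in the patch is meant to catch at lock-initialization time, e.g. a lock word embedded at an odd offset in a packed structure.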