Index: sys/kern/kern_lock.c
===================================================================
--- sys/kern/kern_lock.c	(revision 193228)
+++ sys/kern/kern_lock.c	(working copy)
@@ -51,8 +51,7 @@
 #include <sys/stack.h>
 #endif
 
-CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) ==
-    (LK_CANRECURSE | LK_NOSHARE));
+CTASSERT((LK_NOSHARE & LO_CLASSFLAGS) == LK_NOSHARE);
 
 #define	SQ_EXCLUSIVE_QUEUE	0
 #define	SQ_SHARED_QUEUE		1
@@ -316,7 +315,9 @@
 
 	MPASS((flags & ~LK_INIT_MASK) == 0);
 
-	iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
+	iflags = LO_SLEEPABLE | LO_UPGRADABLE;
+	if (flags & LK_CANRECURSE)
+		iflags |= LO_RECURSABLE;
 	if ((flags & LK_NODUP) == 0)
 		iflags |= LO_DUPOK;
 	if (flags & LK_NOPROFILE)
@@ -325,7 +326,7 @@
 		iflags |= LO_WITNESS;
 	if (flags & LK_QUIET)
 		iflags |= LO_QUIET;
-	iflags |= flags & (LK_CANRECURSE | LK_NOSHARE);
+	iflags |= flags & LK_NOSHARE;
 
 	lk->lk_lock = LK_UNLOCKED;
 	lk->lk_recurse = 0;
@@ -530,7 +531,7 @@
 		 */
 		if (lockmgr_xlocked(lk)) {
 			if ((flags & LK_CANRECURSE) == 0 &&
-			    (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) {
+			    (lk->lock_object.lo_flags & LO_RECURSABLE) == 0) {
 
 				/*
 				 * If the lock is expected to not panic just
Index: sys/kern/kern_rwlock.c
===================================================================
--- sys/kern/kern_rwlock.c	(revision 193228)
+++ sys/kern/kern_rwlock.c	(working copy)
@@ -51,8 +51,6 @@
 
 #include <machine/cpu.h>
 
-CTASSERT((RW_RECURSE & LO_CLASSFLAGS) == RW_RECURSE);
-
 #if defined(SMP) && !defined(NO_ADAPTIVE_RWLOCKS)
 #define	ADAPTIVE_RWLOCKS
 #endif
@@ -177,16 +175,17 @@
 	MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
 	    RW_RECURSE)) == 0);
 
-	flags = LO_UPGRADABLE | LO_RECURSABLE;
+	flags = LO_UPGRADABLE;
 	if (opts & RW_DUPOK)
 		flags |= LO_DUPOK;
 	if (opts & RW_NOPROFILE)
 		flags |= LO_NOPROFILE;
 	if (!(opts & RW_NOWITNESS))
 		flags |= LO_WITNESS;
+	if (opts & RW_RECURSE)
+		flags |= LO_RECURSABLE;
 	if (opts & RW_QUIET)
 		flags |= LO_QUIET;
-	flags |= opts & RW_RECURSE;
 
 	rw->rw_lock = RW_UNLOCKED;
 	rw->rw_recurse = 0;
@@ -249,7 +248,8 @@
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_try_wlock() of destroyed rwlock @ %s:%d", file, line));
 
-	if (rw_wlocked(rw) && (rw->lock_object.lo_flags & RW_RECURSE) != 0) {
+	if (rw_wlocked(rw) &&
+	    (rw->lock_object.lo_flags & LO_RECURSABLE) != 0) {
 		rw->rw_recurse++;
 		rval = 1;
 	} else
@@ -646,7 +646,7 @@
 #endif
 
 	if (rw_wlocked(rw)) {
-		KASSERT(rw->lock_object.lo_flags & RW_RECURSE,
+		KASSERT(rw->lock_object.lo_flags & LO_RECURSABLE,
 		    ("%s: recursing but non-recursive rw %s @ %s:%d\n",
 		    __func__, rw->lock_object.lo_name, file, line));
 		rw->rw_recurse++;
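[Editor's note] The lockmgr and rwlock hunks above both make the same change: a class-private option bit (LK_CANRECURSE, RW_RECURSE) is no longer copied verbatim into lo_flags and tested later, but is consumed at init time and recorded as the class-independent LO_RECURSABLE bit. That also lets each CTASSERT shrink to only the bits that remain genuinely class-specific. Below is a minimal standalone sketch of that translation; it is illustration only, not kernel code, and the flag values and the helper names rwlock_init_flags() and lock_can_recurse() are hypothetical. The kern_sx.c hunks below repeat the same pattern for SX_RECURSE.

/*
 * Standalone sketch (not kernel code) of the flag translation the patch
 * applies: a class option such as RW_RECURSE is consumed at init time and
 * recorded as the shared LO_RECURSABLE bit, so later recursion checks need
 * no per-class flag at all.  All values and names here are hypothetical.
 */
#include <assert.h>
#include <stdio.h>

#define LO_RECURSABLE	0x00080000u	/* shared lock_object flag (hypothetical value) */
#define RW_RECURSE	0x00000001u	/* rwlock-class init option (hypothetical value) */

struct lock_object {
	unsigned int lo_flags;
};

/* What rw_init_flags() now does: translate the option, don't copy it. */
static void
rwlock_init_flags(struct lock_object *lo, unsigned int opts)
{
	unsigned int flags = 0;

	if (opts & RW_RECURSE)
		flags |= LO_RECURSABLE;
	lo->lo_flags = flags;
}

/* What the recursion paths now test: one bit, the same for every class. */
static int
lock_can_recurse(const struct lock_object *lo)
{

	return ((lo->lo_flags & LO_RECURSABLE) != 0);
}

int
main(void)
{
	struct lock_object plain, recursive;

	rwlock_init_flags(&plain, 0);
	rwlock_init_flags(&recursive, RW_RECURSE);
	assert(!lock_can_recurse(&plain));
	assert(lock_can_recurse(&recursive));
	printf("plain: %d, recursive: %d\n",
	    lock_can_recurse(&plain), lock_can_recurse(&recursive));
	return (0);
}
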
Index: sys/kern/kern_sx.c
===================================================================
--- sys/kern/kern_sx.c	(revision 193228)
+++ sys/kern/kern_sx.c	(working copy)
@@ -66,8 +66,7 @@
 #define	ADAPTIVE_SX
 #endif
 
-CTASSERT(((SX_NOADAPTIVE | SX_RECURSE) & LO_CLASSFLAGS) ==
-    (SX_NOADAPTIVE | SX_RECURSE));
+CTASSERT((SX_NOADAPTIVE & LO_CLASSFLAGS) == SX_NOADAPTIVE);
 
 /* Handy macros for sleep queues. */
 #define	SQ_EXCLUSIVE_QUEUE	0
@@ -207,17 +206,19 @@
 	MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
 	    SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
 
-	flags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE;
+	flags = LO_SLEEPABLE | LO_UPGRADABLE;
 	if (opts & SX_DUPOK)
 		flags |= LO_DUPOK;
 	if (opts & SX_NOPROFILE)
 		flags |= LO_NOPROFILE;
 	if (!(opts & SX_NOWITNESS))
 		flags |= LO_WITNESS;
+	if (opts & SX_RECURSE)
+		flags |= LO_RECURSABLE;
 	if (opts & SX_QUIET)
 		flags |= LO_QUIET;
-	flags |= opts & (SX_NOADAPTIVE | SX_RECURSE);
+	flags |= opts & SX_NOADAPTIVE;
 
 	sx->sx_lock = SX_LOCK_UNLOCKED;
 	sx->sx_recurse = 0;
 	lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);
@@ -305,7 +306,8 @@
 	KASSERT(sx->sx_lock != SX_LOCK_DESTROYED,
 	    ("sx_try_xlock() of destroyed sx @ %s:%d", file, line));
 
-	if (sx_xlocked(sx) && (sx->lock_object.lo_flags & SX_RECURSE) != 0) {
+	if (sx_xlocked(sx) &&
+	    (sx->lock_object.lo_flags & LO_RECURSABLE) != 0) {
 		sx->sx_recurse++;
 		atomic_set_ptr(&sx->sx_lock, SX_LOCK_RECURSED);
 		rval = 1;
@@ -479,7 +481,7 @@
 
 	/* If we already hold an exclusive lock, then recurse. */
 	if (sx_xlocked(sx)) {
-		KASSERT((sx->lock_object.lo_flags & SX_RECURSE) != 0,
+		KASSERT((sx->lock_object.lo_flags & LO_RECURSABLE) != 0,
 		    ("_sx_xlock_hard: recursed on non-recursive sx %s @ %s:%d\n",
 		    sx->lock_object.lo_name, file, line));
 		sx->sx_recurse++;
Index: sys/sys/vnode.h
===================================================================
--- sys/sys/vnode.h	(revision 193228)
+++ sys/sys/vnode.h	(working copy)
@@ -419,7 +419,7 @@
 #define	VI_MTX(vp)	(&(vp)->v_interlock)
 
 #define	VN_LOCK_AREC(vp)						\
-	((vp)->v_vnlock->lock_object.lo_flags |= LK_CANRECURSE)
+	((vp)->v_vnlock->lock_object.lo_flags |= LO_RECURSABLE)
 #define	VN_LOCK_ASHARE(vp)						\
 	((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE)
 
Index: sys/ufs/ffs/ffs_softdep.c
===================================================================
--- sys/ufs/ffs/ffs_softdep.c	(revision 193228)
+++ sys/ufs/ffs/ffs_softdep.c	(working copy)
@@ -556,8 +556,8 @@
 #define ACQUIRE_LOCK(lk)	mtx_lock(lk)
 #define FREE_LOCK(lk)		mtx_unlock(lk)
 
-#define	BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LK_CANRECURSE)
-#define	BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LK_CANRECURSE)
+#define	BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
+#define	BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)
 
 /*
  * Worklist queue management.
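
[Editor's note] The sys/sys/vnode.h and ffs_softdep.c hunks cover the other side of the change: consumers that toggle recursion on an already-initialized lock now set and clear LO_RECURSABLE in lo_flags directly, instead of a lockmgr-private bit. A standalone sketch of that set/clear pattern follows, again with a hypothetical flag value and a pared-down struct buf; it is illustration only, not kernel code.

/*
 * Standalone sketch (not kernel code) of the BUF_AREC()/BUF_NOREC()
 * pattern above: recursion is enabled or disabled on a live lock by
 * setting or clearing the shared LO_RECURSABLE bit.  Names and the
 * flag value are hypothetical.
 */
#include <assert.h>

#define LO_RECURSABLE	0x00080000u	/* hypothetical value */

struct lock_object {
	unsigned int lo_flags;
};

struct buf {
	struct {
		struct lock_object lock_object;
	} b_lock;
};

#define BUF_AREC(bp)	((bp)->b_lock.lock_object.lo_flags |= LO_RECURSABLE)
#define BUF_NOREC(bp)	((bp)->b_lock.lock_object.lo_flags &= ~LO_RECURSABLE)

int
main(void)
{
	struct buf b = { { { 0 } } };

	BUF_AREC(&b);		/* allow recursion for a critical span */
	assert(b.b_lock.lock_object.lo_flags & LO_RECURSABLE);
	BUF_NOREC(&b);		/* and revoke it afterwards */
	assert((b.b_lock.lock_object.lo_flags & LO_RECURSABLE) == 0);
	return (0);
}

Note that these macros, like the kernel ones in the patch, perform a plain read-modify-write of lo_flags; callers are presumably expected to ensure nothing else mutates the flag word concurrently while the bit is being flipped.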