Index: src/sys/kern/kern_lock.c
@@ -58,6 +58,7 @@
     (LK_ADAPTIVE | LK_CANRECURSE | LK_NOSHARE));
 CTASSERT(LK_UNLOCKED == (LK_UNLOCKED &
     ~(LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS)));
+CTASSERT((LK_DISOWNED & LO_CLASSFLAGS) == LK_DISOWNED);
 
 #define	SQ_EXCLUSIVE_QUEUE		0
 #define	SQ_SHARED_QUEUE			1
@@ -127,7 +128,7 @@
 	((f) & LK_SLEEPFAIL) == 0)
 
 #define	lockmgr_disowned(lk)						\
-	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC)
+	(((lk)->lock_object.lo_flags & LK_DISOWNED) == LK_DISOWNED)
 
 #define	lockmgr_xlocked(lk)						\
 	(((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread)
@@ -446,7 +447,7 @@
 	 * changes.
 	 */
 	if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-	    LK_HOLDER(x) != LK_KERNPROC) {
+	    !lockmgr_disowned(lk)) {
 		owner = (struct thread *)LK_HOLDER(x);
 		if (LOCK_LOG_TEST(&lk->lock_object, 0))
 			CTR3(KTR_LOCK,
@@ -517,7 +518,7 @@
 		 * again.
 		 */
 		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-		    LK_HOLDER(x) != LK_KERNPROC) {
+		    !lockmgr_disowned(lk)) {
 			owner = (struct thread *)LK_HOLDER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&lk->lock_object);
@@ -659,7 +660,7 @@
 		 */
 		x = lk->lk_lock;
 		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-		    LK_HOLDER(x) != LK_KERNPROC) {
+		    !lockmgr_disowned(lk)) {
 			owner = (struct thread *)LK_HOLDER(x);
 			if (LOCK_LOG_TEST(&lk->lock_object, 0))
 				CTR3(KTR_LOCK,
@@ -733,7 +734,7 @@
 		 * again.
 		 */
 		if (LK_CAN_ADAPT(lk, flags) && (x & LK_SHARE) == 0 &&
-		    LK_HOLDER(x) != LK_KERNPROC) {
+		    !lockmgr_disowned(lk)) {
 			owner = (struct thread *)LK_HOLDER(x);
 			if (TD_IS_RUNNING(owner)) {
 				sleepq_release(&lk->lock_object);
@@ -832,15 +833,17 @@
 		x = lk->lk_lock;
 
 		if ((x & LK_SHARE) == 0) {
+			int anon = 0;
 
 			/*
 			 * As first option, treact the lock as if it has not
 			 * any waiter.
 			 * Fix-up the tid var if the lock has been disowned.
 			 */
-			if (LK_HOLDER(x) == LK_KERNPROC)
-				tid = LK_KERNPROC;
-			else {
+			if (lockmgr_disowned(lk)) {
+				tid = LK_HOLDER(lk->lk_lock);
+				anon++;
+			} else {
 				WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE,
 				    file, line);
 				TD_LOCKS_DEC(curthread);
@@ -858,7 +861,9 @@
 				lk->lk_recurse--;
 				break;
 			}
-			if (tid != LK_KERNPROC)
+			if (anon)
+				lk->lock_object.lo_flags &= ~LK_DISOWNED;
+			else
 				lock_profile_release_lock(&lk->lock_object);
 
 			if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid,
@@ -1033,33 +1038,21 @@
 void
 _lockmgr_disown(struct lock *lk, const char *file, int line)
 {
-	uintptr_t tid, x;
+	uintptr_t tid;
 
 	tid = (uintptr_t)curthread;
 	_lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line);
 
 	/*
-	 * If the owner is already LK_KERNPROC just skip the whole operation.
+	 * If the lock has already been disowned just skip the whole operation.
 	 */
-	if (LK_HOLDER(lk->lk_lock) != tid)
+	if (lockmgr_disowned(lk))
 		return;
 	lock_profile_release_lock(&lk->lock_object);
 	LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, line);
 	WITNESS_UNLOCK(&lk->lock_object, LOP_EXCLUSIVE, file, line);
 	TD_LOCKS_DEC(curthread);
-
-	/*
-	 * In order to preserve waiters flags, just spin.
-	 */
-	for (;;) {
-		x = lk->lk_lock;
-		MPASS((x & LK_EXCLUSIVE_SPINNERS) == 0);
-		x &= LK_ALL_WAITERS;
-		if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid | x,
-		    LK_KERNPROC | x))
-			return;
-		cpu_spinwait();
-	}
+	lk->lock_object.lo_flags |= LK_DISOWNED;
 }
 
 void
@@ -1102,7 +1095,7 @@
 	v = LK_HOLDER(x);
 
 	if ((x & LK_SHARE) == 0) {
-		if (v == (uintptr_t)curthread || v == LK_KERNPROC)
+		if (v == (uintptr_t)curthread || lockmgr_disowned(lk))
 			ret = LK_EXCLUSIVE;
 		else
 			ret = LK_EXCLOTHER;
@@ -1225,12 +1218,11 @@
 		db_printf("SLOCK: %ju\n", (uintmax_t)LK_SHARERS(lk->lk_lock));
 	else {
 		td = lockmgr_xholder(lk);
-		if (td == (struct thread *)LK_KERNPROC)
-			db_printf("XLOCK: LK_KERNPROC\n");
-		else
-			db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
-			    td->td_tid, td->td_proc->p_pid,
-			    td->td_proc->p_comm);
+		db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td,
+		    td->td_tid, td->td_proc->p_pid,
+		    td->td_proc->p_comm);
+		if (lockmgr_disowned(lk))
+			db_printf(" disowned\n");
 		if (lockmgr_recursed(lk))
 			db_printf(" recursed: %d\n", lk->lk_recurse);
 	}
Index: src/sys/sys/lockmgr.h
@@ -38,6 +38,7 @@
 #include <sys/stack.h>
 #endif
 
+/* Stored in the lock_object lock field */
 #define	LK_SHARE			0x01
 #define	LK_SHARED_WAITERS		0x02
 #define	LK_EXCLUSIVE_WAITERS		0x04
@@ -53,7 +54,9 @@
 #define	LK_SHARERS_LOCK(x)	((x) << LK_SHARERS_SHIFT | LK_SHARE)
 #define	LK_ONE_SHARER		(1 << LK_SHARERS_SHIFT)
 #define	LK_UNLOCKED		LK_SHARERS_LOCK(0)
-#define	LK_KERNPROC		((uintptr_t)(-1) & ~LK_FLAGMASK)
+
+/* Stored in the lock_object class flags */
+#define	LK_DISOWNED		0x10
 
 struct lock {
 	struct lock_object	lock_object;
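The sketch below is not part of the patch; it is a minimal userland model of the scheme the patch switches to: instead of overwriting the holder bits in lk_lock with the LK_KERNPROC sentinel (and spinning to preserve waiter flags), disowning leaves the last owner's tid in lk_lock and sets a LK_DISOWNED bit kept with the lock object, which the release path clears. Everything except the LK_* names is an illustrative assumption, not the kernel API: xlock(), disown(), release(), the simplified struct lock and the tid constants are made up, and waiter queues, atomics and sleepqueues are omitted.

/*
 * Userland model only: no waiter queues, no atomics, no sleepqueues.
 * The LK_* names mirror the patch; everything else is made up.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	LK_FLAGMASK	0x0f		/* share/waiter/spinner bits */
#define	LK_DISOWNED	0x10		/* lives in lo_flags, not in lk_lock */
#define	LK_HOLDER(x)	((x) & ~(uintptr_t)LK_FLAGMASK)

struct lock {
	unsigned	lo_flags;	/* stands in for lock_object.lo_flags */
	uintptr_t	lk_lock;	/* owner tid | flag bits */
};

static int
lockmgr_disowned(const struct lock *lk)
{

	return ((lk->lo_flags & LK_DISOWNED) == LK_DISOWNED);
}

static void
xlock(struct lock *lk, uintptr_t tid)
{

	lk->lk_lock = tid;		/* exclusive owner stored directly */
}

static void
disown(struct lock *lk, uintptr_t tid)
{

	/* Only the current exclusive owner may disown; its tid is kept. */
	assert(LK_HOLDER(lk->lk_lock) == tid);
	if (lockmgr_disowned(lk))
		return;
	lk->lo_flags |= LK_DISOWNED;
}

static void
release(struct lock *lk, uintptr_t tid)
{
	uintptr_t owner;

	/* A disowned lock may be released by any thread. */
	owner = lockmgr_disowned(lk) ? LK_HOLDER(lk->lk_lock) : tid;
	assert(LK_HOLDER(lk->lk_lock) == owner);
	lk->lo_flags &= ~LK_DISOWNED;
	lk->lk_lock = 0;		/* LK_UNLOCKED */
}

int
main(void)
{
	struct lock lk = { 0, 0 };

	xlock(&lk, 0x1000);		/* "thread A" takes the lock... */
	disown(&lk, 0x1000);		/* ...and hands it off */
	printf("last owner still visible: %#lx\n",
	    (unsigned long)LK_HOLDER(lk.lk_lock));
	release(&lk, 0x2000);		/* "thread B" drops it */
	return (0);
}

Compared with the LK_KERNPROC scheme, the holder bits never stop carrying a real thread pointer, so the adaptive-spin, lockstatus and DDB paths can keep reading LK_HOLDER() and only need the extra lockmgr_disowned() check.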