diff --git a/sys/kern/kern_lock.c b/sys/kern/kern_lock.c
index 36a8470..18c82ba 100644
--- a/sys/kern/kern_lock.c
+++ b/sys/kern/kern_lock.c
@@ -1111,184 +1111,6 @@ __lockmgr_args(struct lock *lk, u_int flags, struct lock_object *ilk,
 		} else
 			wakeup_swapper = wakeupshlk(lk, file, line);
 		break;
-	case LK_DRAIN:
-		if (LK_CAN_WITNESS(flags))
-			WITNESS_CHECKORDER(&lk->lock_object, LOP_NEWORDER |
-			    LOP_EXCLUSIVE, file, line, flags & LK_INTERLOCK ?
-			    ilk : NULL);
-
-		/*
-		 * Trying to drain a lock we already own will result in a
-		 * deadlock.
-		 */
-		if (lockmgr_xlocked(lk)) {
-			if (flags & LK_INTERLOCK)
-				class->lc_unlock(ilk);
-			panic("%s: draining %s with the lock held @ %s:%d\n",
-			    __func__, iwmesg, file, line);
-		}
-
-		while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) {
-#ifdef HWPMC_HOOKS
-			PMC_SOFT_CALL( , , lock, failed);
-#endif
-			lock_profile_obtain_lock_failed(&lk->lock_object,
-			    &contested, &waittime);
-
-			/*
-			 * If the lock is expected to not sleep just give up
-			 * and return.
-			 */
-			if (LK_TRYOP(flags)) {
-				LOCK_LOG2(lk, "%s: %p fails the try operation",
-				    __func__, lk);
-				error = EBUSY;
-				break;
-			}
-
-			/*
-			 * Acquire the sleepqueue chain lock because we
-			 * probabilly will need to manipulate waiters flags.
-			 */
-			sleepq_lock(&lk->lock_object);
-			x = lk->lk_lock;
-
-			/*
-			 * if the lock has been released while we spun on
-			 * the sleepqueue chain lock just try again.
-			 */
-			if (x == LK_UNLOCKED) {
-				sleepq_release(&lk->lock_object);
-				continue;
-			}
-
-			v = x & (LK_ALL_WAITERS | LK_EXCLUSIVE_SPINNERS);
-			if ((x & ~v) == LK_UNLOCKED) {
-				v = (x & ~LK_EXCLUSIVE_SPINNERS);
-
-				/*
-				 * If interruptible sleeps left the exclusive
-				 * queue empty avoid a starvation for the
-				 * threads sleeping on the shared queue by
-				 * giving them precedence and cleaning up the
-				 * exclusive waiters bit anyway.
-				 * Please note that lk_exslpfail count may be
-				 * lying about the real number of waiters with
-				 * the LK_SLEEPFAIL flag on because they may
-				 * be used in conjuction with interruptible
-				 * sleeps so lk_exslpfail might be considered
-				 * an 'upper limit' bound, including the edge
-				 * cases.
-				 */
-				if (v & LK_EXCLUSIVE_WAITERS) {
-					queue = SQ_EXCLUSIVE_QUEUE;
-					v &= ~LK_EXCLUSIVE_WAITERS;
-				} else {
-
-					/*
-					 * Exclusive waiters sleeping with
-					 * LK_SLEEPFAIL on and using
-					 * interruptible sleeps/timeout may
-					 * have left spourious lk_exslpfail
-					 * counts on, so clean it up anyway.
-					 */
-					MPASS(v & LK_SHARED_WAITERS);
-					lk->lk_exslpfail = 0;
-					queue = SQ_SHARED_QUEUE;
-					v &= ~LK_SHARED_WAITERS;
-				}
-				if (queue == SQ_EXCLUSIVE_QUEUE) {
-					realexslp =
-					    sleepq_sleepcnt(&lk->lock_object,
-					    SQ_EXCLUSIVE_QUEUE);
-					if (lk->lk_exslpfail >= realexslp) {
-						lk->lk_exslpfail = 0;
-						queue = SQ_SHARED_QUEUE;
-						v &= ~LK_SHARED_WAITERS;
-						if (realexslp != 0) {
-							LOCK_LOG2(lk,
-					"%s: %p has only LK_SLEEPFAIL sleepers",
-							    __func__, lk);
-							LOCK_LOG2(lk,
-			"%s: %p waking up threads on the exclusive queue",
-							    __func__, lk);
-							wakeup_swapper =
-							    sleepq_broadcast(
-							    &lk->lock_object,
-							    SLEEPQ_LK, 0,
-							    SQ_EXCLUSIVE_QUEUE);
-						}
-					} else
-						lk->lk_exslpfail = 0;
-				}
-				if (!atomic_cmpset_ptr(&lk->lk_lock, x, v)) {
-					sleepq_release(&lk->lock_object);
-					continue;
-				}
-				LOCK_LOG3(lk,
-				"%s: %p waking up all threads on the %s queue",
-				    __func__, lk, queue == SQ_SHARED_QUEUE ?
-				    "shared" : "exclusive");
-				wakeup_swapper |= sleepq_broadcast(
-				    &lk->lock_object, SLEEPQ_LK, 0, queue);
-
-				/*
-				 * If shared waiters have been woken up we need
-				 * to wait for one of them to acquire the lock
-				 * before to set the exclusive waiters in
-				 * order to avoid a deadlock.
-				 */
-				if (queue == SQ_SHARED_QUEUE) {
-					for (v = lk->lk_lock;
-					    (v & LK_SHARE) && !LK_SHARERS(v);
-					    v = lk->lk_lock)
-						cpu_spinwait();
-				}
-			}
-
-			/*
-			 * Try to set the LK_EXCLUSIVE_WAITERS flag. If we
-			 * fail, loop back and retry.
-			 */
-			if ((x & LK_EXCLUSIVE_WAITERS) == 0) {
-				if (!atomic_cmpset_ptr(&lk->lk_lock, x,
-				    x | LK_EXCLUSIVE_WAITERS)) {
-					sleepq_release(&lk->lock_object);
-					continue;
-				}
-				LOCK_LOG2(lk, "%s: %p set drain waiters flag",
-				    __func__, lk);
-			}
-
-			/*
-			 * As far as we have been unable to acquire the
-			 * exclusive lock and the exclusive waiters flag
-			 * is set, we will sleep.
-			 */
-			if (flags & LK_INTERLOCK) {
-				class->lc_unlock(ilk);
-				flags &= ~LK_INTERLOCK;
-			}
-			GIANT_SAVE();
-			sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK,
-			    SQ_EXCLUSIVE_QUEUE);
-			sleepq_wait(&lk->lock_object, ipri & PRIMASK);
-			GIANT_RESTORE();
-			LOCK_LOG2(lk, "%s: %p resuming from the sleep queue",
-			    __func__, lk);
-		}
-
-		if (error == 0) {
-			lock_profile_obtain_lock_success(&lk->lock_object,
-			    contested, waittime, file, line);
-			LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0,
-			    lk->lk_recurse, file, line);
-			WITNESS_LOCK(&lk->lock_object, LOP_EXCLUSIVE |
-			    LK_TRYWIT(flags), file, line);
-			TD_LOCKS_INC(curthread);
-			STACK_SAVE(lk);
-		}
-		break;
 	default:
 		if (flags & LK_INTERLOCK)
 			class->lc_unlock(ilk);
diff --git a/sys/sys/lockmgr.h b/sys/sys/lockmgr.h
index ff0473d..fee2833 100644
--- a/sys/sys/lockmgr.h
+++ b/sys/sys/lockmgr.h
@@ -165,7 +165,7 @@ _lockmgr_args_rw(struct lock *lk, u_int flags, struct rwlock *ilk,
  */
 #define	LK_TYPE_MASK	0xFF0000
 #define	LK_DOWNGRADE	0x010000
-#define	LK_DRAIN	0x020000
+#define	LK_UNUSED	0x020000	/* was LK_DRAIN */
 #define	LK_EXCLOTHER	0x040000
 #define	LK_EXCLUSIVE	0x080000
 #define	LK_RELEASE	0x100000