--- //depot/vendor/freebsd/src/sys/fs/unionfs/union_subr.c 2008/01/24 12:35:50 +++ //depot/user/attilio/attilio_lockmgr/fs/unionfs/union_subr.c 2008/03/26 17:22:37 @@ -538,12 +538,13 @@ unionfs_node_update(struct unionfs_node *unp, struct vnode *uvp, struct thread *td) { - int count, lockcnt; + unsigned count, lockrec; struct vnode *vp; struct vnode *lvp; vp = UNIONFSTOV(unp); lvp = unp->un_lowervp; + ASSERT_VOP_ELOCKED(lvp, "unionfs_node_update"); /* * lock update @@ -551,11 +552,9 @@ VI_LOCK(vp); unp->un_uppervp = uvp; vp->v_vnlock = uvp->v_vnlock; - lockcnt = lvp->v_vnlock->lk_exclusivecount; - if (lockcnt <= 0) - panic("unionfs: no exclusive lock"); VI_UNLOCK(vp); - for (count = 1; count < lockcnt; count++) + lockrec = lvp->v_vnlock->lk_recurse; + for (count = 0; count < lockrec; count++) vn_lock(uvp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY); } --- //depot/vendor/freebsd/src/sys/kern/kern_lock.c 2008/03/01 19:51:07 +++ //depot/user/attilio/attilio_lockmgr/kern/kern_lock.c 2008/04/02 18:33:00 @@ -1,692 +1,869 @@ -/*- - * Copyright (c) 1995 - * The Regents of the University of California. All rights reserved. - * - * Copyright (C) 1997 - * John S. Dyson. All rights reserved. - * - * This code contains ideas from software contributed to Berkeley by - * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating - * System project at Carnegie-Mellon University. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
- * - * @(#)kern_lock.c 8.18 (Berkeley) 5/21/95 - */ +#include "opt_ddb.h" #include -__FBSDID("$FreeBSD: src/sys/kern/kern_lock.c,v 1.127 2008/03/01 19:47:49 attilio Exp $"); - -#include "opt_ddb.h" -#include "opt_global.h" #include -#include -#include #include #include +#include #include #include #include -#include -#include +#include #ifdef DEBUG_LOCKS #include #endif +#include + +#ifdef DDB +#include +#endif + +CTASSERT(((LK_CANRECURSE | LK_NOSHARE) & LO_CLASSFLAGS) == + (LK_CANRECURSE | LK_NOSHARE)); + +#define SQ_EXCLUSIVE_QUEUE 0 +#define SQ_SHARED_QUEUE 1 + +#ifndef INVARIANTS +#define _lockmgr_assert(lk, what, file, line) +#define TD_LOCKS_INC(td) +#define TD_LOCKS_DEC(td) +#else +#define TD_LOCKS_INC(td) ((td)->td_locks++) +#define TD_LOCKS_DEC(td) ((td)->td_locks--) +#endif + +#ifndef DEBUG_LOCKS +#define STACK_PRINT(lk) +#define STACK_SAVE(lk) +#define STACK_ZERO(lk) +#else +#define STACK_PRINT(lk) stack_print_ddb(&(lk)->lk_stack) +#define STACK_SAVE(lk) stack_save(&(lk)->lk_stack) +#define STACK_ZERO(lk) stack_zero(&(lk)->lk_stack) +#endif + +#define LOCK_LOG2(lk, string, arg1, arg2) \ + if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ + CTR2(KTR_LOCK, (string), (arg1), (arg2)) +#define LOCK_LOG3(lk, string, arg1, arg2, arg3) \ + if (LOCK_LOG_TEST(&(lk)->lock_object, 0)) \ + CTR3(KTR_LOCK, (string), (arg1), (arg2), (arg3)) + +#define lockmgr_disowned(lk) \ + (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == LK_KERNPROC) -#define LOCKMGR_TRYOP(x) ((x) & LK_NOWAIT) -#define LOCKMGR_TRYW(x) (LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0) -#define LOCKMGR_UNHELD(x) (((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0) -#define LOCKMGR_NOTOWNER(td) ((td) != curthread && (td) != LK_KERNPROC) +#define lockmgr_xlocked(lk) \ + (((lk)->lk_lock & ~(LK_FLAGMASK & ~LK_SHARE)) == (uintptr_t)curthread) -static void assert_lockmgr(struct lock_object *lock, int what); +static void assert_lockmgr(struct lock_object *lock, int how); #ifdef DDB -#include -static void db_show_lockmgr(struct lock_object *lock); +static void db_show_lockmgr(struct lock_object *lock); #endif -static void lock_lockmgr(struct lock_object *lock, int how); -static int unlock_lockmgr(struct lock_object *lock); +static void lock_lockmgr(struct lock_object *lock, int how); +static int unlock_lockmgr(struct lock_object *lock); struct lock_class lock_class_lockmgr = { .lc_name = "lockmgr", - .lc_flags = LC_SLEEPLOCK | LC_SLEEPABLE | LC_RECURSABLE | LC_UPGRADABLE, + .lc_flags = LC_RECURSABLE | LC_SLEEPABLE | LC_SLEEPLOCK | LC_UPGRADABLE, .lc_assert = assert_lockmgr, #ifdef DDB .lc_ddb_show = db_show_lockmgr, #endif .lc_lock = lock_lockmgr, - .lc_unlock = unlock_lockmgr, + .lc_unlock = unlock_lockmgr }; -#ifndef INVARIANTS -#define _lockmgr_assert(lkp, what, file, line) -#endif +static __inline struct thread * +lockmgr_xholder(struct lock *lk) +{ + uintptr_t x; + + x = lk->lk_lock; + return ((x & LK_SHARE) ? NULL : (struct thread *)LK_HOLDER(x)); +} /* - * Locking primitives implementation. - * Locks provide shared/exclusive sychronization. + * It assumes sleepq_lock held and returns with this one unheld. + * If LK_INTERLOCK is specified the interlock is not reacquired after the + * sleep. */ +static __inline int +sleeplk(struct lock *lk, u_int flags, struct mtx *ilk, const char *wmesg, + int pri, int timo, int queue) +{ + int catch, error; + + catch = (pri) ? (pri & PCATCH) : 0; + pri &= PRIMASK; + error = 0; + + LOCK_LOG3(lk, "%s: %p blocking on the %s sleepqueue", __func__, lk, + (queue == SQ_EXCLUSIVE_QUEUE) ? 
"exclusive" : "shared"); + + if (flags & LK_INTERLOCK) + mtx_unlock(ilk); + DROP_GIANT(); + sleepq_add(&lk->lock_object, NULL, wmesg, SLEEPQ_LK | (catch ? + SLEEPQ_INTERRUPTIBLE : 0), queue); + if ((flags & LK_TIMELOCK) && timo) + sleepq_set_timeout(&lk->lock_object, timo); + if (queue == SQ_EXCLUSIVE_QUEUE) + lk->lk_xwaiters++; + + /* + * Decisional switch for real sleeping. + */ + if ((flags & LK_TIMELOCK) && timo && catch) + error = sleepq_timedwait_sig(&lk->lock_object, pri); + else if ((flags & LK_TIMELOCK) && timo) + error = sleepq_timedwait(&lk->lock_object, pri); + else if (catch) + error = sleepq_wait_sig(&lk->lock_object, pri); + else + sleepq_wait(&lk->lock_object, pri); + PICKUP_GIANT(); + if ((flags & LK_SLEEPFAIL) && error == 0) + error = ENOLCK; + + return (error); +} + +static __inline void +wakeupshlk(struct lock *lk, const char *file, int line) +{ + uintptr_t x; + + TD_LOCKS_DEC(curthread); + LOCK_LOG_LOCK("SUNLOCK", &lk->lock_object, 0, 0, file, line); + + for (;;) { + x = lk->lk_lock; + MPASS((x & (LK_SHARE | LK_SHARED_WAITERS)) == LK_SHARE); + + /* + * If there is more than one shared lock held, just drop one + * and return. + */ + if (LK_SHARERS(x) > 1) { + if (atomic_cmpset_ptr(&lk->lk_lock, x, + x - LK_ONE_SHARER)) + break; + continue; + } + + /* + * If there are not waiters on the exclusive queue, drop the + * lock quickly. + */ + if ((x & LK_EXCLUSIVE_WAITERS) == 0) { + MPASS(x == LK_SHARERS_LOCK(1)); + if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1), + LK_UNLOCKED)) + break; + continue; + } + MPASS(x == (LK_SHARERS_LOCK(1) | LK_EXCLUSIVE_WAITERS)); + + /* + * We should have a sharer with not shared waiters, so enter + * the hard path for handling wakeups correctly. + */ + sleepq_lock(&lk->lock_object); + MPASS(lk->lk_lock & LK_EXCLUSIVE_WAITERS); + x = LK_EXCLUSIVE_WAITERS; + + /* + * If the lock has exclusive waiters, just wake up one in order + * to avoid a thundering herd problems, which has been + * sometimes reported in main lockmgr consumers like vnode + * locks. 
+ */ + if ((--lk->lk_xwaiters) == 0) + x = 0; + if (!atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | + LK_EXCLUSIVE_WAITERS, LK_UNLOCKED | x)) { + lk->lk_xwaiters++; + sleepq_release(&lk->lock_object); + continue; + } + + LOCK_LOG2(lk, + "%s: %p waking up a thread on the exclusive queue", + __func__, lk); + sleepq_signal(&lk->lock_object, SLEEPQ_LK, 0, + SQ_EXCLUSIVE_QUEUE); + sleepq_release(&lk->lock_object); + break; + } -void + lock_profile_release_lock(&lk->lock_object); +} + +static void assert_lockmgr(struct lock_object *lock, int what) { panic("lockmgr locks do not support assertions"); } -void +static void lock_lockmgr(struct lock_object *lock, int how) { panic("lockmgr locks do not support sleep interlocking"); } -int +static int unlock_lockmgr(struct lock_object *lock) { panic("lockmgr locks do not support sleep interlocking"); } -#define COUNT(td, x) ((td)->td_locks += (x)) -#define LK_ALL (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | \ - LK_SHARE_NONZERO | LK_WAIT_NONZERO) +void +lockinit(struct lock *lk, int pri, const char *wmesg, int timo, int flags) +{ + int iflags; + + MPASS((flags & ~LK_INIT_MASK) == 0); -static int acquire(struct lock **lkpp, int extflags, int wanted, - const char *wmesg, int prio, int timo, int *contested, uint64_t *waittime); -static int acquiredrain(struct lock *lkp, int extflags, const char *wmesg, - int prio, int timo); + iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE; + if ((flags & LK_NODUP) == 0) + iflags |= LO_DUPOK; + if (flags & LK_NOPROFILE) + iflags |= LO_NOPROFILE; + if ((flags & LK_NOWITNESS) == 0) + iflags |= LO_WITNESS; + if (flags & LK_QUIET) + iflags |= LO_QUIET; + iflags |= flags & (LK_CANRECURSE | LK_NOSHARE); -static __inline void -sharelock(struct thread *td, struct lock *lkp, int incr) { - lkp->lk_flags |= LK_SHARE_NONZERO; - lkp->lk_sharecount += incr; - COUNT(td, incr); + lk->lk_lock = LK_UNLOCKED; + lk->lk_recurse = 0; + lk->lk_xwaiters = 0; + lk->lk_timo = timo; + lk->lk_pri = pri; + lock_init(&lk->lock_object, &lock_class_lockmgr, wmesg, NULL, iflags); + STACK_ZERO(lk); } -static __inline void -shareunlock(struct thread *td, struct lock *lkp, int decr) { +void +lockdestroy(struct lock *lk) +{ - KASSERT(lkp->lk_sharecount >= decr, ("shareunlock: count < decr")); - - COUNT(td, -decr); - if (lkp->lk_sharecount == decr) { - lkp->lk_flags &= ~LK_SHARE_NONZERO; - if (lkp->lk_flags & (LK_WANT_UPGRADE | LK_WANT_EXCL)) { - wakeup(lkp); - } - lkp->lk_sharecount = 0; - } else { - lkp->lk_sharecount -= decr; - } + KASSERT(lk->lk_lock == LK_UNLOCKED, ("lockmgr still held")); + KASSERT(lk->lk_recurse == 0, ("lockmgr still recursed")); + KASSERT(lk->lk_xwaiters == 0, ("lockmgr has still exclusive waiters")); + lock_destroy(&lk->lock_object); } -static int -acquire(struct lock **lkpp, int extflags, int wanted, const char *wmesg, - int prio, int timo, int *contested, uint64_t *waittime) +int +_lockmgr_args(struct lock *lk, u_int flags, struct mtx *ilk, const char *wmesg, + int pri, int timo, const char *file, int line) { - struct lock *lkp = *lkpp; + uint64_t waittime; const char *iwmesg; - int error, iprio, itimo; + uintptr_t tid, v, x; + u_int op; + int contested, error, ipri, itimo; - iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg; - iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio; - itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo; + contested = 0; + error = 0; + waittime = 0; + tid = (uintptr_t)curthread; + op = (flags & LK_TYPE_MASK); + iwmesg = (wmesg == LK_WMESG_DEFAULT) ? 
lk->lock_object.lo_name : wmesg; + ipri = (pri == LK_PRIO_DEFAULT) ? lk->lk_pri : pri; + itimo = (timo == LK_TIMO_DEFAULT) ? lk->lk_timo : timo; - CTR3(KTR_LOCK, - "acquire(): lkp == %p, extflags == 0x%x, wanted == 0x%x", - lkp, extflags, wanted); + MPASS((flags & ~LK_TOTAL_MASK) == 0); + KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0 || + (op != LK_DOWNGRADE && op != LK_RELEASE), + ("%s: Invalid flags in regard of the operation desired @ %s:%d", + __func__, file, line)); - if ((extflags & LK_NOWAIT) && (lkp->lk_flags & wanted)) - return EBUSY; - error = 0; - if ((lkp->lk_flags & wanted) != 0) - lock_profile_obtain_lock_failed(&lkp->lk_object, contested, waittime); - - while ((lkp->lk_flags & wanted) != 0) { - CTR2(KTR_LOCK, - "acquire(): lkp == %p, lk_flags == 0x%x sleeping", - lkp, lkp->lk_flags); - lkp->lk_flags |= LK_WAIT_NONZERO; - lkp->lk_waitcount++; - error = msleep(lkp, lkp->lk_interlock, iprio, iwmesg, - ((extflags & LK_TIMELOCK) ? itimo : 0)); - lkp->lk_waitcount--; - if (lkp->lk_waitcount == 0) - lkp->lk_flags &= ~LK_WAIT_NONZERO; - if (error) - break; - if (extflags & LK_SLEEPFAIL) { - error = ENOLCK; - break; - } - if (lkp->lk_newlock != NULL) { - mtx_lock(lkp->lk_newlock->lk_interlock); - mtx_unlock(lkp->lk_interlock); - if (lkp->lk_waitcount == 0) - wakeup((void *)(&lkp->lk_newlock)); - *lkpp = lkp = lkp->lk_newlock; - } + if (panicstr != NULL) { + if (flags & LK_INTERLOCK) + mtx_unlock(ilk); + return (0); } - mtx_assert(lkp->lk_interlock, MA_OWNED); - return (error); -} -/* - * Set, change, or release a lock. - * - * Shared requests increment the shared count. Exclusive requests set the - * LK_WANT_EXCL flag (preventing further shared locks), and wait for already - * accepted shared locks and shared-to-exclusive upgrades to go away. - */ -int -_lockmgr_args(struct lock *lkp, u_int flags, struct mtx *interlkp, - const char *wmesg, int prio, int timo, char *file, int line) + if (op == LK_SHARED && (lk->lock_object.lo_flags & LK_NOSHARE)) + op = LK_EXCLUSIVE; -{ - struct thread *td; - int error; - int extflags, lockflags; - int contested = 0; - uint64_t waitstart = 0; + switch (op) { + case LK_SHARED: + for (;;) { + x = lk->lk_lock; - error = 0; - td = curthread; + /* + * If no other thread has an exclusive lock, try + * to bump the count of sharers. Since we have to + * preserve the state of waiters, if we fail to + * acquire the shared lock loop back and retry. + */ + if (x & LK_SHARE) { + MPASS((x & LK_SHARED_WAITERS) == 0); + if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, + x + LK_ONE_SHARER)) + break; + continue; + } + lock_profile_obtain_lock_failed(&lk->lock_object, + &contested, &waittime); -#ifdef INVARIANTS - if (lkp->lk_flags & LK_DESTROYED) { - if (flags & LK_INTERLOCK) - mtx_unlock(interlkp); - if (panicstr != NULL) - return (0); - panic("%s: %p lockmgr is destroyed", __func__, lkp); - } -#endif - mtx_lock(lkp->lk_interlock); - CTR6(KTR_LOCK, - "lockmgr(): lkp == %p (lk_wmesg == \"%s\"), owner == %p, exclusivecount == %d, flags == 0x%x, " - "td == %p", lkp, (wmesg != LK_WMESG_DEFAULT) ? wmesg : - lkp->lk_wmesg, lkp->lk_lockholder, lkp->lk_exclusivecount, flags, - td); -#ifdef DEBUG_LOCKS - { - struct stack stack; /* XXX */ - stack_save(&stack); - CTRSTACK(KTR_LOCK, &stack, 0, 1); - } -#endif + /* + * If the lock is alredy held by curthread in + * exclusive way avoid a deadlock. 
+ */ + if (LK_HOLDER(x) == tid) { + LOCK_LOG2(lk, + "%s: %p alredy held in exclusive mode", + __func__, lk); + error = EDEADLK; + break; + } - if (flags & LK_INTERLOCK) { - mtx_assert(interlkp, MA_OWNED | MA_NOTRECURSED); - mtx_unlock(interlkp); - } + /* + * If the lock is expected to not sleep just give up + * and return. + */ + if (flags & LK_NOWAIT) { + LOCK_LOG2(lk, "%s: %p fails the try operation", + __func__, lk); + error = EBUSY; + break; + } - if ((flags & (LK_NOWAIT|LK_RELEASE)) == 0) - WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, - &lkp->lk_interlock->lock_object, - "Acquiring lockmgr lock \"%s\"", - (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg); + /* + * Acquire the sleepqueue chain lock because we + * probabilly will need to manipulate waiters flags. + */ + sleepq_lock(&lk->lock_object); + x = lk->lk_lock; - if (panicstr != NULL) { - mtx_unlock(lkp->lk_interlock); - return (0); - } - if ((lkp->lk_flags & LK_NOSHARE) && - (flags & LK_TYPE_MASK) == LK_SHARED) { - flags &= ~LK_TYPE_MASK; - flags |= LK_EXCLUSIVE; - } - extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; + /* + * if the lock has been released while we spun on + * the sleepqueue chain lock just try again. + */ + if (x & LK_SHARE) { + sleepq_release(&lk->lock_object); + continue; + } - switch (flags & LK_TYPE_MASK) { + /* + * Try to set the LK_SHARED_WAITERS flag. If we fail, + * loop back and retry. + */ + if ((x & LK_SHARED_WAITERS) == 0) { + if (!atomic_cmpset_acq_ptr(&lk->lk_lock, x, + x | LK_SHARED_WAITERS)) { + sleepq_release(&lk->lock_object); + continue; + } + LOCK_LOG2(lk, "%s: %p set shared waiters flag", + __func__, lk); + } - case LK_SHARED: - if (!LOCKMGR_TRYOP(extflags)) - WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER, file, + /* + * As far as we have been unable to acquire the + * shared lock and the shared waiters flag is set, + * we will sleep. + */ + error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, + SQ_SHARED_QUEUE); + flags &= ~LK_INTERLOCK; + if (error) { + LOCK_LOG3(lk, + "%s: interrupted sleep for %p with %d", + __func__, lk, error); + break; + } + LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", + __func__, lk); + } + if (error == 0) { + lock_profile_obtain_lock_success(&lk->lock_object, + contested, waittime, file, line); + LOCK_LOG_LOCK("SLOCK", &lk->lock_object, 0, 0, file, line); + TD_LOCKS_INC(curthread); + STACK_SAVE(lk); + } + break; + case LK_UPGRADE: + _lockmgr_assert(lk, KA_SLOCKED, file, line); + x = lk->lk_lock & LK_EXCLUSIVE_WAITERS; + /* - * If we are not the exclusive lock holder, we have to block - * while there is an exclusive lock holder or while an - * exclusive lock request or upgrade request is in progress. - * - * However, if TDP_DEADLKTREAT is set, we override exclusive - * lock requests or upgrade requests ( but not the exclusive - * lock itself ). + * Try to switch from one shared lock to an exclusive one. + * We need to maintain drain and exclusive waiters flags if + * set so that we will wke up them once the lock is dropped. 
*/ - if (lkp->lk_lockholder != td) { - lockflags = LK_HAVE_EXCL; - if (!(td->td_pflags & TDP_DEADLKTREAT)) - lockflags |= LK_WANT_EXCL | LK_WANT_UPGRADE; - error = acquire(&lkp, extflags, lockflags, wmesg, - prio, timo, &contested, &waitstart); - if (error) - break; - sharelock(td, lkp, 1); - if (lkp->lk_sharecount == 1) - lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line); - WITNESS_LOCK(&lkp->lk_object, LOCKMGR_TRYW(extflags), - file, line); - -#if defined(DEBUG_LOCKS) - stack_save(&lkp->lk_stack); -#endif + if (atomic_cmpset_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | x, + tid | x)) { + LOCK_LOG_LOCK("XUPGRADE", &lk->lock_object, 0, 0, file, + line); break; } + /* - * We hold an exclusive lock, so downgrade it to shared. - * An alternative would be to fail with EDEADLK. + * We have been unable to succeed in upgrading, so just + * give up the shared lock. */ - /* FALLTHROUGH downgrade */ + wakeupshlk(lk, file, line); - case LK_DOWNGRADE: - _lockmgr_assert(lkp, KA_XLOCKED, file, line); - sharelock(td, lkp, lkp->lk_exclusivecount); - WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line); - COUNT(td, -lkp->lk_exclusivecount); - lkp->lk_exclusivecount = 0; - lkp->lk_flags &= ~LK_HAVE_EXCL; - lkp->lk_lockholder = LK_NOPROC; - if (lkp->lk_waitcount) - wakeup((void *)lkp); - break; + /* FALLTHROUGH */ + case LK_EXCLUSIVE: - case LK_UPGRADE: /* - * Upgrade a shared lock to an exclusive one. If another - * shared lock has already requested an upgrade to an - * exclusive lock, our shared lock is released and an - * exclusive lock is requested (which will be granted - * after the upgrade). If we return an error, the file - * will always be unlocked. + * If curthread alredy holds the lock and this one is + * allowed to recurse, simply recurse on it. */ - _lockmgr_assert(lkp, KA_SLOCKED, file, line); - shareunlock(td, lkp, 1); - if (lkp->lk_sharecount == 0) - lock_profile_release_lock(&lkp->lk_object); - /* - * If we are just polling, check to see if we will block. - */ - if ((extflags & LK_NOWAIT) && - ((lkp->lk_flags & LK_WANT_UPGRADE) || - lkp->lk_sharecount > 1)) { - error = EBUSY; - WITNESS_UNLOCK(&lkp->lk_object, 0, file, line); + if (lockmgr_xlocked(lk)) { + if ((flags & LK_CANRECURSE) == 0 && + (lk->lock_object.lo_flags & LK_CANRECURSE) == 0) { + + /* + * If the lock is expected to not panic just + * give up and return. + */ + if (flags & LK_NOWAIT) { + LOCK_LOG2(lk, + "%s: %p fails the try operation", + __func__, lk); + error = EBUSY; + break; + } + if (flags & LK_INTERLOCK) + mtx_unlock(ilk); + panic("%s: recursing on non recursive lockmgr %s @ %s:%d\n", + __func__, iwmesg, file, line); + } + lk->lk_recurse++; + LOCK_LOG2(lk, "%s: %p recursing", __func__, lk); + LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, + lk->lk_recurse, file, line); + TD_LOCKS_INC(curthread); break; } - if ((lkp->lk_flags & LK_WANT_UPGRADE) == 0) { + + while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, + tid)) { + lock_profile_obtain_lock_failed(&lk->lock_object, + &contested, &waittime); + + /* + * If the lock is expected to not sleep just give up + * and return. + */ + if (flags & LK_NOWAIT) { + LOCK_LOG2(lk, "%s: %p fails the try operation", + __func__, lk); + error = EBUSY; + break; + } + + /* + * Acquire the sleepqueue chain lock because we + * probabilly will need to manipulate waiters flags. + */ + sleepq_lock(&lk->lock_object); + x = lk->lk_lock; + + /* + * if the lock has been released while we spun on + * the sleepqueue chain lock just try again. 
+ */ + if (x == LK_UNLOCKED) { + sleepq_release(&lk->lock_object); + continue; + } + /* - * We are first shared lock to request an upgrade, so - * request upgrade and wait for the shared count to - * drop to zero, then take exclusive lock. + * The lock can be in the state where there is a + * pending queue of waiters, but still no owner. + * This happens when the lock is contested and an + * owner is going to claim the lock. + * If curthread is the one successfully acquiring it + * claim lock ownership and return, preserving waiters + * flags. */ - lkp->lk_flags |= LK_WANT_UPGRADE; - error = acquire(&lkp, extflags, LK_SHARE_NONZERO, wmesg, - prio, timo, &contested, &waitstart); - lkp->lk_flags &= ~LK_WANT_UPGRADE; + if (x == (LK_UNLOCKED | LK_EXCLUSIVE_WAITERS)) { + MPASS(lk->lk_xwaiters != 0); + if (atomic_cmpset_acq_ptr(&lk->lk_lock, x, + tid | LK_EXCLUSIVE_WAITERS)) { + sleepq_release(&lk->lock_object); + LOCK_LOG2(lk, + "%s: %p claimed by a new writer", + __func__, lk); + break; + } + sleepq_release(&lk->lock_object); + continue; + } - if (error) { - if ((lkp->lk_flags & ( LK_WANT_EXCL | LK_WAIT_NONZERO)) == (LK_WANT_EXCL | LK_WAIT_NONZERO)) - wakeup((void *)lkp); - WITNESS_UNLOCK(&lkp->lk_object, 0, file, line); - break; + /* + * Try to set the LK_EXCLUSIVE_WAITERS flag. If we + * fail, loop back and retry. + */ + if ((x & LK_EXCLUSIVE_WAITERS) == 0) { + if (!atomic_cmpset_ptr(&lk->lk_lock, x, + x | LK_EXCLUSIVE_WAITERS)) { + sleepq_release(&lk->lock_object); + continue; + } + LOCK_LOG2(lk, "%s: %p set excl waiters flag", + __func__, lk); } - if (lkp->lk_exclusivecount != 0) - panic("lockmgr: non-zero exclusive count"); - lkp->lk_flags |= LK_HAVE_EXCL; - lkp->lk_lockholder = td; - lkp->lk_exclusivecount = 1; - WITNESS_UPGRADE(&lkp->lk_object, LOP_EXCLUSIVE | - LOP_TRYLOCK, file, line); - COUNT(td, 1); - lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line); -#if defined(DEBUG_LOCKS) - stack_save(&lkp->lk_stack); -#endif - break; - } - /* - * Someone else has requested upgrade. Release our shared - * lock, awaken upgrade requestor if we are the last shared - * lock, then request an exclusive lock. - */ - WITNESS_UNLOCK(&lkp->lk_object, 0, file, line); - if ( (lkp->lk_flags & (LK_SHARE_NONZERO|LK_WAIT_NONZERO)) == - LK_WAIT_NONZERO) - wakeup((void *)lkp); - /* FALLTHROUGH exclusive request */ - case LK_EXCLUSIVE: - if (!LOCKMGR_TRYOP(extflags)) - WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER | - LOP_EXCLUSIVE, file, line); - if (lkp->lk_lockholder == td) { /* - * Recursive lock. + * As far as we have been unable to acquire the + * exclusive lock and the exclusive waiters flag + * is set, we will sleep. 
*/ - if ((extflags & (LK_NOWAIT | LK_CANRECURSE)) == 0) - panic("lockmgr: locking against myself"); - if ((extflags & LK_CANRECURSE) != 0) { - lkp->lk_exclusivecount++; - WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE | - LOCKMGR_TRYW(extflags), file, line); - COUNT(td, 1); + error = sleeplk(lk, flags, ilk, iwmesg, ipri, itimo, + SQ_EXCLUSIVE_QUEUE); + flags &= ~LK_INTERLOCK; + if (error) { + LOCK_LOG3(lk, + "%s: interrupted sleep for %p with %d", + __func__, lk, error); break; } + LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", + __func__, lk); + } + if (error == 0) { + lock_profile_obtain_lock_success(&lk->lock_object, + contested, waittime, file, line); + LOCK_LOG_LOCK("XLOCK", &lk->lock_object, 0, + lk->lk_recurse, file, line); + TD_LOCKS_INC(curthread); + STACK_SAVE(lk); } + break; + case LK_DOWNGRADE: + _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); + /* - * If we are just polling, check to see if we will sleep. + * Try to switch from an exclusive lock with no shared + * waiters to one sharer with no shared waiters. If there + * are shared waiters we need to wake them up so we need to + * acquire the sleepqueue spinlock but in the opposite case + * we can try an easy case as first option. */ - if ((extflags & LK_NOWAIT) && - (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | LK_SHARE_NONZERO))) { - error = EBUSY; + x = lk->lk_lock; + if ((x & LK_SHARED_WAITERS) == 0 && + atomic_cmpset_rel_ptr(&lk->lk_lock, x, LK_SHARERS_LOCK(1) | + (x & LK_EXCLUSIVE_WAITERS))) { + LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, + file, line); break; } + /* - * Try to acquire the want_exclusive flag. + * Lock the sleepqueue spinlock so that we can read the + * waiters flags atomically and wake up eventual shared + * waiters. */ - error = acquire(&lkp, extflags, (LK_HAVE_EXCL | LK_WANT_EXCL), - wmesg, prio, timo, &contested, &waitstart); - if (error) - break; - lkp->lk_flags |= LK_WANT_EXCL; + sleepq_lock(&lk->lock_object); + x = lk->lk_lock; + v = x & LK_EXCLUSIVE_WAITERS; + MPASS(x & LK_ALL_WAITERS); + /* - * Wait for shared locks and upgrades to finish. + * Preserve exclusive waiters while downgrading to a single + * sharer. If there is any shared waiter, wake it up. */ - error = acquire(&lkp, extflags, LK_HAVE_EXCL | LK_WANT_UPGRADE | - LK_SHARE_NONZERO, wmesg, prio, timo, - &contested, &waitstart); - lkp->lk_flags &= ~LK_WANT_EXCL; - if (error) { - if (lkp->lk_flags & LK_WAIT_NONZERO) - wakeup((void *)lkp); - break; - } - lkp->lk_flags |= LK_HAVE_EXCL; - lkp->lk_lockholder = td; - if (lkp->lk_exclusivecount != 0) - panic("lockmgr: non-zero exclusive count"); - lkp->lk_exclusivecount = 1; - WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE | - LOCKMGR_TRYW(extflags), file, line); - COUNT(td, 1); - lock_profile_obtain_lock_success(&lkp->lk_object, contested, waitstart, file, line); -#if defined(DEBUG_LOCKS) - stack_save(&lkp->lk_stack); -#endif + atomic_store_rel_ptr(&lk->lk_lock, LK_SHARERS_LOCK(1) | v); + if (x & LK_SHARED_WAITERS) { + LOCK_LOG2(lk, + "%s: %p waking up all threads on the shared queue", + __func__, lk); + sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, 0, + SQ_SHARED_QUEUE); + } + sleepq_release(&lk->lock_object); + LOCK_LOG_LOCK("XDOWNGRADE", &lk->lock_object, 0, 0, file, line); break; + case LK_RELEASE: + _lockmgr_assert(lk, KA_LOCKED, file, line); + x = lk->lk_lock; + + if ((x & LK_SHARE) == 0) { + + /* + * As first option, treact the lock as if it has not + * any waiter. + * Fix-up the tid var if the lock has been disowned. 
+ */ + if (LK_HOLDER(x) == LK_KERNPROC) + tid = LK_KERNPROC; + else + TD_LOCKS_DEC(curthread); + LOCK_LOG_LOCK("XUNLOCK", &lk->lock_object, 0, + lk->lk_recurse, file, line); - case LK_RELEASE: - _lockmgr_assert(lkp, KA_LOCKED, file, line); - if (lkp->lk_exclusivecount != 0) { - if (lkp->lk_lockholder != LK_KERNPROC) { - WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, - file, line); - COUNT(td, -1); + /* + * The lock is held in exclusive mode. + * If the lock is recursed also, then unrecurse it. + */ + if (lockmgr_xlocked(lk) && lockmgr_recursed(lk)) { + LOCK_LOG2(lk, "%s: %p unrecursing", __func__, + lk); + lk->lk_recurse--; + break; } - if (lkp->lk_exclusivecount-- == 1) { - lkp->lk_flags &= ~LK_HAVE_EXCL; - lkp->lk_lockholder = LK_NOPROC; - lock_profile_release_lock(&lkp->lk_object); + lock_profile_release_lock(&lk->lock_object); + + if (atomic_cmpset_rel_ptr(&lk->lk_lock, tid, + LK_UNLOCKED)) + break; + + sleepq_lock(&lk->lock_object); + x = lk->lk_lock; + v = x & LK_EXCLUSIVE_WAITERS; + MPASS(x & LK_ALL_WAITERS); + + /* + * If the lock has shared waiters, give them + * preference in order to avoid sharers starvation. + */ + if (x & LK_SHARED_WAITERS) { + LOCK_LOG2(lk, + "%s: %p waking up all threads on the shared queue", + __func__, lk); + atomic_store_rel_ptr(&lk->lk_lock, + LK_UNLOCKED | v); + sleepq_broadcast(&lk->lock_object, SLEEPQ_LK, + 0, SQ_SHARED_QUEUE); + sleepq_release(&lk->lock_object); + break; } - } else if (lkp->lk_flags & LK_SHARE_NONZERO) { - WITNESS_UNLOCK(&lkp->lk_object, 0, file, line); - shareunlock(td, lkp, 1); - } - if (lkp->lk_flags & LK_WAIT_NONZERO) - wakeup((void *)lkp); + /* + * If the lock should have only exclusive waiters so + * just wake up one in order to avoid thundering herd + * problems, which have been sometimes reported in main + * lockmgr consumers like vnode locks. + */ + MPASS(x & LK_EXCLUSIVE_WAITERS); + LOCK_LOG2(lk, + "%s: %p waking up a thread on the exclusive queue", + __func__, lk); + if ((--lk->lk_xwaiters) == 0) + v = 0; + atomic_store_rel_ptr(&lk->lk_lock, LK_UNLOCKED | v); + sleepq_signal(&lk->lock_object, SLEEPQ_LK, 0, + SQ_EXCLUSIVE_QUEUE); + sleepq_release(&lk->lock_object); + break; + } else + wakeupshlk(lk, file, line); break; + case LK_DRAIN: - case LK_DRAIN: /* - * Check that we do not already hold the lock, as it can - * never drain if we do. Unfortunately, we have no way to - * check for holding a shared lock, but at least we can - * check for an exclusive one. + * Trying to drain a lock we alredy own will result in a + * deadlock. */ - if (!LOCKMGR_TRYOP(extflags)) - WITNESS_CHECKORDER(&lkp->lk_object, LOP_NEWORDER | - LOP_EXCLUSIVE, file, line); - if (lkp->lk_lockholder == td) - panic("lockmgr: draining against myself"); + if (lockmgr_xlocked(lk)) { + if (flags & LK_INTERLOCK) + mtx_unlock(ilk); + panic("%s: draining %s with the lock held @ %s:%d\n", + __func__, iwmesg, file, line); + } + + while (!atomic_cmpset_acq_ptr(&lk->lk_lock, LK_UNLOCKED, tid)) { + lock_profile_obtain_lock_failed(&lk->lock_object, + &contested, &waittime); + + /* + * If the lock is expected to not sleep just give up + * and return. + */ + if (flags & LK_NOWAIT) { + LOCK_LOG2(lk, "%s: %p fails the try operation", + __func__, lk); + error = EBUSY; + break; + } + + /* + * Acquire the sleepqueue chain lock because we + * probabilly will need to manipulate waiters flags. 
+ */ + sleepq_lock(&lk->lock_object); + x = lk->lk_lock; - error = acquiredrain(lkp, extflags, wmesg, prio, timo); - if (error) - break; - lkp->lk_flags |= LK_DRAINING | LK_HAVE_EXCL; - lkp->lk_lockholder = td; - lkp->lk_exclusivecount = 1; - WITNESS_LOCK(&lkp->lk_object, LOP_EXCLUSIVE | - LOCKMGR_TRYW(extflags), file, line); - COUNT(td, 1); -#if defined(DEBUG_LOCKS) - stack_save(&lkp->lk_stack); -#endif - break; + /* + * if the lock has been released while we spun on + * the sleepqueue chain lock just try again. + */ + if (x == LK_UNLOCKED) { + sleepq_release(&lk->lock_object); + continue; + } - default: - mtx_unlock(lkp->lk_interlock); - panic("lockmgr: unknown locktype request %d", - flags & LK_TYPE_MASK); - /* NOTREACHED */ - } - if ((lkp->lk_flags & LK_WAITDRAIN) && - (lkp->lk_flags & (LK_HAVE_EXCL | LK_WANT_EXCL | LK_WANT_UPGRADE | - LK_SHARE_NONZERO | LK_WAIT_NONZERO)) == 0) { - lkp->lk_flags &= ~LK_WAITDRAIN; - wakeup((void *)&lkp->lk_flags); - } - mtx_unlock(lkp->lk_interlock); - return (error); -} + /* + * Try to set the LK_EXCLUSIVE_WAITERS flag. If we + * fail, loop back and retry. + */ + if ((x & LK_EXCLUSIVE_WAITERS) == 0) { + if (!atomic_cmpset_ptr(&lk->lk_lock, x, + x | LK_EXCLUSIVE_WAITERS)) { + sleepq_release(&lk->lock_object); + continue; + } + LOCK_LOG2(lk, "%s: %p set drain waiters flag", + __func__, lk); + } -static int -acquiredrain(struct lock *lkp, int extflags, const char *wmesg, int prio, - int timo) -{ - const char *iwmesg; - int error, iprio, itimo; + /* + * If we have been woken up by an owner but there + * are other exclusive waiters, wake one of them up + * and back to sleep. + */ + if (x == (LK_UNLOCKED | LK_EXCLUSIVE_WAITERS)) { + MPASS(lk->lk_xwaiters != 0); + sleepq_signal(&lk->lock_object, SLEEPQ_LK, 0, + SQ_EXCLUSIVE_QUEUE); + } else + ++lk->lk_xwaiters; - iwmesg = (wmesg != LK_WMESG_DEFAULT) ? wmesg : lkp->lk_wmesg; - iprio = (prio != LK_PRIO_DEFAULT) ? prio : lkp->lk_prio; - itimo = (timo != LK_TIMO_DEFAULT) ? timo : lkp->lk_timo; + /* + * As far as we have been unable to acquire the + * exclusive lock and the exclusive waiters flag + * is set, we will sleep. + */ + if (flags & LK_INTERLOCK) { + mtx_unlock(ilk); + flags &= ~LK_INTERLOCK; + } + DROP_GIANT(); + sleepq_add(&lk->lock_object, NULL, iwmesg, SLEEPQ_LK, + SQ_EXCLUSIVE_QUEUE); + sleepq_wait(&lk->lock_object, ipri & PRIMASK); + PICKUP_GIANT(); + LOCK_LOG2(lk, "%s: %p resuming from the sleep queue", + __func__, lk); + } - if ((extflags & LK_NOWAIT) && (lkp->lk_flags & LK_ALL)) { - return EBUSY; - } - while (lkp->lk_flags & LK_ALL) { - lkp->lk_flags |= LK_WAITDRAIN; - error = msleep(&lkp->lk_flags, lkp->lk_interlock, iprio, iwmesg, - ((extflags & LK_TIMELOCK) ? itimo : 0)); - if (error) - return error; - if (extflags & LK_SLEEPFAIL) { - return ENOLCK; + if (error == 0) { + lock_profile_obtain_lock_success(&lk->lock_object, + contested, waittime, file, line); + LOCK_LOG_LOCK("DRAIN", &lk->lock_object, 0, + lk->lk_recurse, file, line); + TD_LOCKS_INC(curthread); + STACK_SAVE(lk); } + break; + default: + if (flags & LK_INTERLOCK) + mtx_unlock(ilk); + panic("%s: unknown lockmgr request 0x%x\n", __func__, op); } - return 0; + + /* + * We could have exited from the switch without reacquiring the + * interlock, so we need to check for the interlock ownership. + */ + if (flags & LK_INTERLOCK) + mtx_unlock(ilk); + + return (error); } -/* - * Initialize a lock; required before use. 
- */ void -lockinit(lkp, prio, wmesg, timo, flags) - struct lock *lkp; - int prio; - const char *wmesg; - int timo; - int flags; +_lockmgr_disown(struct lock *lk, const char *file, int line) { - int iflags; + uintptr_t tid, x; - KASSERT((flags & (LK_NOWAIT | LK_SLEEPFAIL)) == 0, - ("%s: Invalid flags passed with mask 0x%x", __func__, - flags & LK_EXTFLG_MASK)); - CTR5(KTR_LOCK, "lockinit(): lkp == %p, prio == %d, wmesg == \"%s\", " - "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags); + tid = (uintptr_t)curthread; + _lockmgr_assert(lk, KA_XLOCKED | KA_NOTRECURSED, file, line); - lkp->lk_interlock = mtx_pool_alloc(mtxpool_lockbuilder); - lkp->lk_flags = (flags & LK_EXTFLG_MASK) & ~(LK_FUNC_MASK); - lkp->lk_sharecount = 0; - lkp->lk_waitcount = 0; - lkp->lk_exclusivecount = 0; - lkp->lk_prio = prio; - lkp->lk_timo = timo; - lkp->lk_lockholder = LK_NOPROC; - lkp->lk_newlock = NULL; - iflags = LO_RECURSABLE | LO_SLEEPABLE | LO_UPGRADABLE; - if (!(flags & LK_NODUP)) - iflags |= LO_DUPOK; - if (flags & LK_NOPROFILE) - iflags |= LO_NOPROFILE; - if (!(flags & LK_NOWITNESS)) - iflags |= LO_WITNESS; - if (flags & LK_QUIET) - iflags |= LO_QUIET; -#ifdef DEBUG_LOCKS - stack_zero(&lkp->lk_stack); -#endif - lock_init(&lkp->lk_object, &lock_class_lockmgr, wmesg, NULL, iflags); -} + /* + * If the owner is alredy LK_KERNPROC just skip the whole operation. + */ + if (LK_HOLDER(lk->lk_lock) != tid) + return; -/* - * Destroy a lock. - */ -void -lockdestroy(lkp) - struct lock *lkp; -{ - - CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")", - lkp, lkp->lk_wmesg); - KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0, - ("lockmgr still held")); - KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed")); - lkp->lk_flags = LK_DESTROYED; - lock_destroy(&lkp->lk_object); + /* + * In order to preserve waiters flags, just spin. + */ + for (;;) { + x = lk->lk_lock & LK_ALL_WAITERS; + if (atomic_cmpset_ptr(&lk->lk_lock, tid | x, + LK_KERNPROC | x)) { + LOCK_LOG_LOCK("XDISOWN", &lk->lock_object, 0, 0, file, + line); + TD_LOCKS_DEC(curthread); + return; + } + } } -/* - * Disown the lockmgr. - */ void -_lockmgr_disown(struct lock *lkp, const char *file, int line) +lockmgr_printinfo(struct lock *lk) { struct thread *td; + uintptr_t x; + + if (lk->lk_lock == LK_UNLOCKED) + printf(" lock type %s: UNLOCKED\n", lk->lock_object.lo_name); + else if (lk->lk_lock & LK_SHARE) + printf(" lock type %s: SHARED (count %ju)\n", + lk->lock_object.lo_name, + (uintmax_t)LK_SHARERS(lk->lk_lock)); + else { + td = lockmgr_xholder(lk); + printf(" lock type %s: EXCL by thread %p (pid %d)\n", + lk->lock_object.lo_name, td, td->td_proc->p_pid); + } - td = curthread; - KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0, - ("%s: %p lockmgr is destroyed", __func__, lkp)); - _lockmgr_assert(lkp, KA_XLOCKED | KA_NOTRECURSED, file, line); + x = lk->lk_lock; + if (x & LK_EXCLUSIVE_WAITERS) + printf(" with exclusive waiters pending\n"); + if (x & LK_SHARED_WAITERS) + printf(" with shared waiters pending\n"); - /* - * Drop the lock reference and switch the owner. This will result - * in an atomic operation like td_lock is only accessed by curthread - * and lk_lockholder only needs one write. Note also that the lock - * owner can be alredy KERNPROC, so in that case just skip the - * decrement. 
- */ - if (lkp->lk_lockholder == td) { - WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE, file, line); - td->td_locks--; - } - lkp->lk_lockholder = LK_KERNPROC; + STACK_PRINT(lk); } -/* - * Determine the status of a lock. - */ int -lockstatus(lkp) - struct lock *lkp; +lockstatus(struct lock *lk) { - int lock_type = 0; - int interlocked; + uintptr_t v, x; + int ret; - KASSERT((lkp->lk_flags & LK_DESTROYED) == 0, - ("%s: %p lockmgr is destroyed", __func__, lkp)); + ret = LK_SHARED; + x = lk->lk_lock; + v = LK_HOLDER(x); - if (!kdb_active) { - interlocked = 1; - mtx_lock(lkp->lk_interlock); - } else - interlocked = 0; - if (lkp->lk_exclusivecount != 0) { - if (lkp->lk_lockholder == curthread) - lock_type = LK_EXCLUSIVE; + if ((x & LK_SHARE) == 0) { + if (v == (uintptr_t)curthread || v == LK_KERNPROC) + ret = LK_EXCLUSIVE; else - lock_type = LK_EXCLOTHER; - } else if (lkp->lk_sharecount != 0) - lock_type = LK_SHARED; - if (interlocked) - mtx_unlock(lkp->lk_interlock); - return (lock_type); -} + ret = LK_EXCLOTHER; + } else if (x == LK_UNLOCKED) + ret = 0; -/* - * Print out information about state of a lock. Used by VOP_PRINT - * routines to display status about contained locks. - */ -void -lockmgr_printinfo(lkp) - struct lock *lkp; -{ - - if (lkp->lk_sharecount) - printf(" lock type %s: SHARED (count %d)", lkp->lk_wmesg, - lkp->lk_sharecount); - else if (lkp->lk_flags & LK_HAVE_EXCL) - printf(" lock type %s: EXCL (count %d) by thread %p (pid %d)", - lkp->lk_wmesg, lkp->lk_exclusivecount, - lkp->lk_lockholder, lkp->lk_lockholder->td_proc->p_pid); - if (lkp->lk_waitcount > 0) - printf(" with %d pending", lkp->lk_waitcount); -#ifdef DEBUG_LOCKS - stack_print_ddb(&lkp->lk_stack); -#endif + return (ret); } #ifdef INVARIANT_SUPPORT #ifndef INVARIANTS -#undef _lockmgr_assert +#undef _lockmgr_assert #endif void -_lockmgr_assert(struct lock *lkp, int what, const char *file, int line) +_lockmgr_assert(struct lock *lk, int what, const char *file, int line) { - struct thread *td; - u_int x; int slocked = 0; - x = lkp->lk_flags; - td = lkp->lk_lockholder; if (panicstr != NULL) return; switch (what) { @@ -697,133 +874,109 @@ case KA_LOCKED: case KA_LOCKED | KA_NOTRECURSED: case KA_LOCKED | KA_RECURSED: -#ifdef WITNESS - /* - * We cannot trust WITNESS if the lock is held in - * exclusive mode and a call to lockmgr_disown() happened. - * Workaround this skipping the check if the lock is - * held in exclusive mode even for the KA_LOCKED case. - */ - if (slocked || (x & LK_HAVE_EXCL) == 0) { - witness_assert(&lkp->lk_object, what, file, line); - break; - } -#endif - if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 && - (slocked || LOCKMGR_NOTOWNER(td)))) + if (lk->lk_lock == LK_UNLOCKED || + ((lk->lk_lock & LK_SHARE) == 0 && (slocked || + (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk))))) panic("Lock %s not %slocked @ %s:%d\n", - lkp->lk_object.lo_name, slocked ? "share " : "", + lk->lock_object.lo_name, slocked ? 
"share" : "", file, line); - if ((x & LK_SHARE_NONZERO) == 0) { - if (lockmgr_recursed(lkp)) { + + if ((lk->lk_lock & LK_SHARE) == 0) { + if (lockmgr_recursed(lk)) { if (what & KA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", - lkp->lk_object.lo_name, file, line); + lk->lock_object.lo_name, file, + line); } else if (what & KA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", - lkp->lk_object.lo_name, file, line); + lk->lock_object.lo_name, file, line); } break; case KA_XLOCKED: case KA_XLOCKED | KA_NOTRECURSED: case KA_XLOCKED | KA_RECURSED: - if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td)) + if (!lockmgr_xlocked(lk) && !lockmgr_disowned(lk)) panic("Lock %s not exclusively locked @ %s:%d\n", - lkp->lk_object.lo_name, file, line); - if (lockmgr_recursed(lkp)) { + lk->lock_object.lo_name, file, line); + if (lockmgr_recursed(lk)) { if (what & KA_NOTRECURSED) panic("Lock %s recursed @ %s:%d\n", - lkp->lk_object.lo_name, file, line); + lk->lock_object.lo_name, file, line); } else if (what & KA_RECURSED) panic("Lock %s not recursed @ %s:%d\n", - lkp->lk_object.lo_name, file, line); + lk->lock_object.lo_name, file, line); break; case KA_UNLOCKED: - if (td == curthread || td == LK_KERNPROC) + if (lockmgr_xlocked(lk) || lockmgr_disowned(lk)) panic("Lock %s exclusively locked @ %s:%d\n", - lkp->lk_object.lo_name, file, line); - break; - case KA_HELD: - case KA_UNHELD: - if (LOCKMGR_UNHELD(x)) { - if (what & KA_HELD) - panic("Lock %s not locked by anyone @ %s:%d\n", - lkp->lk_object.lo_name, file, line); - } else if (what & KA_UNHELD) - panic("Lock %s locked by someone @ %s:%d\n", - lkp->lk_object.lo_name, file, line); + lk->lock_object.lo_name, file, line); break; default: - panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what, - file, line); + panic("Unknown lockmgr assertion: %d @ %s:%d\n", what, file, + line); } } -#endif /* INVARIANT_SUPPORT */ +#endif #ifdef DDB -/* - * Check to see if a thread that is blocked on a sleep queue is actually - * blocked on a 'struct lock'. If so, output some details and return true. - * If the lock has an exclusive owner, return that in *ownerp. - */ int lockmgr_chain(struct thread *td, struct thread **ownerp) { - struct lock *lkp; + struct lock *lk; - lkp = td->td_wchan; + lk = td->td_wchan; - /* Simple test to see if wchan points to a lockmgr lock. */ - if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr && - lkp->lk_wmesg == td->td_wmesg) - goto ok; - - /* - * If this thread is doing a DRAIN, then it would be asleep on - * &lkp->lk_flags rather than lkp. - */ - lkp = (struct lock *)((char *)td->td_wchan - - offsetof(struct lock, lk_flags)); - if (LOCK_CLASS(&lkp->lk_object) == &lock_class_lockmgr && - lkp->lk_wmesg == td->td_wmesg && (lkp->lk_flags & LK_WAITDRAIN)) - goto ok; + if (LOCK_CLASS(&lk->lock_object) != &lock_class_lockmgr) + return (0); + db_printf("blocked on lockmgr %s", lk->lock_object.lo_name); + if (lk->lk_lock & LK_SHARE) + db_printf("SHARED (count %ju)\n", + (uintmax_t)LK_SHARERS(lk->lk_lock)); + else + db_printf("EXCL\n"); + *ownerp = lockmgr_xholder(lk); - /* Doen't seem to be a lockmgr lock. */ - return (0); - -ok: - /* Ok, we think we have a lockmgr lock, so output some details. 
*/ - db_printf("blocked on lk \"%s\" ", lkp->lk_wmesg); - if (lkp->lk_sharecount) { - db_printf("SHARED (count %d)\n", lkp->lk_sharecount); - *ownerp = NULL; - } else { - db_printf("EXCL (count %d)\n", lkp->lk_exclusivecount); - *ownerp = lkp->lk_lockholder; - } return (1); } -void +static void db_show_lockmgr(struct lock_object *lock) { struct thread *td; - struct lock *lkp; + struct lock *lk; - lkp = (struct lock *)lock; + lk = (struct lock *)lock; - db_printf(" lock type: %s\n", lkp->lk_wmesg); db_printf(" state: "); - if (lkp->lk_sharecount) - db_printf("SHARED (count %d)\n", lkp->lk_sharecount); - else if (lkp->lk_flags & LK_HAVE_EXCL) { - td = lkp->lk_lockholder; - db_printf("EXCL (count %d) %p ", lkp->lk_exclusivecount, td); - db_printf("(tid %d, pid %d, \"%s\")\n", td->td_tid, - td->td_proc->p_pid, td->td_name); - } else + if (lk->lk_lock == LK_UNLOCKED) db_printf("UNLOCKED\n"); - if (lkp->lk_waitcount > 0) - db_printf(" waiters: %d\n", lkp->lk_waitcount); + else if (lk->lk_lock & LK_SHARE) + db_printf("SLOCK: %ju\n", + (uintmax_t)LK_SHARERS(lk->lk_lock)); + else { + td = lockmgr_xholder(lk); + if (td == (struct thread *)LK_KERNPROC) + db_printf("XLOCK: LK_KERNPROC\n"); + else + db_printf("XLOCK: %p (tid %d, pid %d, \"%s\")\n", td, + td->td_tid, td->td_proc->p_pid, + td->td_proc->p_comm); + if (lockmgr_recursed(lk)) + db_printf(" recursed: %d\n", lk->lk_recurse); + } + db_printf(" waiters: "); + switch (lk->lk_lock & LK_ALL_WAITERS) { + case LK_SHARED_WAITERS: + db_printf("shared\n"); + case LK_EXCLUSIVE_WAITERS: + db_printf("%u exclusive\n", lk->lk_xwaiters); + break; + case LK_ALL_WAITERS: + db_printf("shared and %u exclusive\n", lk->lk_xwaiters); + break; + default: + db_printf("none\n"); + } } #endif + --- //depot/vendor/freebsd/src/sys/sys/lockmgr.h 2008/03/28 12:30:16 +++ //depot/user/attilio/attilio_lockmgr/sys/lockmgr.h 2008/04/02 18:33:00 @@ -1,240 +1,127 @@ -/*- - * Copyright (c) 1995 - * The Regents of the University of California. All rights reserved. - * - * This code contains ideas from software contributed to Berkeley by - * Avadis Tevanian, Jr., Michael Wayne Young, and the Mach Operating - * System project at Carnegie-Mellon University. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * @(#)lock.h 8.12 (Berkeley) 5/19/95 - * $FreeBSD: src/sys/sys/lockmgr.h,v 1.67 2008/03/28 12:30:12 attilio Exp $ - */ - #ifndef _SYS_LOCKMGR_H_ #define _SYS_LOCKMGR_H_ -#ifdef DEBUG_LOCKS -#include /* XXX */ +#include +#include + +#define LK_SHARE 0x01 +#define LK_SHARED_WAITERS 0x02 +#define LK_EXCLUSIVE_WAITERS 0x04 +#define LK_ALL_WAITERS \ + (LK_SHARED_WAITERS | LK_EXCLUSIVE_WAITERS) +#define LK_FLAGMASK \ + (LK_SHARE | LK_ALL_WAITERS) + +#define LK_HOLDER(x) ((x) & ~LK_FLAGMASK) +#define LK_SHARERS_SHIFT 3 +#define LK_SHARERS(x) (LK_HOLDER(x) >> LK_SHARERS_SHIFT) +#define LK_SHARERS_LOCK(x) ((x) << LK_SHARERS_SHIFT | LK_SHARE) +#define LK_ONE_SHARER (1 << LK_SHARERS_SHIFT) +#define LK_UNLOCKED LK_SHARERS_LOCK(0) +#define LK_KERNPROC ((uintptr_t)(-1) & ~LK_FLAGMASK) + +#ifdef _KERNEL + +#if !defined(LOCK_FILE) || !defined(LOCK_LINE) +#error "LOCK_FILE and LOCK_LINE not defined, include before" #endif -#include -#include -struct mtx; +struct mtx; +struct thread; /* - * The general lock structure. Provides for multiple shared locks, - * upgrading from shared to exclusive, and sleeping until the lock - * can be gained. + * Function prototypes. Routines that start with an underscore are not part + * of the public interface and might be wrapped in a macro.
*/ -struct lock { - struct lock_object lk_object; /* common lock properties */ - struct mtx *lk_interlock; /* lock on remaining fields */ - u_int lk_flags; /* see below */ - int lk_sharecount; /* # of accepted shared locks */ - int lk_waitcount; /* # of processes sleeping for lock */ - short lk_exclusivecount; /* # of recursive exclusive locks */ - short lk_prio; /* priority at which to sleep */ - int lk_timo; /* maximum sleep time (for tsleep) */ - struct thread *lk_lockholder; /* thread of exclusive lock holder */ - struct lock *lk_newlock; /* lock taking over this lock */ +int _lockmgr_args(struct lock *lk, u_int flags, struct mtx *ilk, + const char *wmesg, int prio, int timo, const char *file, int line); +#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) +void _lockmgr_assert(struct lock *lk, int what, const char *file, int line); +#endif +void _lockmgr_disown(struct lock *lk, const char *file, int line); -#ifdef DEBUG_LOCKS - struct stack lk_stack; +void lockdestroy(struct lock *lk); +void lockinit(struct lock *lk, int prio, const char *wmesg, int timo, + int flags); +#ifdef DDB +int lockmgr_chain(struct thread *td, struct thread **ownerp); #endif -}; +void lockmgr_printinfo(struct lock *lk); +int lockstatus(struct lock *lk); -#define lk_wmesg lk_object.lo_name - -#ifdef _KERNEL +#define lockmgr(lk, flags, ilk) \ + _lockmgr_args((lk), (flags), (ilk), LK_WMESG_DEFAULT, \ + LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE) +#define lockmgr_args(lk, flags, ilk, wmesg, prio, timo) \ + _lockmgr_args((lk), (flags), (ilk), (wmesg), (prio), (timo), \ + LOCK_FILE, LOCK_LINE) +#define lockmgr_disown(lk) \ + _lockmgr_disown((lk), LOCK_FILE, LOCK_LINE) +#define lockmgr_recursed(lk) \ + ((lk)->lk_recurse != 0) +#define lockmgr_waiters(lk) \ + ((lk)->lk_lock & LK_ALL_WAITERS) +#ifdef INVARIANTS +#define lockmgr_assert(lk, what, file, line) \ + _lockmgr_assert((lk), (what), LOCK_FILE, LOCK_LINE) +#else +#define lockmgr_assert(lk, what, file, line) +#endif /* - * Lock request types: - * LK_SHARED - get one of many possible shared locks. If a process - * holding an exclusive lock requests a shared lock, the exclusive - * lock(s) will be downgraded to shared locks. - * LK_EXCLUSIVE - stop further shared locks, when they are cleared, - * grant a pending upgrade if it exists, then grant an exclusive - * lock. Only one exclusive lock may exist at a time, except that - * a process holding an exclusive lock may get additional exclusive - * locks if it explicitly sets the LK_CANRECURSE flag in the lock - * request, or if the LK_CANRECUSE flag was set when the lock was - * initialized. - * LK_UPGRADE - the process must hold a shared lock that it wants to - * have upgraded to an exclusive lock. Other processes may get - * exclusive access to the resource between the time that the upgrade - * is requested and the time that it is granted. - * LK_DOWNGRADE - the process must hold an exclusive lock that it wants - * to have downgraded to a shared lock. If the process holds multiple - * (recursive) exclusive locks, they will all be downgraded to shared - * locks. - * LK_RELEASE - release one instance of a lock. - * LK_DRAIN - wait for all activity on the lock to end, then mark it - * decommissioned. This feature is used before freeing a lock that - * is part of a piece of memory that is about to be freed. - * LK_EXCLOTHER - return for lockstatus(). Used when another process - * holds the lock exclusively. - * - * These are flags that are passed to the lockmgr routine. + * Flags for lockinit(). 
*/ -#define LK_TYPE_MASK 0x0000000f /* type of lock sought */ -#define LK_SHARED 0x00000001 /* shared lock */ -#define LK_EXCLUSIVE 0x00000002 /* exclusive lock */ -#define LK_UPGRADE 0x00000003 /* shared-to-exclusive upgrade */ -#define LK_DOWNGRADE 0x00000005 /* exclusive-to-shared downgrade */ -#define LK_RELEASE 0x00000006 /* release any type of lock */ -#define LK_DRAIN 0x00000007 /* wait for all lock activity to end */ -#define LK_EXCLOTHER 0x00000008 /* other process holds lock */ +#define LK_INIT_MASK 0x000FF +#define LK_CANRECURSE 0x00001 +#define LK_NODUP 0x00002 +#define LK_NOPROFILE 0x00004 +#define LK_NOSHARE 0x00008 +#define LK_NOWITNESS 0x00010 +#define LK_QUIET 0x00020 + /* - * External lock flags. - * - * These may be set in lock_init to set their mode permanently, - * or passed in as arguments to the lock manager. + * Additional attributes to be used in lockmgr(). */ -#define LK_EXTFLG_MASK 0x0000fff0 /* mask of external flags */ -#define LK_NOWAIT 0x00000010 /* do not sleep to await lock */ -#define LK_SLEEPFAIL 0x00000020 /* sleep, then return failure */ -#define LK_CANRECURSE 0x00000040 /* allow recursive exclusive lock */ -#define LK_NOSHARE 0x00000080 /* Only allow exclusive locks */ -#define LK_TIMELOCK 0x00000100 /* use lk_timo, else no timeout */ -#define LK_NOWITNESS 0x00000200 /* disable WITNESS */ -#define LK_NODUP 0x00000400 /* enable duplication logging */ -#define LK_NOPROFILE 0x00000800 /* disable lock profiling */ -#define LK_QUIET 0x00001000 /* disable lock operations tracking */ -#define LK_FUNC_MASK (LK_NODUP | LK_NOPROFILE | LK_NOWITNESS | LK_QUIET) +#define LK_EATTR_MASK 0x0FF00 +#define LK_INTERLOCK 0x00100 +#define LK_NOWAIT 0x00200 +#define LK_RETRY 0x00400 +#define LK_SLEEPFAIL 0x00800 +#define LK_TIMELOCK 0x01000 + /* - * Nonpersistent external flags. + * Operations for lockmgr(). */ -#define LK_RETRY 0x00010000 /* vn_lock: retry until locked */ -#define LK_INTERLOCK 0x00020000 /* - * unlock passed mutex after getting - * lk_interlock - */ +#define LK_TYPE_MASK 0xF0000 +#define LK_DOWNGRADE 0x10000 +#define LK_DRAIN 0x20000 +#define LK_EXCLOTHER 0x30000 +#define LK_EXCLUSIVE 0x40000 +#define LK_RELEASE 0x50000 +#define LK_SHARED 0x60000 +#define LK_UPGRADE 0x70000 + +#define LK_TOTAL_MASK (LK_INIT_MASK | LK_EATTR_MASK | LK_TYPE_MASK) /* * Default values for lockmgr_args(). */ #define LK_WMESG_DEFAULT (NULL) -#define LK_PRIO_DEFAULT (-1) +#define LK_PRIO_DEFAULT (0) #define LK_TIMO_DEFAULT (0) /* - * Internal lock flags. - * - * These flags are used internally to the lock manager. - */ -#define LK_WANT_UPGRADE 0x00100000 /* waiting for share-to-excl upgrade */ -#define LK_WANT_EXCL 0x00200000 /* exclusive lock sought */ -#define LK_HAVE_EXCL 0x00400000 /* exclusive lock obtained */ -#define LK_WAITDRAIN 0x00800000 /* process waiting for lock to drain */ -#define LK_DRAINING 0x01000000 /* lock is being drained */ -#define LK_DESTROYED 0x02000000 /* lock is destroyed */ -/* - * Internal state flags corresponding to lk_sharecount, and lk_waitcount - */ -#define LK_SHARE_NONZERO 0x10000000 -#define LK_WAIT_NONZERO 0x20000000 - -#ifndef LOCK_FILE -#error "LOCK_FILE not defined, include before " -#endif - -/* * Assertion flags. 
*/ #if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) -#define KA_BASE (LA_MASKASSERT + 1) #define KA_LOCKED LA_LOCKED #define KA_SLOCKED LA_SLOCKED #define KA_XLOCKED LA_XLOCKED #define KA_UNLOCKED LA_UNLOCKED #define KA_RECURSED LA_RECURSED #define KA_NOTRECURSED LA_NOTRECURSED -#define KA_HELD (KA_BASE << 0x00) -#define KA_UNHELD (KA_BASE << 0x01) -#endif - -/* - * Lock return status. - * - * Successfully obtained locks return 0. Locks will always succeed - * unless one of the following is true: - * LK_FORCEUPGRADE is requested and some other process has already - * requested a lock upgrade (returns EBUSY). - * LK_WAIT is set and a sleep would be required (returns EBUSY). - * LK_SLEEPFAIL is set and a sleep was done (returns ENOLCK). - * PCATCH is set in lock priority and a signal arrives (returns - * either EINTR or ERESTART if system calls is to be restarted). - * Non-null lock timeout and timeout expires (returns EWOULDBLOCK). - * A failed lock attempt always returns a non-zero error value. No lock - * is held after an error return (in particular, a failed LK_UPGRADE - * or LK_FORCEUPGRADE will have released its shared access lock). - */ - -/* - * Indicator that no process holds exclusive lock - */ -#define LK_KERNPROC ((struct thread *)-2) -#define LK_NOPROC ((struct thread *) -1) - -struct thread; - -void lockinit(struct lock *, int prio, const char *wmesg, - int timo, int flags); -void lockdestroy(struct lock *); - -int _lockmgr_args(struct lock *, u_int flags, struct mtx *, - const char *wmesg, int prio, int timo, char *file, int line); -#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT) -void _lockmgr_assert(struct lock *, int what, const char *, int); -#endif -void _lockmgr_disown(struct lock *, const char *, int); -void lockmgr_printinfo(struct lock *); -int lockstatus(struct lock *); - -#define lockmgr(lock, flags, mtx) \ - _lockmgr_args((lock), (flags), (mtx), LK_WMESG_DEFAULT, \ - LK_PRIO_DEFAULT, LK_TIMO_DEFAULT, LOCK_FILE, LOCK_LINE) -#define lockmgr_disown(lock) \ - _lockmgr_disown((lock), LOCK_FILE, LOCK_LINE) -#define lockmgr_args(lock, flags, mtx, wmesg, prio, timo) \ - _lockmgr_args((lock), (flags), (mtx), (wmesg), (prio), (timo), \ - LOCK_FILE, LOCK_LINE) -#define lockmgr_recursed(lkp) \ - ((lkp)->lk_exclusivecount > 1) -#define lockmgr_waiters(lkp) \ - ((lkp)->lk_waitcount != 0) -#ifdef INVARIANTS -#define lockmgr_assert(lkp, what) \ - _lockmgr_assert((lkp), (what), LOCK_FILE, LOCK_LINE) -#else -#define lockmgr_assert(lkp, what) -#endif -#ifdef DDB -int lockmgr_chain(struct thread *td, struct thread **ownerp); +#define KA_HELD +#define KA_UNHELD #endif #endif /* _KERNEL */ --- //depot/vendor/freebsd/src/sys/sys/sleepqueue.h 2008/03/12 06:31:40 +++ //depot/user/attilio/attilio_lockmgr/sys/sleepqueue.h 2008/03/12 15:07:27 @@ -87,6 +87,7 @@ #define SLEEPQ_CONDVAR 0x01 /* Used for a cv. */ #define SLEEPQ_PAUSE 0x02 /* Used by pause. */ #define SLEEPQ_SX 0x03 /* Used by an sx lock. */ +#define SLEEPQ_LK 0x04 /* Used by a lockmgr. */ #define SLEEPQ_INTERRUPTIBLE 0x100 /* Sleep is interruptible. */ void init_sleepqueues(void); --- //depot/vendor/freebsd/src/sys/sys/stack.h 2007/12/02 20:44:34 +++ //depot/user/attilio/attilio_lockmgr/sys/stack.h 2008/03/27 18:51:09 @@ -29,15 +29,10 @@ #ifndef _SYS_STACK_H_ #define _SYS_STACK_H_ -#define STACK_MAX 18 /* Don't change, stack_ktr relies on this. */ +#include struct sbuf; -struct stack { - int depth; - vm_offset_t pcs[STACK_MAX]; -}; - /* MI Routines. 
*/ struct stack *stack_create(void); void stack_destroy(struct stack *); --- //depot/vendor/freebsd/src/sys/sys/vnode.h 2008/03/31 11:56:51 +++ //depot/user/attilio/attilio_lockmgr/sys/vnode.h 2008/04/02 18:29:15 @@ -395,8 +395,10 @@ #define VI_UNLOCK(vp) mtx_unlock(&(vp)->v_interlock) #define VI_MTX(vp) (&(vp)->v_interlock) -#define VN_LOCK_AREC(vp) ((vp)->v_vnlock->lk_flags |= LK_CANRECURSE) -#define VN_LOCK_ASHARE(vp) ((vp)->v_vnlock->lk_flags &= ~LK_NOSHARE) +#define VN_LOCK_AREC(vp) \ + ((vp)->v_vnlock->lock_object.lo_flags |= LK_CANRECURSE) +#define VN_LOCK_ASHARE(vp) \ + ((vp)->v_vnlock->lock_object.lo_flags &= ~LK_NOSHARE) #endif /* _KERNEL */ --- //depot/vendor/freebsd/src/sys/ufs/ffs/ffs_softdep.c 2008/03/23 13:46:54 +++ //depot/user/attilio/attilio_lockmgr/ufs/ffs/ffs_softdep.c 2008/03/26 16:13:06 @@ -553,8 +553,8 @@ #define ACQUIRE_LOCK(lk) mtx_lock(lk) #define FREE_LOCK(lk) mtx_unlock(lk) -#define BUF_AREC(bp) ((bp)->b_lock.lk_flags |= LK_CANRECURSE) -#define BUF_NOREC(bp) ((bp)->b_lock.lk_flags &= ~LK_CANRECURSE) +#define BUF_AREC(bp) ((bp)->b_lock.lock_object.lo_flags |= LK_CANRECURSE) +#define BUF_NOREC(bp) ((bp)->b_lock.lock_object.lo_flags &= ~LK_CANRECURSE) /* * Worklist queue management.
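
For reference, below is a minimal consumer-side sketch of the reworked lockmgr interface introduced by this change (lockinit()/lockmgr()/lockdestroy() and the LK_INTERLOCK handling implemented in sleeplk()/_lockmgr_args() above). The softc structure, the foo_*() function names and the "foolk"/"foomtx" strings are invented for illustration only and are not part of the patch; error handling is kept to the bare minimum.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/mutex.h>

struct foo_softc {
	struct mtx	sc_interlock;	/* short-term interlock */
	struct lock	sc_lock;	/* long-term, sleepable lockmgr lock */
	int		sc_count;
};

static void
foo_init(struct foo_softc *sc)
{

	mtx_init(&sc->sc_interlock, "foomtx", NULL, MTX_DEF);
	/* Sleep priority 0 (leave thread priority unchanged), no timeout. */
	lockinit(&sc->sc_lock, 0, "foolk", 0, LK_CANRECURSE);
}

static int
foo_write(struct foo_softc *sc)
{
	int error;

	mtx_lock(&sc->sc_interlock);
	sc->sc_count++;

	/*
	 * Acquire the lock exclusively.  With LK_INTERLOCK the interlock is
	 * released by lockmgr() itself, either before sleeping in sleeplk()
	 * or on the way out of _lockmgr_args().
	 */
	error = lockmgr(&sc->sc_lock, LK_EXCLUSIVE | LK_INTERLOCK,
	    &sc->sc_interlock);
	if (error != 0)
		return (error);

	/* ... code requiring exclusive access ... */

	lockmgr(&sc->sc_lock, LK_RELEASE, NULL);
	return (0);
}

static void
foo_destroy(struct foo_softc *sc)
{

	lockdestroy(&sc->sc_lock);
	mtx_destroy(&sc->sc_interlock);
}

Note that once LK_INTERLOCK is passed, the caller must not unlock the interlock again: it is dropped on every return path, whether the acquisition succeeded, failed with LK_NOWAIT, or slept.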