--- //depot/vendor/freebsd/src/sys/kern/kern_lock.c	2008/02/08 21:50:16
+++ //depot/user/attilio/attilio_schedlock/kern/kern_lock.c	2008/02/10 16:06:37
@@ -62,6 +62,8 @@
 #define	LOCKMGR_TRYOP(x)	((x) & LK_NOWAIT)
 #define	LOCKMGR_TRYW(x)		(LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
+#define	LOCKMGR_UNHELD(x)	(((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
+#define	LOCKMGR_NOTOWNER(td)	((td) != curthread && (td) != LK_KERNPROC)
 
 static void	 assert_lockmgr(struct lock_object *lock, int what);
 #ifdef DDB
@@ -82,6 +84,10 @@
 	.lc_unlock = unlock_lockmgr,
 };
 
+#ifndef INVARIANTS
+#define	_lockmgr_assert(lkp, what, file, line)
+#endif
+
 /*
  * Locking primitives implementation.
  * Locks provide shared/exclusive sychronization.
@@ -205,6 +211,15 @@
 	error = 0;
 	td = curthread;
 
+#ifdef INVARIANTS
+	if (lkp->lk_flags & LK_DESTROYED) {
+		if (flags & LK_INTERLOCK)
+			mtx_unlock(interlkp);
+		if (panicstr != NULL)
+			return (0);
+		panic("%s: %p lockmgr is destroyed", __func__, lkp);
+	}
+#endif
 	if ((flags & LK_INTERNAL) == 0)
 		mtx_lock(lkp->lk_interlock);
 	CTR6(KTR_LOCK,
@@ -280,10 +295,7 @@
 		/* FALLTHROUGH downgrade */
 
 	case LK_DOWNGRADE:
-		KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
-		    ("lockmgr: not holding exclusive lock "
-		    "(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
-		    lkp->lk_lockholder, td, lkp->lk_exclusivecount));
+		_lockmgr_assert(lkp, CA_XLOCKED, file, line);
 		sharelock(td, lkp, lkp->lk_exclusivecount);
 		WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
 		COUNT(td, -lkp->lk_exclusivecount);
@@ -303,10 +315,7 @@
 		 * after the upgrade). If we return an error, the file
 		 * will always be unlocked.
 		 */
-		if (lkp->lk_lockholder == td)
-			panic("lockmgr: upgrade exclusive lock");
-		if (lkp->lk_sharecount <= 0)
-			panic("lockmgr: upgrade without shared");
+		_lockmgr_assert(lkp, CA_SLOCKED, file, line);
 		shareunlock(td, lkp, 1);
 		if (lkp->lk_sharecount == 0)
 			lock_profile_release_lock(&lkp->lk_object);
@@ -419,33 +428,21 @@
 		break;
 
 	case LK_RELEASE:
+		_lockmgr_assert(lkp, CA_LOCKED, file, line);
 		if (lkp->lk_exclusivecount != 0) {
-			if (lkp->lk_lockholder != td &&
-			    lkp->lk_lockholder != LK_KERNPROC) {
-				panic("lockmgr: thread %p, not %s %p unlocking",
-				    td, "exclusive lock holder",
-				    lkp->lk_lockholder);
-			}
 			if (lkp->lk_lockholder != LK_KERNPROC) {
 				WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
 				    file, line);
 				COUNT(td, -1);
 			}
-			if (lkp->lk_exclusivecount == 1) {
+			if (lkp->lk_exclusivecount-- == 1) {
 				lkp->lk_flags &= ~LK_HAVE_EXCL;
 				lkp->lk_lockholder = LK_NOPROC;
-				lkp->lk_exclusivecount = 0;
 				lock_profile_release_lock(&lkp->lk_object);
-			} else {
-				lkp->lk_exclusivecount--;
 			}
 		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
 			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
 			shareunlock(td, lkp, 1);
-		} else {
-			printf("lockmgr: thread %p unlocking unheld lock\n",
-			    td);
-			kdb_backtrace();
 		}
 
 		if (lkp->lk_flags & LK_WAIT_NONZERO)
@@ -562,6 +559,10 @@
 	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
 	    lkp, lkp->lk_wmesg);
+	KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
+	    ("lockmgr still held"));
+	KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
+	lkp->lk_flags = LK_DESTROYED;
 	lock_destroy(&lkp->lk_object);
 }
 
@@ -574,12 +575,10 @@
 	struct thread *td;
 
 	td = curthread;
-	KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
-	    ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
-	KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
-	    lkp->lk_lockholder == LK_KERNPROC,
-	    ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
-	    td));
+
+	KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
+	    ("%s: %p lockmgr is destroyed", __func__, lkp));
+	_lockmgr_assert(lkp, CA_XLOCKED | CA_NOTRECURSED, file, line);
 
 	/*
 	 * Drop the lock reference and switch the owner. This will result
@@ -608,6 +607,8 @@
 	KASSERT(td == curthread,
 	    ("%s: thread passed argument (%p) is not valid", __func__, td));
+	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
+	    ("%s: %p lockmgr is destroyed", __func__, lkp));
 
 	if (!kdb_active) {
 		interlocked = 1;
@@ -635,6 +636,8 @@
 {
 	int count;
 
+	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
+	    ("%s: %p lockmgr is destroyed", __func__, lkp));
 	mtx_lock(lkp->lk_interlock);
 	count = lkp->lk_waitcount;
 	mtx_unlock(lkp->lk_interlock);
@@ -664,6 +667,93 @@
 #endif
 }
 
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef	_lockmgr_assert
+#endif
+
+void
+_lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
+{
+	struct thread *td;
+	uint32_t x;
+	int slocked = 0;
+
+	x = lkp->lk_flags;
+	td = lkp->lk_lockholder;
+	if (panicstr != NULL)
+		return;
+	switch (what) {
+	case CA_SLOCKED:
+	case CA_SLOCKED | CA_NOTRECURSED:
+	case CA_SLOCKED | CA_RECURSED:
+		slocked = 1;
+	case CA_LOCKED:
+	case CA_LOCKED | CA_NOTRECURSED:
+	case CA_LOCKED | CA_RECURSED:
+#ifdef WITNESS
+		/*
+		 * We cannot trust WITNESS if the lock is held in
+		 * exclusive mode and a call to lockmgr_disown() happened.
+		 * Work around this by skipping the check if the lock is
+		 * held in exclusive mode, even for the CA_LOCKED case.
+		 */
+		if (slocked || (x & LK_HAVE_EXCL) == 0) {
+			witness_assert(&lkp->lk_object, what, file, line);
+			break;
+		}
+#endif
+		if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
+		    (slocked || LOCKMGR_NOTOWNER(td))))
+			panic("Lock %s not %slocked @ %s:%d\n",
+			    lkp->lk_object.lo_name, slocked ? "share " : "",
+			    file, line);
+		if ((x & LK_SHARE_NONZERO) == 0) {
+			if (lockmgr_recursed(lkp)) {
+				if (what & CA_NOTRECURSED)
+					panic("Lock %s recursed @ %s:%d\n",
+					    lkp->lk_object.lo_name, file,
+					    line);
+			} else if (what & CA_RECURSED)
+				panic("Lock %s not recursed @ %s:%d\n",
+				    lkp->lk_object.lo_name, file, line);
+		}
+		break;
+	case CA_XLOCKED:
+	case CA_XLOCKED | CA_NOTRECURSED:
+	case CA_XLOCKED | CA_RECURSED:
+		if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
+			panic("Lock %s not exclusively locked @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		if (lockmgr_recursed(lkp)) {
+			if (what & CA_NOTRECURSED)
+				panic("Lock %s recursed @ %s:%d\n",
+				    lkp->lk_object.lo_name, file, line);
+		} else if (what & CA_RECURSED)
+			panic("Lock %s not recursed @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		break;
+	case CA_UNLOCKED:
+		if (td == curthread || td == LK_KERNPROC)
+			panic("Lock %s exclusively locked @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		break;
+	case CA_HELD:
+	case CA_UNHELD:
+		if (LOCKMGR_UNHELD(x)) {
+			if (what & CA_HELD)
+				panic("Lock %s not locked by anyone @ %s:%d\n",
+				    lkp->lk_object.lo_name, file, line);
+		} else if (what & CA_UNHELD)
+			panic("Lock %s locked by someone @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		break;
+	default:
+		panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
+		    file, line);
+	}
+}
+#endif	/* INVARIANT_SUPPORT */
+
 #ifdef DDB
 /*
  * Check to see if a thread that is blocked on a sleep queue is actually
--- //depot/vendor/freebsd/src/sys/sys/lock.h	2007/11/18 14:44:31
+++ //depot/user/attilio/attilio_schedlock/sys/lock.h	2008/02/08 22:38:30
@@ -105,6 +105,7 @@
 #define	LOP_DUPOK	0x00000010	/* Don't check for duplicate acquires */
 
 /* Flags passed to witness_assert. */
+#define	LA_MASKASSERT	0x000000ff	/* Mask for witness defined asserts. */
 #define	LA_UNLOCKED	0x00000000	/* Lock is unlocked. */
 #define	LA_LOCKED	0x00000001	/* Lock is at least share locked. */
 #define	LA_SLOCKED	0x00000002	/* Lock is exactly share locked. */
--- //depot/vendor/freebsd/src/sys/sys/lockmgr.h	2008/02/10 15:51:52
+++ //depot/user/attilio/attilio_schedlock/sys/lockmgr.h	2008/02/10 16:06:37
@@ -53,7 +53,7 @@
 struct lock {
 	struct lock_object lk_object;	/* common lock properties */
 	struct mtx *lk_interlock;	/* lock on remaining fields */
-	u_int	lk_flags;		/* see below */
+	uint32_t lk_flags;		/* see below */
 	int	lk_sharecount;		/* # of accepted shared locks */
 	int	lk_waitcount;		/* # of processes sleeping for lock */
 	short	lk_exclusivecount;	/* # of recursive exclusive locks */
@@ -138,12 +138,26 @@
 #define	LK_WAITDRAIN	0x00080000	/* process waiting for lock to drain */
 #define	LK_DRAINING	0x00100000	/* lock is being drained */
 #define	LK_INTERNAL	0x00200000	/* The internal lock is already held */
+#define	LK_DESTROYED	0x00400000	/* lock is destroyed */
 /*
  * Internal state flags corresponding to lk_sharecount, and lk_waitcount
 */
 #define	LK_SHARE_NONZERO 0x01000000
 #define	LK_WAIT_NONZERO	0x02000000
 
+/*
+ * Assertion flags.
+ */
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define	CA_LOCKED	LA_LOCKED
+#define	CA_SLOCKED	LA_SLOCKED
+#define	CA_XLOCKED	LA_XLOCKED
+#define	CA_UNLOCKED	LA_UNLOCKED
+#define	CA_RECURSED	LA_RECURSED
+#define	CA_NOTRECURSED	LA_NOTRECURSED
+#define	CA_HELD		(0x01 + LA_MASKASSERT)
+#define	CA_UNHELD	(0x02 + LA_MASKASSERT)
+#endif
 
 /*
  * Lock return status.
@@ -176,6 +190,9 @@
 int	 _lockmgr(struct lock *, u_int flags, struct mtx *, char *file,
 	    int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void	 _lockmgr_assert(struct lock *, int what, const char *, int);
+#endif
 void	 _lockmgr_disown(struct lock *, const char *, int);
 void	 lockmgr_printinfo(struct lock *);
 int	 lockstatus(struct lock *, struct thread *);
@@ -187,6 +204,12 @@
 	_lockmgr_disown((lock), LOCK_FILE, LOCK_LINE)
 #define	lockmgr_recursed(lkp)						\
 	((lkp)->lk_exclusivecount > 1)
+#ifdef INVARIANTS
+#define	lockmgr_assert(lkp, what)					\
+	_lockmgr_assert((lkp), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define	lockmgr_assert(lkp, what)
+#endif
 #ifdef DDB
 int	lockmgr_chain(struct thread *td, struct thread **ownerp);
 #endif
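
For reviewers, a quick usage sketch (not part of the patch): with the new
interface a lockmgr consumer can state its locking contract at the top of a
function instead of open-coding checks against lk_lockholder. The struct and
function names below are hypothetical, chosen only for illustration.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/lock.h>
	#include <sys/lockmgr.h>

	/* Hypothetical object protected by a lockmgr lock. */
	struct foo {
		struct lock	foo_lock;
		int		foo_dirty;
	};

	/*
	 * The caller must hold foo_lock exclusively and non-recursively.
	 * With INVARIANTS, lockmgr_assert() panics if that is violated;
	 * without INVARIANTS the macro expands to nothing, so the check
	 * costs nothing in production kernels.
	 */
	static void
	foo_modify(struct foo *fp)
	{

		lockmgr_assert(&fp->foo_lock, CA_XLOCKED | CA_NOTRECURSED);
		fp->foo_dirty = 1;
	}

Note that CA_HELD and CA_UNHELD are deliberately placed above LA_MASKASSERT
so they can never collide with the flag space that witness_assert()
understands: "held by anyone" is a question WITNESS cannot answer.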