--- //depot/vendor/freebsd/src/sys/gnu/fs/xfs/FreeBSD/xfs_buf.c	2008/01/19 17:39:46
+++ //depot/user/attilio/attilio_schedlock/gnu/fs/xfs/FreeBSD/xfs_buf.c	2008/02/13 12:23:07
@@ -86,8 +86,7 @@
 		bp->b_bufsize = size;
 		bp->b_bcount = size;
 
-		KASSERT(BUF_ISLOCKED(bp),
-		    ("xfs_buf_get_empty: bp %p not locked",bp));
+		ASSERT_BUFLOCK_HELD(bp);
 
 		xfs_buf_set_target(bp, target);
 	}
@@ -103,8 +102,7 @@
 
 	bp = geteblk(len);
 	if (bp != NULL) {
-		KASSERT(BUF_ISLOCKED(bp),
-		    ("xfs_buf_get_empty: bp %p not locked",bp));
+		ASSERT_BUFLOCK_HELD(bp);
 		xfs_buf_set_target(bp, target);
 	}
 
--- //depot/vendor/freebsd/src/sys/kern/kern_lock.c	2008/02/08 21:50:16
+++ //depot/user/attilio/attilio_schedlock/kern/kern_lock.c	2008/02/10 16:06:37
@@ -62,6 +62,8 @@
 
 #define	LOCKMGR_TRYOP(x)	((x) & LK_NOWAIT)
 #define	LOCKMGR_TRYW(x)		(LOCKMGR_TRYOP((x)) ? LOP_TRYLOCK : 0)
+#define	LOCKMGR_UNHELD(x)	(((x) & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0)
+#define	LOCKMGR_NOTOWNER(td)	((td) != curthread && (td) != LK_KERNPROC)
 
 static void	assert_lockmgr(struct lock_object *lock, int what);
 #ifdef DDB
@@ -82,6 +84,10 @@
 	.lc_unlock = unlock_lockmgr,
 };
 
+#ifndef INVARIANTS
+#define	_lockmgr_assert(lkp, what, file, line)
+#endif
+
 /*
  * Locking primitives implementation.
  * Locks provide shared/exclusive sychronization.
@@ -205,6 +211,15 @@
 	error = 0;
 	td = curthread;
 
+#ifdef INVARIANTS
+	if (lkp->lk_flags & LK_DESTROYED) {
+		if (flags & LK_INTERLOCK)
+			mtx_unlock(interlkp);
+		if (panicstr != NULL)
+			return (0);
+		panic("%s: %p lockmgr is destroyed", __func__, lkp);
+	}
+#endif
 	if ((flags & LK_INTERNAL) == 0)
 		mtx_lock(lkp->lk_interlock);
 	CTR6(KTR_LOCK,
@@ -280,10 +295,7 @@
 		/* FALLTHROUGH downgrade */
 
 	case LK_DOWNGRADE:
-		KASSERT(lkp->lk_lockholder == td && lkp->lk_exclusivecount != 0,
-		    ("lockmgr: not holding exclusive lock "
-		    "(owner thread (%p) != thread (%p), exlcnt (%d) != 0",
-		    lkp->lk_lockholder, td, lkp->lk_exclusivecount));
+		_lockmgr_assert(lkp, CA_XLOCKED, file, line);
 		sharelock(td, lkp, lkp->lk_exclusivecount);
 		WITNESS_DOWNGRADE(&lkp->lk_object, 0, file, line);
 		COUNT(td, -lkp->lk_exclusivecount);
@@ -303,10 +315,7 @@
 		 * after the upgrade). If we return an error, the file
 		 * will always be unlocked.
 		 */
-		if (lkp->lk_lockholder == td)
-			panic("lockmgr: upgrade exclusive lock");
-		if (lkp->lk_sharecount <= 0)
-			panic("lockmgr: upgrade without shared");
+		_lockmgr_assert(lkp, CA_SLOCKED, file, line);
 		shareunlock(td, lkp, 1);
 		if (lkp->lk_sharecount == 0)
 			lock_profile_release_lock(&lkp->lk_object);
@@ -419,33 +428,21 @@
 		break;
 
 	case LK_RELEASE:
+		_lockmgr_assert(lkp, CA_LOCKED, file, line);
 		if (lkp->lk_exclusivecount != 0) {
-			if (lkp->lk_lockholder != td &&
-			    lkp->lk_lockholder != LK_KERNPROC) {
-				panic("lockmgr: thread %p, not %s %p unlocking",
-				    td, "exclusive lock holder",
-				    lkp->lk_lockholder);
-			}
 			if (lkp->lk_lockholder != LK_KERNPROC) {
 				WITNESS_UNLOCK(&lkp->lk_object, LOP_EXCLUSIVE,
 				    file, line);
 				COUNT(td, -1);
 			}
-			if (lkp->lk_exclusivecount == 1) {
+			if (lkp->lk_exclusivecount-- == 1) {
 				lkp->lk_flags &= ~LK_HAVE_EXCL;
 				lkp->lk_lockholder = LK_NOPROC;
-				lkp->lk_exclusivecount = 0;
 				lock_profile_release_lock(&lkp->lk_object);
-			} else {
-				lkp->lk_exclusivecount--;
 			}
 		} else if (lkp->lk_flags & LK_SHARE_NONZERO) {
 			WITNESS_UNLOCK(&lkp->lk_object, 0, file, line);
 			shareunlock(td, lkp, 1);
-		} else {
-			printf("lockmgr: thread %p unlocking unheld lock\n",
-			    td);
-			kdb_backtrace();
 		}
 
 		if (lkp->lk_flags & LK_WAIT_NONZERO)
@@ -562,6 +559,10 @@
 
 	CTR2(KTR_LOCK, "lockdestroy(): lkp == %p (lk_wmesg == \"%s\")",
 	    lkp, lkp->lk_wmesg);
+	KASSERT((lkp->lk_flags & (LK_HAVE_EXCL | LK_SHARE_NONZERO)) == 0,
+	    ("lockmgr still held"));
+	KASSERT(lkp->lk_exclusivecount == 0, ("lockmgr still recursed"));
+	lkp->lk_flags = LK_DESTROYED;
 	lock_destroy(&lkp->lk_object);
 }
@@ -574,12 +575,10 @@
 	struct thread *td;
 
 	td = curthread;
-	KASSERT(panicstr != NULL || lkp->lk_exclusivecount,
-	    ("%s: %p lockmgr must be exclusively locked", __func__, lkp));
-	KASSERT(panicstr != NULL || lkp->lk_lockholder == td ||
-	    lkp->lk_lockholder == LK_KERNPROC,
-	    ("%s: %p lockmgr must be locked by curthread (%p)", __func__, lkp,
-	    td));
+
+	KASSERT(panicstr != NULL || (lkp->lk_flags & LK_DESTROYED) == 0,
+	    ("%s: %p lockmgr is destroyed", __func__, lkp));
+	_lockmgr_assert(lkp, CA_XLOCKED | CA_NOTRECURSED, file, line);
 
 	/*
 	 * Drop the lock reference and switch the owner. This will result
@@ -608,6 +607,8 @@
 
 	KASSERT(td == curthread,
 	    ("%s: thread passed argument (%p) is not valid", __func__, td));
+	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
+	    ("%s: %p lockmgr is destroyed", __func__, lkp));
 
 	if (!kdb_active) {
 		interlocked = 1;
@@ -635,6 +636,8 @@
 {
 	int count;
 
+	KASSERT((lkp->lk_flags & LK_DESTROYED) == 0,
+	    ("%s: %p lockmgr is destroyed", __func__, lkp));
 	mtx_lock(lkp->lk_interlock);
 	count = lkp->lk_waitcount;
 	mtx_unlock(lkp->lk_interlock);
@@ -664,6 +667,93 @@
 #endif
 }
 
+#ifdef INVARIANT_SUPPORT
+#ifndef INVARIANTS
+#undef	_lockmgr_assert
+#endif
+
+void
+_lockmgr_assert(struct lock *lkp, int what, const char *file, int line)
+{
+	struct thread *td;
+	uint32_t x;
+	int slocked = 0;
+
+	x = lkp->lk_flags;
+	td = lkp->lk_lockholder;
+	if (panicstr != NULL)
+		return;
+	switch (what) {
+	case CA_SLOCKED:
+	case CA_SLOCKED | CA_NOTRECURSED:
+	case CA_SLOCKED | CA_RECURSED:
+		slocked = 1;
+	case CA_LOCKED:
+	case CA_LOCKED | CA_NOTRECURSED:
+	case CA_LOCKED | CA_RECURSED:
+#ifdef WITNESS
+		/*
+		 * We cannot trust WITNESS if the lock is held in
+		 * exclusive mode and a call to lockmgr_disown() happened.
+		 * Work around this by skipping the check if the lock is
+		 * held in exclusive mode, even in the CA_LOCKED case.
+		 */
+		if (slocked || (x & LK_HAVE_EXCL) == 0) {
+			witness_assert(&lkp->lk_object, what, file, line);
+			break;
+		}
+#endif
+		if (LOCKMGR_UNHELD(x) || ((x & LK_SHARE_NONZERO) == 0 &&
+		    (slocked || LOCKMGR_NOTOWNER(td))))
+			panic("Lock %s not %slocked @ %s:%d\n",
+			    lkp->lk_object.lo_name, slocked ? "share " : "",
+			    file, line);
+		if ((x & LK_SHARE_NONZERO) == 0) {
+			if (lockmgr_recursed(lkp)) {
+				if (what & CA_NOTRECURSED)
+					panic("Lock %s recursed @ %s:%d\n",
+					    lkp->lk_object.lo_name, file,
+					    line);
+			} else if (what & CA_RECURSED)
+				panic("Lock %s not recursed @ %s:%d\n",
+				    lkp->lk_object.lo_name, file, line);
+		}
+		break;
+	case CA_XLOCKED:
+	case CA_XLOCKED | CA_NOTRECURSED:
+	case CA_XLOCKED | CA_RECURSED:
+		if ((x & LK_HAVE_EXCL) == 0 || LOCKMGR_NOTOWNER(td))
+			panic("Lock %s not exclusively locked @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		if (lockmgr_recursed(lkp)) {
+			if (what & CA_NOTRECURSED)
+				panic("Lock %s recursed @ %s:%d\n",
+				    lkp->lk_object.lo_name, file, line);
+		} else if (what & CA_RECURSED)
+			panic("Lock %s not recursed @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		break;
+	case CA_UNLOCKED:
+		if (td == curthread || td == LK_KERNPROC)
+			panic("Lock %s exclusively locked @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		break;
+	case CA_HELD:
+	case CA_UNHELD:
+		if (LOCKMGR_UNHELD(x)) {
+			if (what & CA_HELD)
+				panic("Lock %s not locked by anyone @ %s:%d\n",
+				    lkp->lk_object.lo_name, file, line);
+		} else if (what & CA_UNHELD)
+			panic("Lock %s locked by someone @ %s:%d\n",
+			    lkp->lk_object.lo_name, file, line);
+		break;
+	default:
+		panic("Unknown lockmgr lock assertion: 0x%x @ %s:%d", what,
+		    file, line);
+	}
+}
+#endif	/* INVARIANT_SUPPORT */
+
 #ifdef DDB
 /*
  * Check to see if a thread that is blocked on a sleep queue is actually
--- //depot/vendor/freebsd/src/sys/kern/vfs_bio.c	2008/01/19 17:39:46
+++ //depot/user/attilio/attilio_schedlock/kern/vfs_bio.c	2008/02/13 12:23:07
@@ -658,11 +658,11 @@
 {
 
 	CTR3(KTR_BUF, "bremfree(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
-	KASSERT(BUF_ISLOCKED(bp), ("bremfree: buf must be locked."));
 	KASSERT((bp->b_flags & B_REMFREE) == 0,
 	    ("bremfree: buffer %p already marked for delayed removal.", bp));
 	KASSERT(bp->b_qindex != QUEUE_NONE,
 	    ("bremfree: buffer %p not on a queue.", bp));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	bp->b_flags |= B_REMFREE;
 	/* Fixup numfreebuffers count.  */
@@ -695,9 +695,9 @@
 {
 	CTR3(KTR_BUF, "bremfreel(%p) vp %p flags %X",
 	    bp, bp->b_vp, bp->b_flags);
-	KASSERT(BUF_ISLOCKED(bp), ("bremfreel: buffer %p not locked.", bp));
 	KASSERT(bp->b_qindex != QUEUE_NONE,
 	    ("bremfreel: buffer %p not on a queue.", bp));
+	ASSERT_BUFLOCK_HELD(bp);
 	mtx_assert(&bqlock, MA_OWNED);
 
 	TAILQ_REMOVE(&bufqueues[bp->b_qindex], bp, b_freelist);
@@ -834,8 +834,7 @@
 
 	oldflags = bp->b_flags;
 
-	if (!BUF_ISLOCKED(bp))
-		panic("bufwrite: buffer is not busy???");
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_pin_count > 0)
 		bunpin_wait(bp);
@@ -952,7 +951,7 @@
 
 	CTR3(KTR_BUF, "bdwrite(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
-	KASSERT(BUF_ISLOCKED(bp), ("bdwrite: buffer is not busy"));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_flags & B_INVAL) {
 		brelse(bp);
@@ -1047,10 +1046,10 @@
 
 	CTR3(KTR_BUF, "bdirty(%p) vp %p flags %X",
 	    bp, bp->b_vp, bp->b_flags);
-	KASSERT(BUF_ISLOCKED(bp), ("bdirty: bp %p not locked",bp));
 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 	    ("bdirty: buffer %p still on queue %d", bp, bp->b_qindex));
+	ASSERT_BUFLOCK_HELD(bp);
 	bp->b_flags &= ~(B_RELBUF);
 	bp->b_iocmd = BIO_WRITE;
 
@@ -1081,7 +1080,7 @@
 	KASSERT(bp->b_bufobj != NULL, ("No b_bufobj %p", bp));
 	KASSERT(bp->b_flags & B_REMFREE || bp->b_qindex == QUEUE_NONE,
 	    ("bundirty: buffer %p still on queue %d", bp, bp->b_qindex));
-	KASSERT(BUF_ISLOCKED(bp), ("bundirty: bp %p not locked",bp));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_flags & B_DELWRI) {
 		bp->b_flags &= ~B_DELWRI;
@@ -2660,7 +2659,7 @@
 		bp->b_flags &= ~B_DONE;
 	}
 	CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
-	KASSERT(BUF_ISLOCKED(bp), ("getblk: bp %p not locked",bp));
+	ASSERT_BUFLOCK_HELD(bp);
 	KASSERT(bp->b_bufobj == bo,
 	    ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
 	return (bp);
@@ -2681,7 +2680,7 @@
 		continue;
 	allocbuf(bp, size);
 	bp->b_flags |= B_INVAL;	/* b_dep cleared by getnewbuf() */
-	KASSERT(BUF_ISLOCKED(bp), ("geteblk: bp %p not locked",bp));
+	ASSERT_BUFLOCK_HELD(bp);
 	return (bp);
 }
 
@@ -2707,8 +2706,7 @@
 	int newbsize, mbsize;
 	int i;
 
-	if (!BUF_ISLOCKED(bp))
-		panic("allocbuf: buffer not busy");
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_kvasize < size)
 		panic("allocbuf: buffer too small");
@@ -3150,8 +3148,8 @@
 
 	CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
 	dropobj = NULL;
-	KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
 	KASSERT(!(bp->b_flags & B_DONE), ("biodone: bp %p already done", bp));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	runningbufwakeup(bp);
 	if (bp->b_iocmd == BIO_WRITE)
@@ -3175,7 +3173,7 @@
 void
 bufdone_finish(struct buf *bp)
 {
-	KASSERT(BUF_ISLOCKED(bp), ("biodone: bp %p not busy", bp));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (!LIST_EMPTY(&bp->b_dep))
 		buf_complete(bp);
--- //depot/vendor/freebsd/src/sys/nfs4client/nfs4_vnops.c	2008/01/19 17:39:46
+++ //depot/user/attilio/attilio_schedlock/nfs4client/nfs4_vnops.c	2008/02/13 12:23:07
@@ -2448,7 +2448,7 @@
 
 	KASSERT(!(bp->b_flags & B_DONE),
 	    ("nfs4_strategy: buffer %p unexpectedly marked B_DONE", bp));
-	KASSERT(BUF_ISLOCKED(bp), ("nfs4_strategy: buffer %p not locked", bp));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_iocmd == BIO_READ)
 		cr = bp->b_rcred;
@@ -2808,8 +2808,7 @@
 	off_t off;
 #endif
 
-	if (!BUF_ISLOCKED(bp))
-		panic("bwrite: buffer is not locked???");
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_flags & B_INVAL) {
 		brelse(bp);
--- //depot/vendor/freebsd/src/sys/nfsclient/nfs_vnops.c	2008/01/19 17:39:46
+++ //depot/user/attilio/attilio_schedlock/nfsclient/nfs_vnops.c	2008/02/13 12:23:07
@@ -2694,7 +2694,7 @@
 
 	KASSERT(!(bp->b_flags & B_DONE),
 	    ("nfs_strategy: buffer %p unexpectedly marked B_DONE", bp));
-	KASSERT(BUF_ISLOCKED(bp), ("nfs_strategy: buffer %p not locked", bp));
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_iocmd == BIO_READ)
 		cr = bp->b_rcred;
@@ -3088,8 +3088,7 @@
 	off_t off;
 #endif
 
-	if (!BUF_ISLOCKED(bp))
-		panic("bwrite: buffer is not locked???");
+	ASSERT_BUFLOCK_HELD(bp);
 
 	if (bp->b_flags & B_INVAL) {
 		brelse(bp);
--- //depot/vendor/freebsd/src/sys/sys/buf.h	2008/01/24 12:35:50
+++ //depot/user/attilio/attilio_schedlock/sys/buf.h	2008/02/13 12:23:07
@@ -333,12 +333,33 @@
 /*
  * Free a buffer lock.
  */
-#define BUF_LOCKFREE(bp)						\
-do {									\
-	if (BUF_ISLOCKED(bp))						\
-		panic("free locked buf");				\
-	lockdestroy(&(bp)->b_lock);					\
-} while (0)
+#define	BUF_LOCKFREE(bp)						\
+	(lockdestroy(&(bp)->b_lock))
+
+/*
+ * Buffer lock assertions.
+ */
+#if defined(INVARIANTS) && defined(INVARIANT_SUPPORT)
+#define	ASSERT_BUFLOCK_LOCKED(bp)					\
+	_lockmgr_assert(&(bp)->b_lock, CA_LOCKED, LOCK_FILE, LOCK_LINE)
+#define	ASSERT_BUFLOCK_SLOCKED(bp)					\
+	_lockmgr_assert(&(bp)->b_lock, CA_SLOCKED, LOCK_FILE, LOCK_LINE)
+#define	ASSERT_BUFLOCK_XLOCKED(bp)					\
+	_lockmgr_assert(&(bp)->b_lock, CA_XLOCKED, LOCK_FILE, LOCK_LINE)
+#define	ASSERT_BUFLOCK_UNLOCKED(bp)					\
+	_lockmgr_assert(&(bp)->b_lock, CA_UNLOCKED, LOCK_FILE, LOCK_LINE)
+#define	ASSERT_BUFLOCK_HELD(bp)						\
+	_lockmgr_assert(&(bp)->b_lock, CA_HELD, LOCK_FILE, LOCK_LINE)
+#define	ASSERT_BUFLOCK_UNHELD(bp)					\
+	_lockmgr_assert(&(bp)->b_lock, CA_UNHELD, LOCK_FILE, LOCK_LINE)
+#else
+#define	ASSERT_BUFLOCK_LOCKED(bp)
+#define	ASSERT_BUFLOCK_SLOCKED(bp)
+#define	ASSERT_BUFLOCK_XLOCKED(bp)
+#define	ASSERT_BUFLOCK_UNLOCKED(bp)
+#define	ASSERT_BUFLOCK_HELD(bp)
+#define	ASSERT_BUFLOCK_UNHELD(bp)
+#endif
 
 #ifdef _SYS_PROC_H_	/* Avoid #include pollution */
 /*
--- //depot/vendor/freebsd/src/sys/sys/lock.h	2007/11/18 14:44:31
+++ //depot/user/attilio/attilio_schedlock/sys/lock.h	2008/02/08 22:38:30
@@ -105,6 +105,7 @@
 #define	LOP_DUPOK	0x00000010	/* Don't check for duplicate acquires */
 
 /* Flags passed to witness_assert. */
+#define	LA_MASKASSERT	0x000000ff	/* Mask for witness defined asserts. */
 #define	LA_UNLOCKED	0x00000000	/* Lock is unlocked. */
 #define	LA_LOCKED	0x00000001	/* Lock is at least share locked. */
 #define	LA_SLOCKED	0x00000002	/* Lock is exactly share locked. */
--- //depot/vendor/freebsd/src/sys/sys/lockmgr.h	2008/02/10 15:51:52
+++ //depot/user/attilio/attilio_schedlock/sys/lockmgr.h	2008/02/13 12:23:07
@@ -53,7 +53,7 @@
 struct lock {
 	struct lock_object lk_object;	/* common lock properties */
 	struct mtx *lk_interlock;	/* lock on remaining fields */
-	u_int	lk_flags;		/* see below */
+	uint32_t lk_flags;		/* see below */
 	int	lk_sharecount;		/* # of accepted shared locks */
 	int	lk_waitcount;		/* # of processes sleeping for lock */
 	short	lk_exclusivecount;	/* # of recursive exclusive locks */
@@ -138,12 +138,27 @@
 #define	LK_WAITDRAIN	0x00080000	/* process waiting for lock to drain */
 #define	LK_DRAINING	0x00100000	/* lock is being drained */
 #define	LK_INTERNAL	0x00200000	/* The internal lock is already held */
+#define	LK_DESTROYED	0x00400000	/* lock is destroyed */
 /*
  * Internal state flags corresponding to lk_sharecount, and lk_waitcount
  */
 #define	LK_SHARE_NONZERO 0x01000000
 #define	LK_WAIT_NONZERO	 0x02000000
+/*
+ * Assertion flags.
+ */
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+#define	CA_BASE		(LA_MASKASSERT + 1)
+#define	CA_LOCKED	LA_LOCKED
+#define	CA_SLOCKED	LA_SLOCKED
+#define	CA_XLOCKED	LA_XLOCKED
+#define	CA_UNLOCKED	LA_UNLOCKED
+#define	CA_RECURSED	LA_RECURSED
+#define	CA_NOTRECURSED	LA_NOTRECURSED
+#define	CA_HELD		(CA_BASE << 0x00)
+#define	CA_UNHELD	(CA_BASE << 0x01)
+#endif
 
 /*
  * Lock return status.
@@ -176,6 +191,9 @@
 
 int	 _lockmgr(struct lock *, u_int flags, struct mtx *, char *file,
 	    int line);
+#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
+void	 _lockmgr_assert(struct lock *, int what, const char *, int);
+#endif
 void	 _lockmgr_disown(struct lock *, const char *, int);
 void	 lockmgr_printinfo(struct lock *);
 int	 lockstatus(struct lock *, struct thread *);
@@ -187,6 +205,12 @@
 	_lockmgr_disown((lock), LOCK_FILE, LOCK_LINE)
 #define	lockmgr_recursed(lkp)						\
 	((lkp)->lk_exclusivecount > 1)
+#ifdef INVARIANTS
+#define	lockmgr_assert(lkp, what)					\
+	_lockmgr_assert((lkp), (what), LOCK_FILE, LOCK_LINE)
+#else
+#define	lockmgr_assert(lkp, what)
+#endif
 #ifdef DDB
 int	lockmgr_chain(struct thread *td, struct thread **ownerp);
 #endif
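
Usage sketch (illustration only, not part of the diff): how a consumer
states its locking contract with the new primitives. lockmgr_assert()
compiles to nothing without INVARIANTS, and the ASSERT_BUFLOCK_* macros
compile to nothing unless both INVARIANTS and INVARIANT_SUPPORT are
defined, so the checks cost nothing in production kernels. The
foo_softc/foo_modify() names below are made up for the example.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/lockmgr.h>
#include <sys/buf.h>

struct foo_softc {
	struct lock	sc_lock;	/* lockmgr lock protecting sc_state */
	int		sc_state;
};

static void
foo_modify(struct foo_softc *sc, struct buf *bp)
{

	/* The caller must own sc_lock exclusively and not recursively. */
	lockmgr_assert(&sc->sc_lock, CA_XLOCKED | CA_NOTRECURSED);

	/*
	 * CA_HELD mirrors the old BUF_ISLOCKED() test: the buffer lock
	 * must be held by someone, in any mode, but not necessarily by
	 * curthread, which is what buffers disowned to LK_KERNPROC
	 * require.
	 */
	ASSERT_BUFLOCK_HELD(bp);

	sc->sc_state++;
}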