Index: sys/kern/kern_sx.c
===================================================================
RCS file: /home/ncvs/src/sys/kern/kern_sx.c,v
retrieving revision 1.1
diff -u -r1.1 kern_sx.c
--- sys/kern/kern_sx.c	2001/03/05 19:59:40	1.1
+++ sys/kern/kern_sx.c	2001/03/06 06:09:45
@@ -34,12 +34,12 @@
  * Priority propagation will not generally raise the priority of lock holders,
  * so should not be relied upon in combination with sx locks.
  *
- * The witness code can not detect lock cycles.
+ * The witness code can not detect lock cycles (yet).
  *
+ * XXX: When witness is made to function with sx locks, it will need to
+ * XXX: be taught to deal with these situations, as they are more involved:
  * slock --> xlock (deadlock)
  * slock --> slock (slock recursion, not fatal)
- * xlock --> xlock (deadlock)
- * xlock --> slock (deadlock)
  */
 
 #include <sys/param.h>
@@ -58,7 +58,9 @@
 	cv_init(&sx->sx_shrd_cv, description);
 	sx->sx_shrd_wcnt = 0;
 	cv_init(&sx->sx_excl_cv, description);
+	sx->sx_descr = description;
 	sx->sx_excl_wcnt = 0;
+	sx->sx_xholder = NULL;
 }
 
 void
@@ -66,7 +68,7 @@
 {
 
 	KASSERT((sx->sx_cnt == 0 && sx->sx_shrd_wcnt == 0 && sx->sx_excl_wcnt ==
-	    0), ("%s: holders or waiters\n", __FUNCTION__));
+	    0), ("%s (%s): holders or waiters\n", __FUNCTION__, sx->sx_descr));
 
 	mtx_destroy(&sx->sx_lock);
 	cv_destroy(&sx->sx_shrd_cv);
@@ -78,6 +80,9 @@
 {
 
 	mtx_lock(&sx->sx_lock);
+	KASSERT(sx->sx_xholder != curproc,
+	    ("%s (%s): trying to get slock while xlock is held\n", __FUNCTION__,
+	    sx->sx_descr));
 
 	/*
 	 * Loop in case we lose the race for lock acquisition.
@@ -100,6 +105,16 @@
 
 	mtx_lock(&sx->sx_lock);
 
+	/*
+	 * With sx locks, we're absolutely not permitted to recurse on
+	 * xlocks, as it is fatal (deadlock). Normally, recursion is handled
+	 * by WITNESS, but as it is not semantically correct to hold the
+	 * xlock while in here, we consider it API abuse and put it under
+	 * INVARIANTS.
+	 */
+	KASSERT(sx->sx_xholder != curproc,
+	    ("%s (%s): xlock already held", __FUNCTION__, sx->sx_descr));
+
 	/* Loop in case we lose the race for lock acquisition. */
 	while (sx->sx_cnt != 0) {
 		sx->sx_excl_wcnt++;
@@ -107,8 +122,11 @@
 		sx->sx_excl_wcnt--;
 	}
 
+	MPASS(sx->sx_cnt == 0);
+
 	/* Acquire an exclusive lock. */
 	sx->sx_cnt--;
+	sx->sx_xholder = curproc;
 
 	mtx_unlock(&sx->sx_lock);
 }
@@ -118,7 +136,7 @@
 {
 
+	SX_ASSERT_SLOCKED(sx);
 	mtx_lock(&sx->sx_lock);
-	KASSERT((sx->sx_cnt > 0), ("%s: lacking slock\n", __FUNCTION__));
 
 	/* Release. */
 	sx->sx_cnt--;
@@ -143,10 +161,12 @@
 {
 
+	SX_ASSERT_XLOCKED(sx);
 	mtx_lock(&sx->sx_lock);
-	KASSERT((sx->sx_cnt == -1), ("%s: lacking xlock\n", __FUNCTION__));
+	MPASS(sx->sx_cnt == -1);
 
 	/* Release. */
 	sx->sx_cnt++;
+	sx->sx_xholder = NULL;
 
 	/*
 	 * Wake up waiters if there are any. Give precedence to slock waiters.
Index: sys/sys/sx.h
===================================================================
RCS file: /home/ncvs/src/sys/sys/sx.h,v
retrieving revision 1.1
diff -u -r1.1 sx.h
--- sys/sys/sx.h	2001/03/05 19:59:40	1.1
+++ sys/sys/sx.h	2001/03/06 06:09:45
@@ -35,12 +35,14 @@
 #include <sys/condvar.h>
 
 struct sx {
-	struct mtx	sx_lock;	/* General protection lock and xlock. */
+	struct mtx	sx_lock;	/* General protection lock. */
+	const char	*sx_descr;	/* sx lock description. */
 	int		sx_cnt;		/* -1: xlock, > 0: slock count. */
 	struct cv	sx_shrd_cv;	/* slock waiters. */
 	int		sx_shrd_wcnt;	/* Number of slock waiters. */
 	struct cv	sx_excl_cv;	/* xlock waiters. */
 	int		sx_excl_wcnt;	/* Number of xlock waiters. */
+	struct proc	*sx_xholder;	/* Thread presently holding xlock. */
 };
 
 #ifdef _KERNEL
@@ -58,20 +60,22 @@
  */
#define	SX_ASSERT_SLOCKED(sx) do {					\
 	mtx_lock(&(sx)->sx_lock);					\
-	KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock\n",		\
-	    __FUNCTION__));						\
+	KASSERT(((sx)->sx_cnt > 0), ("%s: lacking slock %s\n",		\
+	    __FUNCTION__, (sx)->sx_descr));				\
 	mtx_unlock(&(sx)->sx_lock);					\
 } while (0)
+
 /*
- * SX_ASSERT_XLOCKED() can only detect that at least *some* thread owns an
- * xlock, but it cannot guarantee that *this* thread owns an xlock.
+ * SX_ASSERT_XLOCKED() verifies that the current thread owns the xlock.
  */
#define	SX_ASSERT_XLOCKED(sx) do {					\
 	mtx_lock(&(sx)->sx_lock);					\
-	KASSERT(((sx)->sx_cnt == -1), ("%s: lacking xlock\n",		\
-	    __FUNCTION__));						\
+	KASSERT(((sx)->sx_xholder == curproc),				\
+	    ("%s (%s): thread %p lacking xlock\n", __FUNCTION__,	\
+	    (sx)->sx_descr, curproc));					\
 	mtx_unlock(&(sx)->sx_lock);					\
 } while (0)
+
 #else	/* INVARIANTS */
 #define	SX_ASSERT_SLOCKED(sx)
 #define	SX_ASSERT_XLOCKED(sx)
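
For illustration, here is a minimal sketch of how a consumer would use this
interface once the patch is applied. The foo_lock, foo_count, and foo_*
names are hypothetical and not part of the patch; the point is that, with
sx_xholder recorded, SX_ASSERT_XLOCKED() can now catch a thread asserting an
xlock it does not actually own.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sx.h>

static struct sx	foo_lock;	/* Hypothetical subsystem lock. */
static int		foo_count;	/* Datum protected by foo_lock. */

static void
foo_init(void)
{

	sx_init(&foo_lock, "foo_lock");
}

static int
foo_read(void)
{
	int count;

	sx_slock(&foo_lock);		/* Any number of readers may hold slocks. */
	count = foo_count;
	sx_sunlock(&foo_lock);
	return (count);
}

static void
foo_write(int count)
{

	sx_xlock(&foo_lock);		/* Exclusive; recursing here trips the new KASSERT. */
	SX_ASSERT_XLOCKED(&foo_lock);	/* Now checks sx_xholder == curproc. */
	foo_count = count;
	sx_xunlock(&foo_lock);		/* Clears sx_xholder before waking waiters. */
}

Note that only the exclusive holder is tracked; shared holders remain
anonymous, which is why SX_ASSERT_SLOCKED() can still only check that *some*
slock is outstanding.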