Only in /cvs/sys_old/compile/EROSTRATUS: simplelock.o diff -ru /cvs/sys_old/i386/include/mutex.h /usr/src/sys/i386/include/mutex.h --- /cvs/sys_old/i386/include/mutex.h Wed Jan 24 21:17:14 2001 +++ /usr/src/sys/i386/include/mutex.h Thu Jan 25 01:25:06 2001 @@ -267,7 +267,7 @@ pushl $0 ; /* dummy __FILE__ */ \ pushl $type ; \ pushl $lck ; \ - call _mtx_enter ; \ + call _mtx_lock ; \ addl $16,%esp #define MTX_EXIT(lck, type) \ @@ -275,7 +275,7 @@ pushl $0 ; /* dummy __FILE__ */ \ pushl $type ; \ pushl $lck ; \ - call _mtx_exit ; \ + call _mtx_unlock ; \ addl $16,%esp #endif /* !LOCORE */ diff -ru /cvs/sys_old/kern/kern_mutex.c /usr/src/sys/kern/kern_mutex.c --- /cvs/sys_old/kern/kern_mutex.c Wed Jan 24 21:17:22 2001 +++ /usr/src/sys/kern/kern_mutex.c Thu Jan 25 01:29:56 2001 @@ -54,8 +54,7 @@ #include "opt_witness.h" /* - * Cause non-inlined mtx_*() to be compiled. - * Must be defined early because other system headers may include mutex.h. + * (XXX XXX XXX) yucky yucky; this is only for the STR stuff and should go away. */ #define _KERN_MUTEX_C_ @@ -100,16 +99,8 @@ #endif /* WITNESS */ /* - * Assembly macros - *------------------------------------------------------------------------------ - */ - -#define _V(x) __STRING(x) - -/* * Default, unoptimized mutex micro-operations */ - #ifndef _obtain_lock /* Actually obtain mtx_lock */ #define _obtain_lock(mp, tid) \ @@ -128,93 +119,28 @@ atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED) #endif -#ifndef _getlock_sleep -/* Get a sleep lock, deal with recursion inline. 
*/ -#define _getlock_sleep(mp, tid, type) do { \ - if (!_obtain_lock(mp, tid)) { \ - if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\ - mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \ - else { \ - atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \ - (mp)->mtx_recurse++; \ - } \ - } \ -} while (0) -#endif - -#ifndef _getlock_spin_block -/* Get a spin lock, handle recursion inline (as the less common case) */ -#define _getlock_spin_block(mp, tid, type) do { \ - u_int _mtx_intr = save_intr(); \ - disable_intr(); \ - if (!_obtain_lock(mp, tid)) \ - mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \ - else \ - (mp)->mtx_saveintr = _mtx_intr; \ -} while (0) -#endif - -#ifndef _getlock_norecurse /* - * Get a lock without any recursion handling. Calls the hard enter function if - * we can't get it inline. + * Internal "lock utility" macros. */ -#define _getlock_norecurse(mp, tid, type) do { \ - if (!_obtain_lock(mp, tid)) \ - mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \ -} while (0) -#endif +#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) -#ifndef _exitlock_norecurse -/* - * Release a sleep lock assuming we haven't recursed on it, recursion is handled - * in the hard function. - */ -#define _exitlock_norecurse(mp, tid, type) do { \ - if (!_release_lock(mp, tid)) \ - mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \ -} while (0) -#endif +#define mtx_owner(m) (mtx_unowned((m)) ? NULL \ + : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK)) -#ifndef _exitlock -/* - * Release a sleep lock when its likely we recursed (the code to - * deal with simple recursion is inline). - */ -#define _exitlock(mp, tid, type) do { \ - if (!_release_lock(mp, tid)) { \ - if ((mp)->mtx_lock & MTX_RECURSED) { \ - if (--((mp)->mtx_recurse) == 0) \ - atomic_clear_ptr(&(mp)->mtx_lock, \ - MTX_RECURSED); \ - } else { \ - mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \ - } \ - } \ -} while (0) -#endif - -#ifndef _exitlock_spin -/* Release a spin lock (with possible recursion). 
*/ -#define _exitlock_spin(mp) do { \ - if (!mtx_recursed((mp))) { \ - int _mtx_intr = (mp)->mtx_saveintr; \ - \ - _release_lock_quick(mp); \ - restore_intr(_mtx_intr); \ - } else { \ - (mp)->mtx_recurse--; \ - } \ -} while (0) -#endif +#define RETIP(x) *(((uintptr_t *)(&x)) - 1) +#define SET_PRIO(p, pri) (p)->p_priority = (pri) #ifdef WITNESS +/* + * Prototypes for non-exported witness routines. + */ static void witness_init(struct mtx *, int flag); static void witness_destroy(struct mtx *); static void witness_display(void(*)(const char *fmt, ...)); /* All mutexes in system (used for debug/panic) */ static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 }; + /* * Set to 0 once mutexes have been fully initialized so that witness code can be * safely executed. @@ -223,15 +149,17 @@ #else /* WITNESS */ /* - * flag++ is slezoid way of shutting up unused parameter warning - * in mtx_init() + * Note: flag += 0 is a sleazoid way of shutting up a compiler warning while + * at the same time not destroying `flag' (just in case)... */ -#define witness_init(m, flag) flag++ +#define witness_init(m, flag) (flag) += 0 #define witness_destroy(m) #define witness_try_enter(m, t, f, l) #endif /* WITNESS */ -/* All mutexes in system (used for debug/panic) */ +/* + * All mutex locks in system are kept on the all_mtx list. + */ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head", TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked), { NULL, NULL }, &all_mtx, &all_mtx, @@ -242,19 +170,17 @@ #endif }; +/* + * (XXX XXX XXX); revisit these; do we really need the globals? + */ static int mtx_cur_cnt; static int mtx_max_cnt; +/* + * Prototypes for internal non-exported routines (debugging functions, such + * as WITNESS and company, are prototyped elsewhere). 
+ */ static void propagate_priority(struct proc *); -static void mtx_enter_hard(struct mtx *, int type, int saveintr); -static void mtx_exit_hard(struct mtx *, int type); - -#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) -#define mtx_owner(m) (mtx_unowned(m) ? NULL \ - : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK)) - -#define RETIP(x) *(((uintptr_t *)(&x)) - 1) -#define SET_PRIO(p, pri) (p)->p_priority = (pri) static void propagate_priority(struct proc *p) @@ -349,7 +275,7 @@ p1 = TAILQ_PREV(p, rq, p_procq); if (p1->p_priority <= pri) { printf( - "XXX: previous process %d(%s) has higher priority\n", + "XXX: previous process %d(%s) has higher priority\n", p->p_pid, p->p_comm); continue; } @@ -376,161 +302,62 @@ } /* - * Get lock 'm', the macro handles the easy (and most common cases) and leaves - * the slow stuff to the mtx_enter_hard() function. - * - * Note: since type is usually a constant much of this code is optimized out. - */ -void -_mtx_enter(struct mtx *mtxp, int type, const char *file, int line) -{ - struct mtx *mpp = mtxp; - - /* bits only valid on mtx_exit() */ - MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0, - STR_mtx_bad_type, file, line); - - if ((type) & MTX_SPIN) { - /* - * Easy cases of spin locks: - * - * 1) We already own the lock and will simply recurse on it (if - * RLIKELY) - * - * 2) The lock is free, we just get it - */ - if ((type) & MTX_RLIKELY) { - /* - * Check for recursion, if we already have this - * lock we just bump the recursion count. - */ - if (mpp->mtx_lock == (uintptr_t)CURTHD) { - mpp->mtx_recurse++; - goto done; - } - } - - if (((type) & MTX_TOPHALF) == 0) { - /* - * If an interrupt thread uses this we must block - * interrupts here. 
- */ - if ((type) & MTX_FIRST) { - ASS_IEN; - disable_intr(); - _getlock_norecurse(mpp, CURTHD, - (type) & MTX_HARDOPTS); - } else { - _getlock_spin_block(mpp, CURTHD, - (type) & MTX_HARDOPTS); - } - } else - _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS); - } else { - /* Sleep locks */ - if ((type) & MTX_RLIKELY) - _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS); - else - _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS); - } -done: - WITNESS_ENTER(mpp, type, file, line); - if (((type) & MTX_QUIET) == 0) - CTR5(KTR_LOCK, STR_mtx_enter_fmt, - mpp->mtx_description, mpp, mpp->mtx_recurse, file, line); - -} - -/* - * Attempt to get MTX_DEF lock, return non-zero if lock acquired. - * - * XXX DOES NOT HANDLE RECURSION + * The important part of mtx_try_lock{_opts}() + * Tries to acquire lock `m.' We do NOT handle recursion here; we assume that + * if we're called, it's because we know we don't already own this lock. */ int -_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line) +_mtx_try_lock(struct mtx *m, int opts, const char *file, int line) { - struct mtx *const mpp = mtxp; int rval; - rval = _obtain_lock(mpp, CURTHD); + MPASS((opts & MTX_NOSWITCH) == 0); + rval = _obtain_lock(m, CURTHD); + #ifdef WITNESS - if (rval && mpp->mtx_witness != NULL) { - MPASS(mpp->mtx_recurse == 0); - witness_try_enter(mpp, type, file, line); + if (rval && m->mtx_witness != NULL) { + MPASS(m->mtx_recurse == 0); + witness_try_enter(m, opts, file, line); } #endif /* WITNESS */ - if (((type) & MTX_QUIET) == 0) + + if ((opts & MTX_QUIET) == 0) CTR5(KTR_LOCK, STR_mtx_try_enter_fmt, - mpp->mtx_description, mpp, rval, file, line); + m->mtx_description, m, rval, file, line); return rval; } /* - * Release lock m. + * The important part of mtx_lock(). Here we actually acquire the lock `m' + * regardless of whether it is a spin or sleep lock. We take care to handle + * recursion. 
*/ void -_mtx_exit(struct mtx *mtxp, int type, const char *file, int line) -{ - struct mtx *const mpp = mtxp; - - MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line); - WITNESS_EXIT(mpp, type, file, line); - if (((type) & MTX_QUIET) == 0) - CTR5(KTR_LOCK, STR_mtx_exit_fmt, - mpp->mtx_description, mpp, mpp->mtx_recurse, file, line); - if ((type) & MTX_SPIN) { - if ((type) & MTX_NORECURSE) { - int mtx_intr = mpp->mtx_saveintr; - - MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse, - file, line); - _release_lock_quick(mpp); - if (((type) & MTX_TOPHALF) == 0) { - if ((type) & MTX_FIRST) { - ASS_IDIS; - enable_intr(); - } else - restore_intr(mtx_intr); - } - } else { - if (((type & MTX_TOPHALF) == 0) && - (type & MTX_FIRST)) { - ASS_IDIS; - ASS_SIEN(mpp); - } - _exitlock_spin(mpp); - } - } else { - /* Handle sleep locks */ - if ((type) & MTX_RLIKELY) - _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS); - else { - _exitlock_norecurse(mpp, CURTHD, - (type) & MTX_HARDOPTS); - } - } -} - -void -mtx_enter_hard(struct mtx *m, int type, int saveintr) +_mtx_lock(struct mtx *m, int opts, const char *file, int line) { struct proc *p = CURPROC; KASSERT(p != NULL, ("curproc is NULL in mutex")); - switch (type) { + switch (m->mtx_flags & MTX_TYPES) { + case MTX_DEF: + { + if (_obtain_lock(m, p)) + goto got_lock; + if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) { m->mtx_recurse++; atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_enter: %p recurse", m); - return; + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "mtx_lock: %p recurse", m); + goto got_lock; } - if ((type & MTX_QUIET) == 0) + + if ((opts & MTX_QUIET) == 0) CTR3(KTR_LOCK, - "mtx_enter: %p contested (lock=%p) [%p]", + "mtx_lock: %p contested (lock=%p) [%p]", m, (void *)m->mtx_lock, (void *)RETIP(m)); /* @@ -548,30 +375,34 @@ uintptr_t v; struct proc *p1; - mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY); + mtx_lock(&sched_lock); /* * check if the lock has been released while * waiting 
for the schedlock. */ if ((v = m->mtx_lock) == MTX_UNOWNED) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock(&sched_lock); continue; } + /* * The mutex was marked contested on release. This * means that there are processes blocked on it. */ if (v == MTX_CONTESTED) { p1 = TAILQ_FIRST(&m->mtx_blocked); - KASSERT(p1 != NULL, ("contested mutex has no contesters")); - KASSERT(p != NULL, ("curproc is NULL for contested mutex")); + KASSERT(p1 != NULL, + ("contested mutex has no contesters")); + KASSERT(p != NULL, + ("curproc is NULL for contested mutex")); m->mtx_lock = (uintptr_t)p | MTX_CONTESTED; if (p1->p_priority < p->p_priority) { SET_PRIO(p, p1->p_priority); } - mtx_exit(&sched_lock, MTX_SPIN); - return; + mtx_unlock(&sched_lock); + goto got_lock; } + /* * If the mutex isn't already contested and * a failure occurs setting the contested bit the @@ -581,7 +412,7 @@ if ((v & MTX_CONTESTED) == 0 && !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, (void *)(v | MTX_CONTESTED))) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock(&sched_lock); continue; } @@ -597,9 +428,9 @@ ithd_t *it = (ithd_t *)p; if (it->it_interrupted) { - if ((type & MTX_QUIET) == 0) + if ((opts & MTX_QUIET) == 0) CTR2(KTR_LOCK, - "mtx_enter: 0x%x interrupted 0x%x", + "mtx_unlock: 0x%x interrupted 0x%x", it, it->it_interrupted); intr_thd_fixup(it); } @@ -624,39 +455,53 @@ p_procq); } - p->p_blocked = m; /* Who we're blocked on */ + /* Save who we're blocked on. 
*/ + p->p_blocked = m; p->p_mtxname = m->mtx_description; p->p_stat = SMTX; #if 0 propagate_priority(p); #endif - if ((type & MTX_QUIET) == 0) + if ((opts & MTX_QUIET) == 0) CTR3(KTR_LOCK, - "mtx_enter: p %p blocked on [%p] %s", + "mtx_unlock: p %p blocked on [%p] %s", p, m, m->mtx_description); + mi_switch(); - if ((type & MTX_QUIET) == 0) + + if ((opts & MTX_QUIET) == 0) CTR3(KTR_LOCK, - "mtx_enter: p %p free from blocked on [%p] %s", + "mtx_unlock: p %p free from blocked on [%p] %s", p, m, m->mtx_description); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock(&sched_lock); } - return; + + break; + } + case MTX_SPIN: - case MTX_SPIN | MTX_FIRST: - case MTX_SPIN | MTX_TOPHALF: { int i = 0; + u_int mtx_intr = save_intr(); + + disable_intr(); + if (_obtain_lock(m, p)) { + m->mtx_saveintr = mtx_intr; + goto got_lock; + } if (m->mtx_lock == (uintptr_t)p) { m->mtx_recurse++; - return; + goto got_lock; } - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_enter: %p spinning", m); + + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "mtx_lock: %p spinning", m); + for (;;) { if (_obtain_lock(m, p)) break; + while (m->mtx_lock != MTX_UNOWNED) { if (i++ < 1000000) continue; @@ -673,121 +518,154 @@ (void *)m->mtx_lock); } } - -#ifdef MUTEX_DEBUG - if (type != MTX_SPIN) - m->mtx_saveintr = 0xbeefface; - else -#endif - m->mtx_saveintr = saveintr; - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_enter: %p spin done", m); - return; + + m->mtx_saveintr = mtx_intr; + break; } + + default: + panic("mtx_lock: unsupported mutex type: 0x%x\n", + m->mtx_flags & MTX_TYPES); + } + +got_lock: + WITNESS_ENTER(m, (m->mtx_flags | opts), file, line); + + if ((opts & MTX_QUIET) == 0) { + if (m->mtx_flags & MTX_DEF) + CTR1(KTR_LOCK, "mtx_lock: %p sleep done", m); + else + CTR1(KTR_LOCK, "mtx_lock: %p spin done", m); } + + return; } +/* + * The important part of mtx_unlock(). Here we actually release the lock `m' + * regardless of whether it is a spin or sleep lock. 
We take care to handle + * recursion. + */ void -mtx_exit_hard(struct mtx *m, int type) +_mtx_unlock(struct mtx *m, int opts, const char *file, int line) { - struct proc *p, *p1; - struct mtx *m1; - int pri; + struct proc *p; p = CURPROC; - switch (type) { + KASSERT(p != NULL, ("curproc is NULL in _mtx_unlock")); + MPASS4(mtx_owned(m), STR_mtx_owned, file, line); + WITNESS_EXIT(m, (m->mtx_flags | opts), file, line); + + if ((opts & MTX_QUIET) == 0) + CTR5(KTR_LOCK, STR_mtx_exit_fmt, m->mtx_description, m, + m->mtx_recurse, file, line); + + switch (m->mtx_flags & MTX_TYPES) { + case MTX_DEF: - case MTX_DEF | MTX_NOSWITCH: + { + struct mtx *m1; + struct proc *p1; + int pri; + + if (_release_lock(m, p)) + return; + if (mtx_recursed(m)) { if (--(m->mtx_recurse) == 0) atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "mtx_unlock: %p unrecurse", m); return; } - mtx_enter(&sched_lock, MTX_SPIN); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_exit: %p contested", m); + + mtx_lock(&sched_lock); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "mtx_unlock: %p contested", m); p1 = TAILQ_FIRST(&m->mtx_blocked); MPASS(p->p_magic == P_MAGIC); MPASS(p1->p_magic == P_MAGIC); TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq); + if (TAILQ_EMPTY(&m->mtx_blocked)) { LIST_REMOVE(m, mtx_contested); _release_lock_quick(m); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_exit: %p not held", m); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "mtx_unlock: %p not held", m); } else atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED); + pri = MAXPRI; LIST_FOREACH(m1, &p->p_contested, mtx_contested) { int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority; if (cp < pri) pri = cp; } + if (pri > p->p_nativepri) pri = p->p_nativepri; SET_PRIO(p, pri); - if ((type & MTX_QUIET) == 0) + + if ((opts & MTX_QUIET) == 0) CTR2(KTR_LOCK, - "mtx_exit: %p contested setrunqueue %p", 
m, p1); + "mtx_unlock: %p contested setrunqueue %p", m, p1); + p1->p_blocked = NULL; p1->p_mtxname = NULL; p1->p_stat = SRUN; setrunqueue(p1); - if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) { + + if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) { #ifdef notyet if (p->p_flag & (P_ITHD | P_SITHD)) { ithd_t *it = (ithd_t *)p; if (it->it_interrupted) { - if ((type & MTX_QUIET) == 0) + if ((opts & MTX_QUIET) == 0) CTR2(KTR_LOCK, - "mtx_exit: 0x%x interruped 0x%x", + "mtx_unlock: 0x%x interruped 0x%x", it, it->it_interrupted); intr_thd_fixup(it); } } #endif setrunqueue(p); - if ((type & MTX_QUIET) == 0) + if ((opts & MTX_QUIET) == 0) CTR2(KTR_LOCK, - "mtx_exit: %p switching out lock=%p", + "mtx_unlock: %p switching out lock=%p", m, (void *)m->mtx_lock); + mi_switch(); - if ((type & MTX_QUIET) == 0) + + if ((opts & MTX_QUIET) == 0) CTR2(KTR_LOCK, - "mtx_exit: %p resuming lock=%p", + "mtx_unlock: %p resuming lock=%p", m, (void *)m->mtx_lock); } - mtx_exit(&sched_lock, MTX_SPIN); - break; + + mtx_unlock(&sched_lock); + return; + } + case MTX_SPIN: - case MTX_SPIN | MTX_FIRST: - if (mtx_recursed(m)) { - m->mtx_recurse--; - return; - } - MPASS(mtx_owned(m)); - _release_lock_quick(m); - if (type & MTX_FIRST) - enable_intr(); /* XXX is this kosher? */ - else { - MPASS(m->mtx_saveintr != 0xbeefface); - restore_intr(m->mtx_saveintr); - } - break; - case MTX_SPIN | MTX_TOPHALF: + { + u_int mtx_intr = m->mtx_saveintr; + if (mtx_recursed(m)) { m->mtx_recurse--; return; } - MPASS(mtx_owned(m)); + _release_lock_quick(m); - break; + MPASS(mtx_intr != 0); + restore_intr(mtx_intr); + return; + } + default: - panic("mtx_exit_hard: unsupported type 0x%x\n", type); + panic("mtx_unlock: unsupported type 0x%x\n", m->mtx_flags & + MTX_TYPES); } } @@ -843,7 +721,7 @@ if (m == &all_mtx || cold) return 0; - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); /* * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly * we can re-enable the kernacc() checks. 
@@ -887,16 +765,23 @@ retval = 1; } } - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); return (retval); } #endif +/* + * Mutex initialization routine; initialize lock `m' of type contained in + * `opts' with options contained in `opts' and description `description.' + * Place on "all_mtx" queue. + */ void -mtx_init(struct mtx *m, const char *t, int flag) +mtx_init(struct mtx *m, const char *description, int opts) { - if ((flag & MTX_QUIET) == 0) - CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t); + + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description); + #ifdef MUTEX_DEBUG if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */ return; @@ -904,6 +789,7 @@ bzero((void *)m, sizeof *m); TAILQ_INIT(&m->mtx_blocked); + #ifdef WITNESS if (!witness_cold) { /* XXX - should not use DEVBUF */ @@ -912,25 +798,30 @@ MPASS(m->mtx_debug != NULL); } #endif - m->mtx_description = t; - m->mtx_flags = flag; + m->mtx_description = description; + m->mtx_flags = opts; m->mtx_lock = MTX_UNOWNED; + /* Put on all mutex queue */ - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); m->mtx_next = &all_mtx; m->mtx_prev = all_mtx.mtx_prev; m->mtx_prev->mtx_next = m; all_mtx.mtx_prev = m; if (++mtx_cur_cnt > mtx_max_cnt) mtx_max_cnt = mtx_cur_cnt; - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); + #ifdef WITNESS if (!witness_cold) - witness_init(m, flag); + witness_init(m, opts); #endif } +/* + * Remove lock `m' from all_mtx queue. 
+ */ void mtx_destroy(struct mtx *m) { @@ -940,6 +831,7 @@ __FUNCTION__)); #endif CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description); + #ifdef MUTEX_DEBUG if (m->mtx_next == NULL) panic("mtx_destroy: %p (%s) already destroyed", @@ -959,7 +851,7 @@ #endif /* WITNESS */ /* Remove from the all mutex queue */ - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); m->mtx_next->mtx_prev = m->mtx_prev; m->mtx_prev->mtx_next = m->mtx_next; #ifdef MUTEX_DEBUG @@ -970,7 +862,7 @@ m->mtx_debug = NULL; #endif mtx_cur_cnt--; - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); } /* @@ -988,9 +880,9 @@ * We have to release Giant before initializing its witness * structure so that WITNESS doesn't get confused. */ - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); mtx_assert(&Giant, MA_NOTOWNED); - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); /* Iterate through all mutexes and finish up mutex initialization. */ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) { @@ -1002,12 +894,12 @@ witness_init(mp, mp->mtx_flags); } - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); /* Mark the witness code as being ready for use. 
*/ atomic_store_rel_int(&witness_cold, 0); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); } SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL) @@ -1211,17 +1103,17 @@ file, line); return; } - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_opts(&w_mtx, MTX_QUIET); i = PCPU_GET(witness_spin_check); if (i != 0 && w->w_level < i) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); panic("mutex_enter(%s:%x, MTX_SPIN) out of order @" " %s:%d already holding %s:%x", m->mtx_description, w->w_level, file, line, spin_order_list[ffs(i)-1], i); } PCPU_SET(witness_spin_check, i | w->w_level); - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); w->w_file = file; w->w_line = line; m->mtx_line = line; @@ -1245,7 +1137,7 @@ goto out; if (!mtx_legal2block()) - panic("blockable mtx_enter() of %s when not legal @ %s:%d", + panic("blockable mtx_lock() of %s when not legal @ %s:%d", m->mtx_description, file, line); /* * Is this the first mutex acquired @@ -1267,16 +1159,16 @@ goto out; } MPASS(!mtx_owned(&w_mtx)); - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_opts(&w_mtx, MTX_QUIET); /* * If we have a known higher number just say ok */ if (witness_watch > 1 && w->w_level > w1->w_level) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); goto out; } if (isitmydescendant(m1->mtx_witness, w)) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); goto out; } for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) { @@ -1284,7 +1176,7 @@ MPASS(i < 200); w1 = m1->mtx_witness; if (isitmydescendant(w, w1)) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); if (blessed(w, w1)) goto out; if (m1 == &Giant) { @@ -1313,7 +1205,7 @@ } m1 = LIST_FIRST(&p->p_heldmtx); if (!itismychild(m1->mtx_witness, w)) - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); out: #ifdef DDB @@ -1356,10 +1248,10 
@@ m->mtx_description, file, line); return; } - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_opts(&w_mtx, MTX_QUIET); PCPU_SET(witness_spin_check, PCPU_GET(witness_spin_check) | w->w_level); - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); w->w_file = file; w->w_line = line; m->mtx_line = line; @@ -1407,10 +1299,10 @@ file, line); return; } - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_opts(&w_mtx, MTX_QUIET); PCPU_SET(witness_spin_check, PCPU_GET(witness_spin_check) & ~w->w_level); - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); return; } if ((m->mtx_flags & MTX_SPIN) != 0) @@ -1426,7 +1318,7 @@ } if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold) - panic("switchable mtx_exit() of %s when not legal @ %s:%d", + panic("switchable mtx_unlock() of %s when not legal @ %s:%d", m->mtx_description, file, line); LIST_REMOVE(m, mtx_held); m->mtx_held.le_prev = NULL; @@ -1497,10 +1389,10 @@ } if ((flag & MTX_SPIN) && witness_skipspin) return (NULL); - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_opts(&w_mtx, MTX_QUIET); for (w = w_all; w; w = w->w_next) { if (strcmp(description, w->w_description) == 0) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); return (w); } } @@ -1509,7 +1401,7 @@ w->w_next = w_all; w_all = w; w->w_description = description; - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); if (flag & MTX_SPIN) { w->w_spin = 1; @@ -1731,7 +1623,7 @@ if ((w = w_free) == NULL) { witness_dead = 1; - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_opts(&w_mtx, MTX_QUIET); printf("witness exhausted\n"); return (NULL); } diff -ru /cvs/sys_old/sys/mutex.h /usr/src/sys/sys/mutex.h --- /cvs/sys_old/sys/mutex.h Wed Jan 24 21:18:00 2001 +++ /usr/src/sys/sys/mutex.h Thu Jan 25 01:25:43 2001 @@ -48,31 +48,34 @@ #ifdef _KERNEL /* - * Mutex flags - * - * Types + * Mutex types and options stored in mutex->mtx_flags */ 
-#define MTX_DEF 0x0 /* Default (spin/sleep) */ -#define MTX_SPIN 0x1 /* Spin only lock */ +#define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */ +#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */ +#define MTX_RECURSE 0x00000002 /* Option: lock allowed to recurse */ -/* Options */ -#define MTX_RECURSE 0x2 /* Recursive lock (for mtx_init) */ -#define MTX_RLIKELY 0x4 /* Recursion likely */ -#define MTX_NORECURSE 0x8 /* No recursion possible */ -#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */ -#define MTX_NOSWITCH 0x20 /* Do not switch on release */ -#define MTX_FIRST 0x40 /* First spin lock holder */ -#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */ -#define MTX_QUIET 0x100 /* Don't log a mutex event */ - -/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */ -#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH) - -/* Flags/value used in mtx_lock */ -#define MTX_RECURSED 0x01 /* (non-spin) lock held recursively */ -#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */ +/* + * Mask out all non-strictly-type bits in mutex->mtx_flags such that we can + * fetch `the type of lock' by looking at (mutex->mtx_flags & MTX_TYPES). + */ +#define MTX_TYPES (MTX_DEF | MTX_SPIN) + +/* + * Internal options for mtx_lock() and/or mtx_unlock() (passed as `opts' + * argument by wrapper macros, if necessary); use mtx_{lock, unlock}_opts() + * if one of these needs to be used. + */ +#define MTX_NOSWITCH 0x00000004 /* Do not switch on release */ +#define MTX_QUIET 0x00000008 /* Don't log a mutex event */ + +/* + * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this, + * with the exception of MTX_UNOWNED, applies to spin locks. 
+ */ +#define MTX_RECURSED 0x00000001 /* (non-spin) lock held recursively */ +#define MTX_CONTESTED 0x00000002 /* (non-spin) lock contested */ +#define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */ #define MTX_FLAGMASK ~(MTX_RECURSED | MTX_CONTESTED) -#define MTX_UNOWNED 0x8 /* Cookie for free mutex */ #endif /* _KERNEL */ @@ -84,59 +87,126 @@ * Sleep/spin mutex */ struct mtx { - volatile uintptr_t mtx_lock; /* lock owner/gate/flags */ + volatile uintptr_t mtx_lock; /* owner (and state for sleep locks) */ volatile u_int mtx_recurse; /* number of recursive holds */ u_int mtx_saveintr; /* saved flags (for spin locks) */ int mtx_flags; /* flags passed to mtx_init() */ const char *mtx_description; - TAILQ_HEAD(, proc) mtx_blocked; - LIST_ENTRY(mtx) mtx_contested; - struct mtx *mtx_next; /* all locks in system */ - struct mtx *mtx_prev; - struct mtx_debug *mtx_debug; + TAILQ_HEAD(, proc) mtx_blocked; /* threads blocked on this lock */ + LIST_ENTRY(mtx) mtx_contested; /* list of all contested locks */ + struct mtx *mtx_next; /* all existing locks */ + struct mtx *mtx_prev; /* in system... */ + struct mtx_debug *mtx_debug; /* debugging information... */ }; +/* + * XXX: Friendly reminder to fix things in MP code that is presently being + * XXX: worked on. + */ #define mp_fixme(string) #ifdef _KERNEL -/* Prototypes */ -void mtx_init(struct mtx *m, const char *description, int flag); -void mtx_destroy(struct mtx *m); /* - * Wrap the following functions with cpp macros so that filenames and line - * numbers are embedded in the code correctly. + * Prototypes + * + * NOTE: Functions prepended with `_' (underscore) are exported to other parts + * of the kernel via macros, thus allowing us to use the cpp __FILE__ + * and __LINE__. These functions should not be called directly by any + * code using the IPI. Their macros cover their functionality. 
+ *
+ * [See below for descriptions]
+ *
 */
-void	_mtx_enter(struct mtx *mtxp, int type, const char *file, int line);
-int	_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line);
-void	_mtx_exit(struct mtx *mtxp, int type, const char *file, int line);
+void	mtx_init(struct mtx *m, const char *description, int opts);
+void	mtx_destroy(struct mtx *m);
+void	_mtx_lock(struct mtx *m, int opts, const char *file, int line);
+void	_mtx_unlock(struct mtx *m, int opts, const char *file, int line);
+int	_mtx_try_lock(struct mtx *m, int opts, const char *file, int line);
+/*
+ * Compatibility hacks (XXX XXX XXX: REMOVE THIS BEFORE COMMIT)
+ */
 #define mtx_enter(mtxp, type) \
-	_mtx_enter((mtxp), (type), __FILE__, __LINE__)
+	_mtx_lock((mtxp), (type), __FILE__, __LINE__)
 #define mtx_try_enter(mtxp, type) \
-	_mtx_try_enter((mtxp), (type), __FILE__, __LINE__)
+	_mtx_try_lock((mtxp), (type), __FILE__, __LINE__)
 #define mtx_exit(mtxp, type) \
-	_mtx_exit((mtxp), (type), __FILE__, __LINE__)
+	_mtx_unlock((mtxp), (type), __FILE__, __LINE__)
+
+/*
+ * Exported lock manipulation interface.
+ *
+ * mtx_init(m, description, opts) initializes lock `m' with `description' and
+ * registers as a type `opts' lock. MTX_RECURSE can also be set in `opts' if
+ * the caller wishes to allow the mutex to recurse.
+ *
+ * mtx_destroy(m) [cleanly] removes a lock from the system.
+ *
+ * mtx_lock(m) locks mutex `m'
+ *
+ * mtx_unlock(m) unlocks mutex `m'
+ *
+ * mtx_try_lock(m) attempts to acquire a lock and obtain it as long as the lock
+ * is not already held. It will _not_ spin or sleep in the case where the lock
+ * is held and, furthermore, it will _not_ handle recursion.
+ *
+ * mtx_owned(m) checks if lock `m' is owned by the thread that's calling it and
+ * returns non-zero if it is.
+ *
+ * mtx_recursed(m) checks if the lock `m' is presently recursed. Note that the
+ * lock `m' need not be a sleep lock; this works for all types of locks.
+ *
+ * Note: mtx_{lock, unlock, try_lock}_opts may also be called as alternatives
+ * to the standard versions of the routines in order to pass in options
+ * flags, such as MTX_NOSWITCH or MTX_QUIET.
+ *
+ */
+#define mtx_lock(m) \
+	_mtx_lock((m), 0, __FILE__, __LINE__)
+
+#define mtx_unlock(m) \
+	_mtx_unlock((m), 0, __FILE__, __LINE__)
+
+#define mtx_lock_opts(m, opts) \
+	_mtx_lock((m), (opts), __FILE__, __LINE__)
+
+#define mtx_unlock_opts(m, opts) \
+	_mtx_unlock((m), (opts), __FILE__, __LINE__)
-/* Global locks */
+#define mtx_try_lock(m) \
+	_mtx_try_lock((m), 0, __FILE__, __LINE__)
+
+#define mtx_try_lock_opts(m, opts) \
+	_mtx_try_lock((m), (opts), __FILE__, __LINE__)
+
+#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD)
+
+#define mtx_recursed(m) ((m)->mtx_recurse != 0)
+
+/*
+ * Global locks.
+ */
 extern struct mtx sched_lock;
 extern struct mtx Giant;
 
 /*
+ * Giant lock manipulation and clean exit macros.
 * Used to replace return with an exit Giant and return.
+ * + * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT() */ - #define EGAR(a) \ do { \ - mtx_exit(&Giant, MTX_DEF); \ + mtx_unlock(&Giant); \ return (a); \ } while (0) #define VEGAR \ do { \ - mtx_exit(&Giant, MTX_DEF); \ + mtx_unlock(&Giant); \ return; \ } while (0) @@ -148,7 +218,7 @@ if (mtx_owned(&Giant)) \ WITNESS_SAVE(&Giant, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \ - mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH) + mtx_unlock_opts(&Giant, MTX_NOSWITCH) #define DROP_GIANT() \ do { \ @@ -158,12 +228,12 @@ if (mtx_owned(&Giant)) \ WITNESS_SAVE(&Giant, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \ - mtx_exit(&Giant, MTX_DEF) + mtx_unlock(&Giant) #define PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ while (_giantcnt--) \ - mtx_enter(&Giant, MTX_DEF); \ + mtx_lock(&Giant); \ if (mtx_owned(&Giant)) \ WITNESS_RESTORE(&Giant, Giant); \ } while (0) @@ -171,37 +241,49 @@ #define PARTIAL_PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ while (_giantcnt--) \ - mtx_enter(&Giant, MTX_DEF); \ + mtx_lock(&Giant); \ if (mtx_owned(&Giant)) \ WITNESS_RESTORE(&Giant, Giant) /* - * Debugging + * The INVARIANTS-enabled mtx_assert() functionality. */ #ifdef INVARIANTS -#define MA_OWNED 1 -#define MA_NOTOWNED 2 -#define MA_RECURSED 4 -#define MA_NOTRECURSED 8 +#define MA_OWNED 0x01 +#define MA_NOTOWNED 0x02 +#define MA_RECURSED 0x04 +#define MA_NOTRECURSED 0x08 + void _mtx_assert(struct mtx *m, int what, const char *file, int line); -#define mtx_assert(m, what) _mtx_assert((m), (what), __FILE__, __LINE__) +#define mtx_assert(m, what) \ + _mtx_assert((m), (what), __FILE__, __LINE__) + #else /* INVARIANTS */ #define mtx_assert(m, what) #endif /* INVARIANTS */ +/* + * The MUTEX_DEBUG-enabled MPASS*() extra sanity-check macros. 
+ */ #ifdef MUTEX_DEBUG #define MPASS(ex) \ if (!(ex)) \ - panic("Assertion %s failed at %s:%d", #ex, __FILE__, __LINE__) + panic("Assertion %s failed at %s:%d", #ex, __FILE__, \ + __LINE__) + #define MPASS2(ex, what) \ if (!(ex)) \ - panic("Assertion %s failed at %s:%d", what, __FILE__, __LINE__) + panic("Assertion %s failed at %s:%d", what, __FILE__, \ + __LINE__) + #define MPASS3(ex, file, line) \ if (!(ex)) \ panic("Assertion %s failed at %s:%d", #ex, file, line) + #define MPASS4(ex, what, file, line) \ if (!(ex)) \ panic("Assertion %s failed at %s:%d", what, file, line) + #else /* MUTEX_DEBUG */ #define MPASS(ex) #define MPASS2(ex, what) @@ -210,21 +292,8 @@ #endif /* MUTEX_DEBUG */ /* - * Externally visible mutex functions. - *------------------------------------------------------------------------------ - */ - -/* - * Return non-zero if a mutex is already owned by the current thread. + * Common strings (XXX XXX XXX: These should probably go!) */ -#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD) - -/* - * Return non-zero if a mutex has been recursively acquired. - */ -#define mtx_recursed(m) ((m)->mtx_recurse != 0) - -/* Common strings */ #ifdef _KERN_MUTEX_C_ char STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d at %s:%d"; char STR_mtx_exit_fmt[] = "REL %s [%p] r=%d at %s:%d"; @@ -241,6 +310,9 @@ extern char STR_mtx_try_enter_fmt[]; #endif /* _KERN_MUTEX_C_ */ +/* + * Exported WITNESS-enabled functions and corresponding wrapper macros. 
+ */ #ifdef WITNESS void witness_save(struct mtx *, const char **, int *); void witness_restore(struct mtx *, const char *, int); @@ -250,16 +322,25 @@ int witness_list(struct proc *); int witness_sleep(int, struct mtx *, const char *, int); -#define WITNESS_ENTER(m, t, f, l) witness_enter((m), (t), (f), (l)) -#define WITNESS_EXIT(m, t, f, l) witness_exit((m), (t), (f), (l)) -#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__) +#define WITNESS_ENTER(m, t, f, l) \ + witness_enter((m), (t), (f), (l)) + +#define WITNESS_EXIT(m, t, f, l) \ + witness_exit((m), (t), (f), (l)) + +#define WITNESS_SLEEP(check, m) \ + witness_sleep(check, (m), __FILE__, __LINE__) + #define WITNESS_SAVE_DECL(n) \ const char * __CONCAT(n, __wf); \ int __CONCAT(n, __wl) + #define WITNESS_SAVE(m, n) \ witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)) + #define WITNESS_RESTORE(m, n) \ witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)) + #else /* WITNESS */ #define witness_enter(m, t, f, l) #define witness_tryenter(m, t, f, l)