Index: sys/alpha/alpha/busdma_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/busdma_machdep.c,v retrieving revision 1.14 diff -u -r1.14 busdma_machdep.c --- sys/alpha/alpha/busdma_machdep.c 2000/12/03 20:46:46 1.14 +++ sys/alpha/alpha/busdma_machdep.c 2001/02/07 03:01:09 @@ -614,11 +614,11 @@ break; } bpage->busaddr = pmap_kextract(bpage->vaddr); - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); STAILQ_INSERT_TAIL(&bounce_page_list, bpage, links); total_bpages++; free_bpages++; - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); count++; numpages--; } @@ -653,7 +653,7 @@ panic("add_bounce_page: map doesn't need any pages"); map->pagesreserved--; - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); bpage = STAILQ_FIRST(&bounce_page_list); if (bpage == NULL) panic("add_bounce_page: free page list is empty"); @@ -661,7 +661,7 @@ STAILQ_REMOVE_HEAD(&bounce_page_list, links); reserved_bpages--; active_bpages++; - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); bpage->datavaddr = vaddr; bpage->datacount = size; @@ -677,7 +677,7 @@ bpage->datavaddr = 0; bpage->datacount = 0; - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); STAILQ_INSERT_HEAD(&bounce_page_list, bpage, links); free_bpages++; active_bpages--; @@ -690,7 +690,7 @@ sched_swi(vm_ih, SWI_NOSWITCH); } } - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); } void @@ -698,13 +698,13 @@ { struct bus_dmamap *map; - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); bus_dmamap_load(map->dmat, map, map->buf, map->buflen, map->callback, map->callback_arg, /*flags*/0); - mtx_enter(&bounce_lock, MTX_DEF); + mtx_lock(&bounce_lock); } - mtx_exit(&bounce_lock, MTX_DEF); + mtx_unlock(&bounce_lock); } Index: sys/alpha/alpha/interrupt.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/interrupt.c,v retrieving revision 1.38 diff -u -r1.38 interrupt.c --- sys/alpha/alpha/interrupt.c 2001/02/05 19:34:25 1.38 +++ sys/alpha/alpha/interrupt.c 2001/02/07 03:05:17 @@ -560,7 +560,7 @@ "alpha_dispatch_intr: disabling vector 0x%x", i->vector); i->disable(i->vector); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (ithd->it_proc->p_stat == SWAIT) { /* not on the run queue and not running */ CTR1(KTR_INTR, "alpha_dispatch_intr: setrunqueue %d", @@ -587,7 +587,7 @@ ithd->it_proc->p_pid, ithd->it_need, ithd->it_proc->p_stat); need_resched(); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } void @@ -626,10 +626,10 @@ ih->ih_flags); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* @@ -646,7 +646,7 @@ * set again, so we have to check it again. 
*/ mtx_assert(&Giant, MA_NOTOWNED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (!ithd->it_need) { ithd->it_proc->p_stat = SWAIT; /* we're idle */ CTR1(KTR_INTR, "ithd_loop pid %d: done", ithd->it_proc->p_pid); } CTR1(KTR_INTR, "ithd_loop pid %d: resumed", ithd->it_proc->p_pid); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } Index: sys/alpha/alpha/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/machdep.c,v retrieving revision 1.112 diff -u -r1.112 machdep.c --- sys/alpha/alpha/machdep.c 2001/02/04 07:00:46 1.112 +++ sys/alpha/alpha/machdep.c 2001/02/07 03:01:09 @@ -1012,7 +1012,7 @@ */ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * Look at arguments passed to us and compute boothowto. Index: sys/alpha/alpha/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/mp_machdep.c,v retrieving revision 1.11 diff -u -r1.11 mp_machdep.c --- sys/alpha/alpha/mp_machdep.c 2001/01/24 19:49:13 1.11 +++ sys/alpha/alpha/mp_machdep.c 2001/02/07 03:01:09 @@ -162,7 +162,7 @@ alpha_pal_wrent(XentUna, ALPHA_KENTRY_UNA); alpha_pal_wrent(XentSys, ALPHA_KENTRY_SYS); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("smp_init_secondary: called\n"); CTR0(KTR_SMP, "smp_init_secondary"); @@ -176,7 +176,7 @@ spl0(); smp_ipi_all(0); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } extern void smp_init_secondary_glue(void); @@ -657,14 +657,14 @@ return; if (!forward_signal_enabled) return; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); while (1) { if (p->p_stat != SRUN) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } id = p->p_oncpu; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (id == 0xff) return; map = (1<<id); [...] if (id == p->p_oncpu) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } } @@ -841,7 +841,7 @@ { /* obtain rendezvous lock */ - mtx_enter(&smp_rv_mtx, MTX_SPIN); + mtx_lock_spin(&smp_rv_mtx); /* set static function pointers */ smp_rv_setup_func = setup_func; @@ -858,7 +858,7 @@ smp_rendezvous_action(); /* release lock */ - mtx_exit(&smp_rv_mtx, MTX_SPIN); + mtx_unlock_spin(&smp_rv_mtx); } /* Index: sys/alpha/alpha/procfs_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/procfs_machdep.c,v retrieving revision 1.5 diff -u -r1.5 procfs_machdep.c --- sys/alpha/alpha/procfs_machdep.c 2001/01/24 10:16:23 1.5 +++ sys/alpha/alpha/procfs_machdep.c 2001/02/07 03:01:09 @@ -86,12 +86,12 @@ struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_regs(p, regs)); } @@ -101,12 +101,12 @@ struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_regs(p, regs)); } @@ -121,12 +121,12 @@ struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { -
mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_fpregs(p, fpregs)); } @@ -136,12 +136,12 @@ struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_fpregs(p, fpregs)); } Index: sys/alpha/alpha/trap.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/trap.c,v retrieving revision 1.42 diff -u -r1.42 trap.c --- sys/alpha/alpha/trap.c 2001/02/06 09:20:17 1.42 +++ sys/alpha/alpha/trap.c 2001/02/07 03:01:09 @@ -106,10 +106,10 @@ /* take pending signals */ while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_priority = p->p_usrpri; if (want_resched) { /* @@ -125,30 +125,30 @@ setrunqueue(p); p->p_stats->p_ru.ru_nivcsw++; mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); splx(s); while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } /* * If profiling, charge recent system time to the trapped pc. */ if (p->p_sflag & PS_PROFIL) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, frame->tf_regs[FRAME_PC], (int)(p->p_sticks - oticks) * psratio); } curpriority = p->p_priority; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } static void @@ -230,9 +230,9 @@ ucode = 0; user = (framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) != 0; if (user) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_tf = framep; #if 0 /* This is to catch some weird stuff on the UDB (mj) */ @@ -259,12 +259,12 @@ * and per-process unaligned-access-handling flags). */ if (user) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if ((i = unaligned_fixup(a0, a1, a2, p)) == 0) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); ucode = a0; /* VA */ break; } @@ -288,13 +288,13 @@ * is not requested or if the completion fails. */ if (user) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if (a0 & EXCSUM_SWC) if (fp_software_completion(a1, p)) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); i = SIGFPE; ucode = a0; /* exception summary */ break; @@ -415,7 +415,7 @@ goto out; } - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * It is only a kernel address space fault iff: * 1. 
!user and @@ -529,11 +529,11 @@ rv = KERN_INVALID_ADDRESS; } if (rv == KERN_SUCCESS) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); if (!user) { /* Check for copyin/copyout fault */ if (p != NULL && @@ -575,7 +575,7 @@ framep->tf_regs[FRAME_SP] = alpha_pal_rdusp(); userret(p, framep, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } return; @@ -621,7 +621,7 @@ * Find our per-cpu globals. */ globalp = (struct globaldata *) alpha_pal_rdval(); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); framep->tf_regs[FRAME_TRAPARG_A0] = 0; framep->tf_regs[FRAME_TRAPARG_A1] = 0; @@ -635,9 +635,9 @@ p = curproc; p->p_md.md_tf = framep; opc = framep->tf_regs[FRAME_PC] - 4; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); #ifdef DIAGNOSTIC alpha_fpstate_check(p); @@ -739,7 +739,7 @@ * is not the case, this code will need to be revisited. */ STOPEVENT(p, S_SCX, code); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #ifdef WITNESS if (witness_list(p)) { @@ -763,9 +763,9 @@ u_quad_t sticks; p = curproc; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_tf = framep; if ((framep->tf_regs[FRAME_PS] & ALPHA_PSL_USERMODE) == 0) @@ -774,36 +774,36 @@ cnt.v_soft++; PCPU_SET(astpending, 0); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_OWEUPC) { p->p_sflag &= ~PS_OWEUPC; - mtx_exit(&sched_lock, MTX_SPIN); - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, p->p_stats->p_prof.pr_addr, p->p_stats->p_prof.pr_ticks); } if (p->p_sflag & PS_ALRMPEND) { p->p_sflag &= ~PS_ALRMPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGVTALRM); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } if (p->p_sflag & PS_PROFPEND) { p->p_sflag &= ~PS_PROFPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGPROF); } else - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); userret(p, framep, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* Index: sys/alpha/alpha/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/alpha/vm_machdep.c,v retrieving revision 1.42 diff -u -r1.42 vm_machdep.c --- sys/alpha/alpha/vm_machdep.c 2001/01/26 23:32:38 1.42 +++ sys/alpha/alpha/vm_machdep.c 2001/02/07 03:01:09 @@ -253,8 +253,8 @@ { alpha_fpstate_drop(p); - mtx_enter(&sched_lock, MTX_SPIN); - mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH); + mtx_lock_spin(&sched_lock); + mtx_unlock_flags(&Giant, MTX_NOSWITCH); mtx_assert(&Giant, MA_NOTOWNED); /* @@ -437,7 +437,7 @@ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) return(0); - if (mtx_try_enter(&Giant, MTX_DEF)) { + if (mtx_trylock(&Giant)) { s = splvm(); m = vm_page_list_find(PQ_FREE, free_rover, FALSE); zero_state = 0; @@ -466,7 +466,7 @@ } free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK; splx(s); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); return (1); } return (0); Index: 
sys/alpha/include/cpu.h =================================================================== RCS file: /home/ncvs/src/sys/alpha/include/cpu.h,v retrieving revision 1.23 diff -u -r1.23 cpu.h --- sys/alpha/include/cpu.h 2001/01/24 22:00:13 1.23 +++ sys/alpha/include/cpu.h 2001/02/07 03:01:09 @@ -81,9 +81,9 @@ * through trap, marking the proc as needing a profiling tick. */ #define need_proftick(p) do { \ - mtx_enter(&sched_lock, MTX_SPIN); \ + mtx_lock_spin(&sched_lock); \ (p)->p_sflag |= PS_OWEUPC; \ - mtx_exit(&sched_lock, MTX_SPIN); \ + mtx_unlock_spin(&sched_lock); \ aston(); \ } while (0) Index: sys/alpha/include/mutex.h =================================================================== RCS file: /home/ncvs/src/sys/alpha/include/mutex.h,v retrieving revision 1.17 diff -u -r1.17 mutex.h --- sys/alpha/include/mutex.h 2001/01/21 22:34:42 1.17 +++ sys/alpha/include/mutex.h 2001/02/07 03:01:09 @@ -39,26 +39,12 @@ /* * Debugging */ -#ifdef MUTEX_DEBUG - -#ifdef _KERN_MUTEX_C_ -char STR_IEN[] = "ps & IPL == IPL_0"; -char STR_IDIS[] = "ps & IPL == IPL_HIGH"; -char STR_SIEN[] = "mpp->mtx_saveintr == IPL_0"; -#else /* _KERN_MUTEX_C_ */ -extern char STR_IEN[]; -extern char STR_IDIS[]; -extern char STR_SIEN[]; -#endif /* _KERN_MUTEX_C_ */ - -#endif /* MUTEX_DEBUG */ - #define ASS_IEN MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \ - == ALPHA_PSL_IPL_0, STR_IEN) + == ALPHA_PSL_IPL_0, "ps & IPL == IPL_0") #define ASS_IDIS MPASS2((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) \ - == ALPHA_PSL_IPL_HIGH, STR_IDIS) + == ALPHA_PSL_IPL_HIGH, "ps & IPL == IPL_HIGH") #define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr \ - == ALPHA_PSL_IPL_0, STR_SIEN) + == ALPHA_PSL_IPL_0, "mpp->mtx_saveintr == IPL_0") #define mtx_legal2block() \ ((alpha_pal_rdps() & ALPHA_PSL_IPL_MASK) == ALPHA_PSL_IPL_0) @@ -68,34 +54,33 @@ *-------------------------------------------------------------------------- */ -#ifdef _KERN_MUTEX_C_ - -#define _V(x) __STRING(x) - /* - * Get a spin lock, handle recusion inline (as the less common case) + * Get a spin lock, handle recursion inline. */ - -#define _getlock_spin_block(mp, tid, type) do { \ +#define _get_spin_lock(mp, tid, opts) do { \ u_int _ipl = alpha_pal_swpipl(ALPHA_PSL_IPL_HIGH); \ - if (!_obtain_lock(mp, tid)) \ - mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _ipl); \ - else { \ + if (!_obtain_lock((mp), (tid))) { \ + if ((mp)->mtx_lock == (uintptr_t)(tid)) \ + (mp)->mtx_recurse++; \ + else \ + _mtx_lock_spin((mp), (opts), _ipl, __FILE__, \ + __LINE__); \ + } else { \ alpha_mb(); \ (mp)->mtx_saveintr = _ipl; \ } \ } while (0) -#undef _V - -#endif /* _KERN_MUTEX_C_ */ - #endif /* _KERNEL */ #else /* !LOCORE */ /* * Simple assembly macros to get and release non-recursive spin locks * + * XXX: These are presently unused and cannot be used right now. Need to be + * re-written (they are wrong). If you plan to use this and still see + * this message, know not to unless you fix them first!
:-) */ #define MTX_ENTER(lck) \ ldiq a0, ALPHA_PSL_IPL_HIGH; \ Index: sys/alpha/osf1/osf1_misc.c =================================================================== RCS file: /home/ncvs/src/sys/alpha/osf1/osf1_misc.c,v retrieving revision 1.11 diff -u -r1.11 osf1_misc.c --- sys/alpha/osf1/osf1_misc.c 2001/02/06 10:39:38 1.11 +++ sys/alpha/osf1/osf1_misc.c 2001/02/07 03:01:09 @@ -1341,9 +1341,9 @@ switch (uap->who) { case RUSAGE_SELF: rup = &p->p_stats->p_ru; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); break; case RUSAGE_CHILDREN: Index: sys/compat/linprocfs/linprocfs_misc.c =================================================================== RCS file: /home/ncvs/src/sys/compat/linprocfs/linprocfs_misc.c,v retrieving revision 1.18 diff -u -r1.18 linprocfs_misc.c --- sys/compat/linprocfs/linprocfs_misc.c 2001/01/23 22:13:07 1.18 +++ sys/compat/linprocfs/linprocfs_misc.c 2001/02/07 03:01:09 @@ -454,12 +454,12 @@ sbuf_new(&sb, NULL, 1024, 0); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat > sizeof state_str / sizeof *state_str) state = state_str[0]; else state = state_str[(int)p->p_stat]; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_SHARED); ppid = p->p_pptr ? p->p_pptr->p_pid : 0; Index: sys/compat/linux/linux_misc.c =================================================================== RCS file: /home/ncvs/src/sys/compat/linux/linux_misc.c,v retrieving revision 1.93 diff -u -r1.93 linux_misc.c --- sys/compat/linux/linux_misc.c 2001/01/27 00:01:26 1.93 +++ sys/compat/linux/linux_misc.c 2001/02/07 03:01:09 @@ -642,9 +642,9 @@ #ifdef DEBUG printf("Linux-emul(%ld): times(*)\n", (long)p->p_pid); #endif - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); calcru(p, &ru.ru_utime, &ru.ru_stime, NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); tms.tms_utime = CONVTCK(ru.ru_utime); tms.tms_stime = CONVTCK(ru.ru_stime); Index: sys/compat/svr4/svr4_misc.c =================================================================== RCS file: /home/ncvs/src/sys/compat/svr4/svr4_misc.c,v retrieving revision 1.23 diff -u -r1.23 svr4_misc.c --- sys/compat/svr4/svr4_misc.c 2001/01/27 00:01:28 1.23 +++ sys/compat/svr4/svr4_misc.c 2001/02/07 03:01:09 @@ -1141,7 +1141,7 @@ if (p) { i.si_pid = p->p_pid; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat == SZOMB) { i.si_stime = p->p_ru->ru_stime.tv_sec; i.si_utime = p->p_ru->ru_utime.tv_sec; @@ -1150,7 +1150,7 @@ i.si_stime = p->p_stats->p_ru.ru_stime.tv_sec; i.si_utime = p->p_stats->p_ru.ru_utime.tv_sec; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } if (WIFEXITED(st)) { @@ -1226,10 +1226,10 @@ } nfound++; PROC_LOCK(q); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (q->p_stat == SZOMB && ((SCARG(uap, options) & (SVR4_WEXITED|SVR4_WTRAPPED)))) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_UNLOCK(q); PROCTREE_LOCK(PT_RELEASE); *retval = 0; @@ -1357,7 +1357,7 @@ if (q->p_stat == SSTOP && (q->p_flag & P_WAITED) == 0 && (q->p_flag & P_TRACED || (SCARG(uap, options) & (SVR4_WSTOPPED|SVR4_WCONTINUED)))) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); DPRINTF(("jobcontrol %d\n", q->p_pid)); if (((SCARG(uap, options) & SVR4_WNOWAIT)) == 0) q->p_flag |= P_WAITED; @@ -1366,7 +1366,7 @@ return svr4_setinfo(q, 
W_STOPCODE(q->p_xstat), SCARG(uap, info)); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_UNLOCK(q); } Index: sys/dev/acpica/Osd/OsdSynch.c =================================================================== RCS file: /home/ncvs/src/sys/dev/acpica/Osd/OsdSynch.c,v retrieving revision 1.4 diff -u -r1.4 OsdSynch.c --- sys/dev/acpica/Osd/OsdSynch.c 2001/01/31 09:35:50 1.4 +++ sys/dev/acpica/Osd/OsdSynch.c 2001/02/07 03:01:09 @@ -139,7 +139,7 @@ tmo = 1; } - mtx_enter(&as->as_mtx, MTX_DEF); + mtx_lock(&as->as_mtx); DEBUG_PRINT(TRACE_MUTEX, ("get %d units from semaphore %p (has %d), timeout %d\n", Units, as, as->as_units, Timeout)); for (;;) { @@ -163,7 +163,7 @@ break; } } - mtx_exit(&as->as_mtx, MTX_DEF); + mtx_unlock(&as->as_mtx); return_ACPI_STATUS(result); #else @@ -182,14 +182,14 @@ if (as == NULL) return_ACPI_STATUS(AE_BAD_PARAMETER); - mtx_enter(&as->as_mtx, MTX_DEF); + mtx_lock(&as->as_mtx); DEBUG_PRINT(TRACE_MUTEX, ("return %d units to semaphore %p (has %d)\n", Units, as, as->as_units)); as->as_units += Units; if (as->as_units > as->as_maxunits) as->as_units = as->as_maxunits; wakeup(as); - mtx_exit(&as->as_mtx, MTX_DEF); + mtx_unlock(&as->as_mtx); return_ACPI_STATUS(AE_OK); #else return(AE_OK); Index: sys/dev/an/if_anreg.h =================================================================== RCS file: /home/ncvs/src/sys/dev/an/if_anreg.h,v retrieving revision 1.4 diff -u -r1.4 if_anreg.h --- sys/dev/an/if_anreg.h 2000/12/08 19:00:10 1.4 +++ sys/dev/an/if_anreg.h 2001/02/07 03:01:09 @@ -844,8 +844,8 @@ device_t an_dev; }; -#define AN_LOCK(_sc) mtx_enter(&(_sc)->an_mtx, MTX_DEF) -#define AN_UNLOCK(_sc) mtx_exit(&(_sc)->an_mtx, MTX_DEF) +#define AN_LOCK(_sc) mtx_lock(&(_sc)->an_mtx) +#define AN_UNLOCK(_sc) mtx_unlock(&(_sc)->an_mtx) void an_release_resources __P((device_t)); int an_alloc_port __P((device_t, int, int)); Index: sys/dev/ichsmb/ichsmb.c =================================================================== RCS file: /home/ncvs/src/sys/dev/ichsmb/ichsmb.c,v retrieving revision 1.3 diff -u -r1.3 ichsmb.c --- sys/dev/ichsmb/ichsmb.c 2001/01/26 03:18:17 1.3 +++ sys/dev/ichsmb/ichsmb.c 2001/02/07 03:01:10 @@ -167,7 +167,7 @@ switch (how) { case SMB_QREAD: case SMB_QWRITE: - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_QUICK; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | (how == SMB_QREAD ? 
@@ -175,7 +175,7 @@ bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); break; default: smb_error = SMB_ENOTSUPP; @@ -193,7 +193,7 @@ DBG("slave=0x%02x byte=0x%02x\n", slave, (u_char)byte); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_WRITE); @@ -201,7 +201,7 @@ bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } @@ -215,7 +215,7 @@ DBG("slave=0x%02x\n", slave); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_READ); @@ -223,7 +223,7 @@ ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) *byte = bus_space_read_1(sc->io_bst, sc->io_bsh, ICH_D0); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte); return (smb_error); } @@ -238,7 +238,7 @@ slave, (u_char)cmd, (u_char)byte); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE_DATA; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_WRITE); @@ -247,7 +247,7 @@ bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } @@ -262,7 +262,7 @@ slave, (u_char)cmd, (u_int16_t)word); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_WORD_DATA; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_WRITE); @@ -272,7 +272,7 @@ bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } @@ -286,7 +286,7 @@ DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BYTE_DATA; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_READ); @@ -295,7 +295,7 @@ ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) *byte = bus_space_read_1(sc->io_bst, sc->io_bsh, ICH_D0); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d byte=0x%02x\n", smb_error, (u_char)*byte); return (smb_error); } @@ -309,7 +309,7 @@ DBG("slave=0x%02x cmd=0x%02x\n", slave, (u_char)cmd); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , 
sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_WORD_DATA; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_READ); @@ -322,7 +322,7 @@ | (bus_space_read_1(sc->io_bst, sc->io_bsh, ICH_D1) << 8); } - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d word=0x%04x\n", smb_error, (u_int16_t)*word); return (smb_error); } @@ -337,7 +337,7 @@ slave, (u_char)cmd, (u_int16_t)sdata); KASSERT(sc->ich_cmd == -1, ("%s: ich_cmd=%d\n", __FUNCTION__ , sc->ich_cmd)); - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_PROC_CALL; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_WRITE); @@ -352,7 +352,7 @@ | (bus_space_read_1(sc->io_bst, sc->io_bsh, ICH_D1) << 8); } - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d rdata=0x%04x\n", smb_error, (u_int16_t)*rdata); return (smb_error); } @@ -388,7 +388,7 @@ sc->block_index = 1; sc->block_write = 1; - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BLOCK; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_WRITE); @@ -398,7 +398,7 @@ bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_CNT, ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); smb_error = ichsmb_wait(sc); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); return (smb_error); } @@ -419,7 +419,7 @@ sc->block_index = 0; sc->block_write = 0; - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); sc->ich_cmd = ICH_HST_CNT_SMB_CMD_BLOCK; bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_XMIT_SLVA, (slave << 1) | ICH_XMIT_SLVA_READ); @@ -429,7 +429,7 @@ ICH_HST_CNT_START | ICH_HST_CNT_INTREN | sc->ich_cmd); if ((smb_error = ichsmb_wait(sc)) == SMB_ENOERR) bcopy(sc->block_data, buf, sc->block_count); - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); DBG("smb_error=%d\n", smb_error); #if ICHSMB_DEBUG #define DISP(ch) (((ch) < 0x20 || (ch) >= 0x7e) ? '.' : (ch)) @@ -491,7 +491,7 @@ int cmd_index; int count; - mtx_enter(&sc->mutex, MTX_DEF); + mtx_lock(&sc->mutex); for (count = 0; count < maxloops; count++) { /* Get and reset status bits */ @@ -603,7 +603,7 @@ /* Clear status bits and try again */ bus_space_write_1(sc->io_bst, sc->io_bsh, ICH_HST_STA, status); } - mtx_exit(&sc->mutex, MTX_DEF); + mtx_unlock(&sc->mutex); /* Too many loops? 
*/ if (count == maxloops) { Index: sys/dev/isp/isp_freebsd.c =================================================================== RCS file: /home/ncvs/src/sys/dev/isp/isp_freebsd.c,v retrieving revision 1.55 diff -u -r1.55 isp_freebsd.c --- sys/dev/isp/isp_freebsd.c 2001/01/16 07:15:36 1.55 +++ sys/dev/isp/isp_freebsd.c 2001/02/07 03:01:10 @@ -1992,9 +1992,9 @@ XS_CMD_S_CLEAR(sccb); ISP_UNLOCK(isp); #ifdef ISP_SMPLOCK - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); xpt_done((union ccb *) sccb); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #else xpt_done((union ccb *) sccb); #endif Index: sys/dev/isp/isp_freebsd.h =================================================================== RCS file: /home/ncvs/src/sys/dev/isp/isp_freebsd.h,v retrieving revision 1.44 diff -u -r1.44 isp_freebsd.h --- sys/dev/isp/isp_freebsd.h 2001/01/15 18:36:09 1.44 +++ sys/dev/isp/isp_freebsd.h 2001/02/07 03:01:10 @@ -124,8 +124,8 @@ */ #ifdef ISP_SMPLOCK -#define ISP_LOCK(x) mtx_enter(&(x)->isp_osinfo.lock, MTX_DEF) -#define ISP_UNLOCK(x) mtx_exit(&(x)->isp_osinfo.lock, MTX_DEF) +#define ISP_LOCK(x) mtx_lock(&(x)->isp_osinfo.lock) +#define ISP_UNLOCK(x) mtx_unlock(&(x)->isp_osinfo.lock) #else #define ISP_LOCK isp_lock #define ISP_UNLOCK isp_unlock Index: sys/dev/pccbb/pccbb.c =================================================================== RCS file: /home/ncvs/src/sys/dev/pccbb/pccbb.c,v retrieving revision 1.10 diff -u -r1.10 pccbb.c --- sys/dev/pccbb/pccbb.c 2001/01/07 16:31:09 1.10 +++ sys/dev/pccbb/pccbb.c 2001/02/07 03:01:10 @@ -525,13 +525,13 @@ if (error > 0) return ENXIO; - mtx_enter(&sc->sc_mtx, MTX_DEF); + mtx_lock(&sc->sc_mtx); bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_intrhand); sc->sc_flags |= PCCBB_KTHREAD_DONE; if (sc->sc_flags & PCCBB_KTHREAD_RUNNING) { wakeup(sc); - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); DEVPRINTF((dev, "waiting for kthread exit...")); error = tsleep(sc, PWAIT, "pccbb-detach-wait", 60 * hz); if (error) @@ -539,7 +539,7 @@ else DPRINTF(("done\n")); } else - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq_res); bus_release_resource(dev, SYS_RES_MEMORY, PCCBBR_SOCKBASE, @@ -567,17 +567,17 @@ sc->sc_cbdev = devlist[tmp]; if ((sc->sc_socketreg->socket_state & PCCBB_SOCKET_STAT_CD) == 0) { - mtx_enter(&sc->sc_mtx, MTX_DEF); + mtx_lock(&sc->sc_mtx); wakeup(sc); - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); } } else if (strcmp(driver->name, "pccard") == 0) { sc->sc_pccarddev = devlist[tmp]; if ((sc->sc_socketreg->socket_state & PCCBB_SOCKET_STAT_CD) == 0) { - mtx_enter(&sc->sc_mtx, MTX_DEF); + mtx_lock(&sc->sc_mtx); wakeup(sc); - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); } } else device_printf(dev, @@ -610,7 +610,7 @@ struct pccbb_softc *sc = arg; u_int32_t status; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); for(;;) { if (!(sc->sc_flags & PCCBB_KTHREAD_RUNNING)) sc->sc_flags |= PCCBB_KTHREAD_RUNNING; @@ -623,7 +623,7 @@ */ tsleep (&sc->sc_flags, PWAIT, "pccbbev", 1*hz); } - mtx_enter(&sc->sc_mtx, MTX_DEF); + mtx_lock(&sc->sc_mtx); if (sc->sc_flags & PCCBB_KTHREAD_DONE) break; @@ -633,9 +633,9 @@ } else { pccbb_removal(sc); } - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); } - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); sc->sc_flags &= ~PCCBB_KTHREAD_RUNNING; wakeup(sc); kthread_exit(0); @@ -738,9 +738,9 @@ sc->sc_socketreg->socket_event = sockevent | 0x01; if (sockevent & PCCBB_SOCKET_EVENT_CD) { - mtx_enter(&sc->sc_mtx, 
MTX_DEF); + mtx_lock(&sc->sc_mtx); wakeup(sc); - mtx_exit(&sc->sc_mtx, MTX_DEF); + mtx_unlock(&sc->sc_mtx); } else { if (sockevent & PCCBB_SOCKET_EVENT_CSTS) { DPRINTF((" cstsevent occures, 0x%08x\n", Index: sys/dev/random/harvest.c =================================================================== RCS file: /home/ncvs/src/sys/dev/random/harvest.c,v retrieving revision 1.10 diff -u -r1.10 harvest.c --- sys/dev/random/harvest.c 2001/01/09 04:33:08 1.10 +++ sys/dev/random/harvest.c 2001/02/07 03:01:10 @@ -123,7 +123,7 @@ random_set_wakeup_exit(void *control) { wakeup(control); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); kthread_exit(0); /* NOTREACHED */ } Index: sys/dev/random/yarrow.c =================================================================== RCS file: /home/ncvs/src/sys/dev/random/yarrow.c,v retrieving revision 1.28 diff -u -r1.28 yarrow.c --- sys/dev/random/yarrow.c 2001/01/14 17:50:15 1.28 +++ sys/dev/random/yarrow.c 2001/02/07 03:01:10 @@ -96,10 +96,10 @@ struct source *source; #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("OWNERSHIP Giant == %d sched_lock == %d\n", mtx_owned(&Giant), mtx_owned(&sched_lock)); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif for (pl = 0; pl < 2; pl++) @@ -114,11 +114,11 @@ else { #ifdef DEBUG1 - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("HARVEST src=%d bits=%d/%d pool=%d count=%lld\n", event->source, event->bits, event->frac, event->pool, event->somecounter); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif /* Suck the harvested entropy out of the queue and hash @@ -160,9 +160,9 @@ /* Is the thread scheduled for a shutdown? */ if (random_kthread_control != 0) { #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random kthread setting terminate\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif random_set_wakeup_exit(&random_kthread_control); /* NOTREACHED */ @@ -179,9 +179,9 @@ int error; #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random initialise\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif /* This can be turned off by the very paranoid @@ -213,9 +213,9 @@ random_init_harvester(random_harvest_internal, read_random_real); #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random initalise finish\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif return 0; @@ -225,31 +225,31 @@ random_deinit(void) { #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random deinitalise\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif /* Deregister the randomness harvesting routine */ random_deinit_harvester(); #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random deinitalise waiting for thread to terminate\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif /* Command the hash/reseed thread to end and wait for it to finish */ - mtx_enter(&harvestring.lockout_mtx, MTX_DEF); + mtx_lock(&harvestring.lockout_mtx); random_kthread_control = -1; msleep((void *)&random_kthread_control, &harvestring.lockout_mtx, PUSER, "rndend", 0); - mtx_exit(&harvestring.lockout_mtx, MTX_DEF); + mtx_unlock(&harvestring.lockout_mtx); #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random deinitalise removing mutexes\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif /* Remove the mutexes */ @@ -257,9 +257,9 @@ mtx_destroy(&harvestring.lockout_mtx); #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); 
printf("Random deinitalise finish\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif } @@ -276,13 +276,13 @@ int i, j; #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Reseed type %d\n", fastslow); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif /* The reseed task must not be jumped on */ - mtx_enter(&random_reseed_mtx, MTX_DEF); + mtx_lock(&random_reseed_mtx); /* 1. Hash the accumulated entropy into v[0] */ @@ -353,12 +353,12 @@ /* XXX Not done here yet */ /* Release the reseed mutex */ - mtx_exit(&random_reseed_mtx, MTX_DEF); + mtx_unlock(&random_reseed_mtx); #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Reseed finish\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif if (!random_state.seeded) { @@ -379,7 +379,7 @@ u_int retval; /* The reseed task must not be jumped on */ - mtx_enter(&random_reseed_mtx, MTX_DEF); + mtx_lock(&random_reseed_mtx); if (gate) { generator_gate(); @@ -423,7 +423,7 @@ cur -= retval; } } - mtx_exit(&random_reseed_mtx, MTX_DEF); + mtx_unlock(&random_reseed_mtx); return retval; } @@ -462,9 +462,9 @@ u_char temp[KEYSIZE]; #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Generator gate\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif for (i = 0; i < KEYSIZE; i += sizeof(random_state.counter)) { @@ -477,9 +477,9 @@ memset((void *)temp, 0, KEYSIZE); #ifdef DEBUG - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Generator gate finish\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif } @@ -495,16 +495,16 @@ int newhead, tail; #ifdef DEBUG1 - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Random harvest\n"); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #endif if (origin < ENTROPYSOURCE) { /* Add the harvested data to the ring buffer, but * do not block. 
*/ - if (mtx_try_enter(&harvestring.lockout_mtx, MTX_DEF)) { + if (mtx_trylock(&harvestring.lockout_mtx)) { tail = atomic_load_acq_int(&harvestring.tail); newhead = (harvestring.head + 1) % HARVEST_RING_SIZE; @@ -533,7 +533,7 @@ wakeup(&harvestring.head); } - mtx_exit(&harvestring.lockout_mtx, MTX_DEF); + mtx_unlock(&harvestring.lockout_mtx); } Index: sys/dev/usb/if_auereg.h =================================================================== RCS file: /home/ncvs/src/sys/dev/usb/if_auereg.h,v retrieving revision 1.6 diff -u -r1.6 if_auereg.h --- sys/dev/usb/if_auereg.h 2000/10/24 22:38:54 1.6 +++ sys/dev/usb/if_auereg.h 2001/02/07 03:01:10 @@ -249,8 +249,8 @@ struct mtx aue_mtx; }; -#define AUE_LOCK(_sc) mtx_enter(&(_sc)->aue_mtx, MTX_DEF) -#define AUE_UNLOCK(_sc) mtx_exit(&(_sc)->aue_mtx, MTX_DEF) +#define AUE_LOCK(_sc) mtx_lock(&(_sc)->aue_mtx) +#define AUE_UNLOCK(_sc) mtx_unlock(&(_sc)->aue_mtx) #define AUE_TIMEOUT 1000 #define ETHER_ALIGN 2 Index: sys/dev/usb/if_cuereg.h =================================================================== RCS file: /home/ncvs/src/sys/dev/usb/if_cuereg.h,v retrieving revision 1.6 diff -u -r1.6 if_cuereg.h --- sys/dev/usb/if_cuereg.h 2000/10/24 22:38:54 1.6 +++ sys/dev/usb/if_cuereg.h 2001/02/07 03:01:10 @@ -182,5 +182,5 @@ struct mtx cue_mtx; }; -#define CUE_LOCK(_sc) mtx_enter(&(_sc)->cue_mtx, MTX_DEF) -#define CUE_UNLOCK(_sc) mtx_exit(&(_sc)->cue_mtx, MTX_DEF) +#define CUE_LOCK(_sc) mtx_lock(&(_sc)->cue_mtx) +#define CUE_UNLOCK(_sc) mtx_unlock(&(_sc)->cue_mtx) Index: sys/dev/usb/if_kuereg.h =================================================================== RCS file: /home/ncvs/src/sys/dev/usb/if_kuereg.h,v retrieving revision 1.6 diff -u -r1.6 if_kuereg.h --- sys/dev/usb/if_kuereg.h 2000/10/24 22:38:54 1.6 +++ sys/dev/usb/if_kuereg.h 2001/02/07 03:01:10 @@ -173,5 +173,5 @@ struct mtx kue_mtx; }; -#define KUE_LOCK(_sc) mtx_enter(&(_sc)->kue_mtx, MTX_DEF) -#define KUE_UNLOCK(_sc) mtx_exit(&(_sc)->kue_mtx, MTX_DEF) +#define KUE_LOCK(_sc) mtx_lock(&(_sc)->kue_mtx) +#define KUE_UNLOCK(_sc) mtx_unlock(&(_sc)->kue_mtx) Index: sys/dev/vinum/vinumdaemon.c =================================================================== RCS file: /home/ncvs/src/sys/dev/vinum/vinumdaemon.c,v retrieving revision 1.19 diff -u -r1.19 vinumdaemon.c --- sys/dev/vinum/vinumdaemon.c 2001/01/24 10:28:19 1.19 +++ sys/dev/vinum/vinumdaemon.c 2001/02/07 03:01:10 @@ -72,9 +72,9 @@ PROC_LOCK(curproc); curproc->p_flag |= P_SYSTEM; /* we're a system process */ PROC_UNLOCK(curproc); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); curproc->p_sflag |= PS_INMEM; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); daemon_save_config(); /* start by saving the configuration */ daemonpid = curproc->p_pid; /* mark our territory */ while (1) { Index: sys/dev/vinum/vinumlock.c =================================================================== RCS file: /home/ncvs/src/sys/dev/vinum/vinumlock.c,v retrieving revision 1.21 diff -u -r1.21 vinumlock.c --- sys/dev/vinum/vinumlock.c 2001/01/14 06:34:57 1.21 +++ sys/dev/vinum/vinumlock.c 2001/02/07 03:01:10 @@ -132,7 +132,7 @@ * increment all addresses by 1. 
*/ stripe++; - mtx_enter(&plex->lockmtx, MTX_DEF); + mtx_lock(&plex->lockmtx); /* Wait here if the table is full */ while (plex->usedlocks == PLEX_LOCKS) /* all in use */ @@ -187,7 +187,7 @@ pos->stripe = stripe; pos->bp = bp; plex->usedlocks++; /* one more lock */ - mtx_exit(&plex->lockmtx, MTX_DEF); + mtx_unlock(&plex->lockmtx); #ifdef VINUMDEBUG if (debug & DEBUG_LASTREQS) logrq(loginfo_lock, (union rqinfou) pos, bp); Index: sys/dev/wi/if_wireg.h =================================================================== RCS file: /home/ncvs/src/sys/dev/wi/if_wireg.h,v retrieving revision 1.10 diff -u -r1.10 if_wireg.h --- sys/dev/wi/if_wireg.h 2000/12/15 23:34:13 1.10 +++ sys/dev/wi/if_wireg.h 2001/02/07 03:01:10 @@ -128,8 +128,8 @@ int wi_prism2; /* set to 1 if it uses a Prism II chip */ }; -#define WI_LOCK(_sc) mtx_enter(&(_sc)->wi_mtx, MTX_DEF) -#define WI_UNLOCK(_sc) mtx_exit(&(_sc)->wi_mtx, MTX_DEF) +#define WI_LOCK(_sc) mtx_lock(&(_sc)->wi_mtx) +#define WI_UNLOCK(_sc) mtx_unlock(&(_sc)->wi_mtx) #define WI_TIMEOUT 65536 Index: sys/fs/hpfs/hpfs_hash.c =================================================================== RCS file: /home/ncvs/src/sys/fs/hpfs/hpfs_hash.c,v retrieving revision 1.8 diff -u -r1.8 hpfs_hash.c --- sys/fs/hpfs/hpfs_hash.c 2001/02/04 11:53:50 1.8 +++ sys/fs/hpfs/hpfs_hash.c 2001/02/07 03:01:10 @@ -92,11 +92,11 @@ { struct hpfsnode *hp; - mtx_enter(&hpfs_hphash_mtx, MTX_DEF); + mtx_lock(&hpfs_hphash_mtx); LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) if (ino == hp->h_no && dev == hp->h_dev) break; - mtx_exit(&hpfs_hphash_mtx, MTX_DEF); + mtx_unlock(&hpfs_hphash_mtx); return (hp); } @@ -110,14 +110,14 @@ struct hpfsnode *hp; loop: - mtx_enter(&hpfs_hphash_mtx, MTX_DEF); + mtx_lock(&hpfs_hphash_mtx); LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) { if (ino == hp->h_no && dev == hp->h_dev) { LOCKMGR(&hp->h_intlock, LK_EXCLUSIVE | LK_INTERLOCK, &hpfs_hphash_slock, NULL); return (hp); } } - mtx_exit(&hpfs_hphash_mtx, MTX_DEF); + mtx_unlock(&hpfs_hphash_mtx); return (hp); } #endif @@ -132,18 +132,18 @@ struct vnode *vp; loop: - mtx_enter(&hpfs_hphash_mtx, MTX_DEF); + mtx_lock(&hpfs_hphash_mtx); LIST_FOREACH(hp, HPNOHASH(dev, ino), h_hash) { if (ino == hp->h_no && dev == hp->h_dev) { vp = HPTOV(hp); - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&hpfs_hphash_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&hpfs_hphash_mtx); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) goto loop; return (vp); } } - mtx_exit(&hpfs_hphash_mtx, MTX_DEF); + mtx_unlock(&hpfs_hphash_mtx); return (NULLVP); } @@ -156,11 +156,11 @@ { struct hphashhead *hpp; - mtx_enter(&hpfs_hphash_mtx, MTX_DEF); + mtx_lock(&hpfs_hphash_mtx); hpp = HPNOHASH(hp->h_dev, hp->h_no); hp->h_flag |= H_HASHED; LIST_INSERT_HEAD(hpp, hp, h_hash); - mtx_exit(&hpfs_hphash_mtx, MTX_DEF); + mtx_unlock(&hpfs_hphash_mtx); } /* @@ -170,10 +170,10 @@ hpfs_hphashrem(hp) struct hpfsnode *hp; { - mtx_enter(&hpfs_hphash_mtx, MTX_DEF); + mtx_lock(&hpfs_hphash_mtx); if (hp->h_flag & H_HASHED) { hp->h_flag &= ~H_HASHED; LIST_REMOVE(hp, h_hash); } - mtx_exit(&hpfs_hphash_mtx, MTX_DEF); + mtx_unlock(&hpfs_hphash_mtx); } Index: sys/gnu/ext2fs/ext2_vfsops.c =================================================================== RCS file: /home/ncvs/src/sys/gnu/ext2fs/ext2_vfsops.c,v retrieving revision 1.75 diff -u -r1.75 ext2_vfsops.c --- sys/gnu/ext2fs/ext2_vfsops.c 2001/02/04 13:12:06 1.75 +++ sys/gnu/ext2fs/ext2_vfsops.c 2001/02/07 03:01:10 @@ -561,10 +561,10 @@ brelse(bp); loop: - mtx_enter(&mntvnode_mtx, MTX_DEF); + 
mtx_lock(&mntvnode_mtx); for (vp = LIST_FIRST(&mountp->mnt_vnodelist); vp != NULL; vp = nvp) { if (vp->v_mount != mountp) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); goto loop; } nvp = LIST_NEXT(vp, v_mntvnodes); @@ -576,8 +576,8 @@ /* * Step 5: invalidate all cached file data. */ - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&mntvnode_mtx); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) { goto loop; } @@ -599,9 +599,9 @@ &ip->i_din); brelse(bp); vput(vp); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); return (0); } @@ -918,7 +918,7 @@ /* * Write back each (modified) inode. */ - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); loop: for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { /* @@ -927,20 +927,20 @@ */ if (vp->v_mount != mp) goto loop; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); nvp = LIST_NEXT(vp, v_mntvnodes); ip = VTOI(vp); if (vp->v_type == VNON || ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && (TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); continue; } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); if (error) { - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); if (error == ENOENT) goto loop; continue; @@ -949,9 +949,9 @@ allerror = error; VOP_UNLOCK(vp, 0, p); vrele(vp); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); /* * Force stale file system control information to be flushed. */ Index: sys/i386/i386/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/machdep.c,v retrieving revision 1.438 diff -u -r1.438 machdep.c --- sys/i386/i386/machdep.c 2001/02/04 06:19:24 1.438 +++ sys/i386/i386/machdep.c 2001/02/07 03:01:10 @@ -1910,7 +1910,7 @@ * Giant is used early for at least debugger traps and unexpected traps. 
*/ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* make ldt memory segments */ /* Index: sys/i386/i386/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/mp_machdep.c,v retrieving revision 1.144 diff -u -r1.144 mp_machdep.c --- sys/i386/i386/mp_machdep.c 2001/01/30 04:02:27 1.144 +++ sys/i386/i386/mp_machdep.c 2001/02/07 03:01:10 @@ -2268,7 +2268,7 @@ PCPU_SET(curproc, PCPU_GET(idleproc)); /* lock against other AP's that are waking up */ - mtx_enter(&ap_boot_mtx, MTX_SPIN); + mtx_lock_spin(&ap_boot_mtx); /* BSP may have changed PTD while we're waiting for the lock */ cpu_invltlb(); @@ -2317,7 +2317,7 @@ } /* let other AP's wake up now */ - mtx_exit(&ap_boot_mtx, MTX_SPIN); + mtx_unlock_spin(&ap_boot_mtx); /* wait until all the AP's are up */ while (smp_started == 0) @@ -2328,7 +2328,7 @@ /* ok, now grab sched_lock and enter the scheduler */ enable_intr(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); cpu_throw(); /* doesn't return */ panic("scheduler returned us to ap_init"); @@ -2662,14 +2662,14 @@ return; if (!forward_signal_enabled) return; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); while (1) { if (p->p_stat != SRUN) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } id = p->p_oncpu; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (id == 0xff) return; map = (1<<id); [...] if (id == p->p_oncpu) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } } @@ -2867,7 +2867,7 @@ { /* obtain rendezvous lock */ - mtx_enter(&smp_rv_mtx, MTX_SPIN); + mtx_lock_spin(&smp_rv_mtx); /* set static function pointers */ smp_rv_setup_func = setup_func; @@ -2886,7 +2886,7 @@ smp_rendezvous_action(); /* release lock */ - mtx_exit(&smp_rv_mtx, MTX_SPIN); + mtx_unlock_spin(&smp_rv_mtx); } void Index: sys/i386/i386/mpapic.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/mpapic.c,v retrieving revision 1.46 diff -u -r1.46 mpapic.c --- sys/i386/i386/mpapic.c 2001/01/28 01:07:53 1.46 +++ sys/i386/i386/mpapic.c 2001/02/07 03:01:10 @@ -210,11 +210,11 @@ * shouldn't and stop the carnage.
*/ vector = NRSVIDT + pin; /* IDT vec */ - mtx_enter(&imen_mtx, MTX_SPIN); + mtx_lock_spin(&imen_mtx); io_apic_write(apic, select, (io_apic_read(apic, select) & ~IOART_INTMASK & ~0xff)|IOART_INTMSET|vector); - mtx_exit(&imen_mtx, MTX_SPIN); + mtx_unlock_spin(&imen_mtx); /* we only deal with vectored INTs here */ if (apic_int_type(apic, pin) != 0) @@ -258,10 +258,10 @@ printf("IOAPIC #%d intpin %d -> irq %d\n", apic, pin, irq); vector = NRSVIDT + irq; /* IDT vec */ - mtx_enter(&imen_mtx, MTX_SPIN); + mtx_lock_spin(&imen_mtx); io_apic_write(apic, select, flags | vector); io_apic_write(apic, select + 1, target); - mtx_exit(&imen_mtx, MTX_SPIN); + mtx_unlock_spin(&imen_mtx); } int Index: sys/i386/i386/procfs_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/procfs_machdep.c,v retrieving revision 1.15 diff -u -r1.15 procfs_machdep.c --- sys/i386/i386/procfs_machdep.c 2001/01/24 09:49:49 1.15 +++ sys/i386/i386/procfs_machdep.c 2001/02/07 03:01:10 @@ -86,12 +86,12 @@ struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_regs(p, regs)); } @@ -101,12 +101,12 @@ struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_regs(p, regs)); } @@ -116,12 +116,12 @@ struct dbreg *dbregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_dbregs(p, dbregs)); } @@ -131,12 +131,12 @@ struct dbreg *dbregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_dbregs(p, dbregs)); } @@ -151,12 +151,12 @@ struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_fpregs(p, fpregs)); } @@ -166,12 +166,12 @@ struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_fpregs(p, fpregs)); } @@ -180,11 +180,11 @@ struct proc *p; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (ptrace_single_step(p)); } Index: sys/i386/i386/trap.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/trap.c,v retrieving revision 1.170 diff -u -r1.170 trap.c --- sys/i386/i386/trap.c 2001/02/06 
11:20:24 1.170 +++ sys/i386/i386/trap.c 2001/02/07 03:01:10 @@ -174,11 +174,11 @@ while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_priority = p->p_usrpri; if (resched_wanted()) { /* @@ -193,30 +193,30 @@ setrunqueue(p); p->p_stats->p_ru.ru_nivcsw++; mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } /* * Charge system time if profiling. */ if (p->p_sflag & PS_PROFIL) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); /* XXX - do we need Giant? */ if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, frame->tf_eip, (u_int)(p->p_sticks - oticks) * psratio); } curpriority = p->p_priority; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -281,9 +281,9 @@ ((frame.tf_eflags & PSL_VM) && !in_vm86call)) { /* user trap */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_regs = &frame; switch (type) { @@ -311,9 +311,9 @@ case T_PROTFLT: /* general protection fault */ case T_STKFLT: /* stack fault */ if (frame.tf_eflags & PSL_VM) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); i = vm86_emulate((struct vm86frame *)&frame); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); if (i == 0) goto user; break; @@ -338,9 +338,9 @@ */ eva = rcr2(); enable_intr(); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); i = trap_pfault(&frame, TRUE, eva); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #if defined(I586_CPU) && !defined(NO_F00F_HACK) if (i == -2) { /* @@ -370,13 +370,13 @@ #ifndef TIMER_FREQ # define TIMER_FREQ 1193182 #endif - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if (time_second - lastalert > 10) { log(LOG_WARNING, "NMI: power fail\n"); sysbeep(TIMER_FREQ/880, hz); lastalert = time_second; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; #else /* !POWERFAIL_NMI */ /* machine/parity/power fail/"kitchen sink" faults */ @@ -420,9 +420,9 @@ ucode = FPE_FPU_NP_TRAP; break; } - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); i = (*pmath_emulate)(&frame); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); if (i == 0) { if (!(frame.tf_eflags & PSL_T)) goto out; @@ -451,9 +451,9 @@ */ eva = rcr2(); enable_intr(); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); (void) trap_pfault(&frame, FALSE, eva); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; case T_DNA: @@ -476,9 +476,9 @@ case T_PROTFLT: /* general protection fault */ case T_STKFLT: /* stack fault */ if (frame.tf_eflags & PSL_VM) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); i = vm86_emulate((struct vm86frame *)&frame); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); if (i != 0) /* * returns to original process @@ -509,9 +509,9 @@ */ if (frame.tf_eip == (int)cpu_switch_load_gs) { PCPU_GET(curpcb)->pcb_gs = 0; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGBUS); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } @@ -620,13 +620,13 @@ #ifdef DEV_ISA case T_NMI: #ifdef POWERFAIL_NMI - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if 
(time_second - lastalert > 10) { log(LOG_WARNING, "NMI: power fail\n"); sysbeep(TIMER_FREQ/880, hz); lastalert = time_second; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; #else /* !POWERFAIL_NMI */ /* XXX Giant */ @@ -650,13 +650,13 @@ #endif /* DEV_ISA */ } - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); trap_fatal(&frame, eva); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* Translate fault for emulators (e.g. Linux) */ if (*p->p_sysent->sv_transtrap) i = (*p->p_sysent->sv_transtrap)(i, type); @@ -672,12 +672,12 @@ uprintf("\n"); } #endif - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); user: userret(p, &frame, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); out: return; } @@ -1102,15 +1102,15 @@ #ifdef DIAGNOSTIC if (ISPL(frame.tf_cs) != SEL_UPL) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); panic("syscall"); /* NOT REACHED */ } #endif - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_regs = &frame; params = (caddr_t)frame.tf_esp + sizeof(int); @@ -1120,9 +1120,9 @@ /* * The prep code is not MP aware. */ - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); (*p->p_sysent->sv_prepsyscall)(&frame, args, &code, &params); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } else { /* * Need to check if this is a 32 bit or 64 bit syscall. @@ -1159,7 +1159,7 @@ */ if (params && (i = narg * sizeof(int)) && (error = copyin(params, (caddr_t)args, (u_int)i))) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); #ifdef KTRACE if (KTRPOINT(p, KTR_SYSCALL)) ktrsyscall(p->p_tracep, code, narg, args); @@ -1173,13 +1173,13 @@ * we are ktracing */ if ((callp->sy_narg & SYF_MPSAFE) == 0) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); } #ifdef KTRACE if (KTRPOINT(p, KTR_SYSCALL)) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ktrsyscall(p->p_tracep, code, narg, args); } #endif @@ -1229,7 +1229,7 @@ */ if ((frame.tf_eflags & PSL_T) && !(frame.tf_eflags & PSL_VM)) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); frame.tf_eflags &= ~PSL_T; trapsignal(p, SIGTRAP, 0); } @@ -1242,7 +1242,7 @@ #ifdef KTRACE if (KTRPOINT(p, KTR_SYSRET)) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ktrsysret(p->p_tracep, code, error, p->p_retval[0]); } #endif @@ -1258,7 +1258,7 @@ * Release Giant if we had to get it */ if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #ifdef WITNESS if (witness_list(p)) { @@ -1277,38 +1277,38 @@ struct proc *p = CURPROC; u_quad_t sticks; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; astoff(); atomic_add_int(&cnt.v_soft, 1); if (p->p_sflag & PS_OWEUPC) { p->p_sflag &= ~PS_OWEUPC; - mtx_exit(&sched_lock, MTX_SPIN); - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, p->p_stats->p_prof.pr_addr, p->p_stats->p_prof.pr_ticks); } if (p->p_sflag & PS_ALRMPEND) { p->p_sflag &= ~PS_ALRMPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGVTALRM); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } if (p->p_sflag & PS_PROFPEND) { p->p_sflag &= ~PS_PROFPEND; - mtx_exit(&sched_lock,
MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGPROF); } else - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); userret(p, &frame, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } Index: sys/i386/i386/vm86.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/vm86.c,v retrieving revision 1.37 diff -u -r1.37 vm86.c --- sys/i386/i386/vm86.c 2001/01/21 07:52:14 1.37 +++ sys/i386/i386/vm86.c 2001/02/07 03:01:10 @@ -576,9 +576,9 @@ return (EINVAL); vmf->vmf_trapno = intnum; - mtx_enter(&vm86pcb_lock, MTX_DEF); + mtx_lock(&vm86pcb_lock); retval = vm86_bioscall(vmf); - mtx_exit(&vm86pcb_lock, MTX_DEF); + mtx_unlock(&vm86pcb_lock); return (retval); } @@ -606,9 +606,9 @@ } vmf->vmf_trapno = intnum; - mtx_enter(&vm86pcb_lock, MTX_DEF); + mtx_lock(&vm86pcb_lock); retval = vm86_bioscall(vmf); - mtx_exit(&vm86pcb_lock, MTX_DEF); + mtx_unlock(&vm86pcb_lock); for (i = 0; i < vmc->npages; i++) { entry = vmc->pmap[i].pte_num; Index: sys/i386/i386/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/i386/vm_machdep.c,v retrieving revision 1.150 diff -u -r1.150 vm_machdep.c --- sys/i386/i386/vm_machdep.c 2001/01/29 09:38:38 1.150 +++ sys/i386/i386/vm_machdep.c 2001/02/07 03:01:10 @@ -261,8 +261,8 @@ reset_dbregs(); pcb->pcb_flags &= ~PCB_DBREGS; } - mtx_enter(&sched_lock, MTX_SPIN); - mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH); + mtx_lock_spin(&sched_lock); + mtx_unlock_flags(&Giant, MTX_NOSWITCH); mtx_assert(&Giant, MA_NOTOWNED); /* @@ -574,7 +574,7 @@ if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count)) return(0); - if (mtx_try_enter(&Giant, MTX_DEF)) { + if (mtx_trylock(&Giant)) { s = splvm(); zero_state = 0; m = vm_page_list_find(PQ_FREE, free_rover, FALSE); @@ -597,7 +597,7 @@ } free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK; splx(s); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); return (1); } return (0); Index: sys/i386/include/cpu.h =================================================================== RCS file: /home/ncvs/src/sys/i386/include/cpu.h,v retrieving revision 1.56 diff -u -r1.56 cpu.h --- sys/i386/include/cpu.h 2001/01/24 09:56:49 1.56 +++ sys/i386/include/cpu.h 2001/02/07 03:01:10 @@ -92,9 +92,9 @@ * counter in the proc table and flag isn't really necessary. */ #define need_proftick(p) do { \ - mtx_enter(&sched_lock, MTX_SPIN); \ + mtx_lock_spin(&sched_lock); \ (p)->p_sflag |= PS_OWEUPC; \ - mtx_exit(&sched_lock, MTX_SPIN); \ + mtx_unlock_spin(&sched_lock); \ aston(); \ } while (0) Index: sys/i386/include/lock.h =================================================================== RCS file: /home/ncvs/src/sys/i386/include/lock.h,v retrieving revision 1.17 diff -u -r1.17 lock.h --- sys/i386/include/lock.h 2001/01/24 12:35:48 1.17 +++ sys/i386/include/lock.h 2001/02/07 03:01:10 @@ -39,8 +39,8 @@ /* * Protects the IO APIC and apic_imen as a critical region. */ -#define IMASK_LOCK MTX_ENTER(_imen_mtx, MTX_SPIN) -#define IMASK_UNLOCK MTX_EXIT(_imen_mtx, MTX_SPIN) +#define IMASK_LOCK MTX_LOCK_SPIN(_imen_mtx, 0) +#define IMASK_UNLOCK MTX_UNLOCK_SPIN(_imen_mtx) #else /* SMP */ @@ -62,8 +62,8 @@ * XXX should rc (RISCom/8) use this? 
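The lock.h hunks here (IMASK_LOCK above, COM_LOCK just below) show the rename in miniature: the lock class moves out of a flag argument and into the operation name. A minimal before/after sketch of the mapping applied throughout this patch; the two halves are not meant to coexist in one source tree, and the mutexes m_sleep and m_spin are hypothetical:

	static struct mtx m_sleep;	/* assume mtx_init(&m_sleep, "example sleep", MTX_DEF) */
	static struct mtx m_spin;	/* assume mtx_init(&m_spin, "example spin", MTX_SPIN) */

	static void
	rename_example(void)
	{
		/* Old interface: the lock class is repeated at every call site. */
		mtx_enter(&m_sleep, MTX_DEF);
		mtx_exit(&m_sleep, MTX_DEF);
		mtx_enter(&m_spin, MTX_SPIN);
		mtx_exit(&m_spin, MTX_SPIN);

		/* New interface: the class is encoded in the operation name. */
		mtx_lock(&m_sleep);
		mtx_unlock(&m_sleep);
		mtx_lock_spin(&m_spin);
		mtx_unlock_spin(&m_spin);

		/* Extra options move to the explicit *_flags variants. */
		mtx_lock(&m_sleep);
		mtx_unlock_flags(&m_sleep, MTX_NOSWITCH);	/* was mtx_exit(&m, MTX_DEF | MTX_NOSWITCH) */
	}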
*/ #ifdef USE_COMLOCK -#define COM_LOCK() mtx_enter(&com_mtx, MTX_SPIN) -#define COM_UNLOCK() mtx_exit(&com_mtx, MTX_SPIN) +#define COM_LOCK() mtx_lock_spin(&com_mtx) +#define COM_UNLOCK() mtx_unlock_spin(&com_mtx) #else #define COM_LOCK() #define COM_UNLOCK() Index: sys/i386/include/mutex.h =================================================================== RCS file: /home/ncvs/src/sys/i386/include/mutex.h,v retrieving revision 1.26 diff -u -r1.26 mutex.h --- sys/i386/include/mutex.h 2001/01/21 22:34:42 1.26 +++ sys/i386/include/mutex.h 2001/02/07 03:01:10 @@ -43,32 +43,17 @@ /* * Debugging */ -#ifdef MUTEX_DEBUG +#define ASS_IEN MPASS2(read_eflags() & PSL_I, "fl & PSL_I") +#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, "!(fl & PSL_I)") +#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, \ + "mpp->mtx_saveintr & PSL_I") -#ifdef _KERN_MUTEX_C_ -char STR_IEN[] = "fl & PSL_I"; -char STR_IDIS[] = "!(fl & PSL_I)"; -char STR_SIEN[] = "mpp->mtx_saveintr & PSL_I"; -#else /* _KERN_MUTEX_C_ */ -extern char STR_IEN[]; -extern char STR_IDIS[]; -extern char STR_SIEN[]; -#endif /* _KERN_MUTEX_C_ */ -#endif /* MUTEX_DEBUG */ - -#define ASS_IEN MPASS2(read_eflags() & PSL_I, STR_IEN) -#define ASS_IDIS MPASS2((read_eflags() & PSL_I) == 0, STR_IDIS) -#define ASS_SIEN(mpp) MPASS2((mpp)->mtx_saveintr & PSL_I, STR_SIEN) - #define mtx_legal2block() (read_eflags() & PSL_I) /* * Assembly macros (for internal use only) *------------------------------------------------------------------------------ */ - -#ifdef _KERN_MUTEX_C_ - #define _V(x) __STRING(x) #if 0 @@ -252,22 +237,80 @@ #undef _V -#endif /* _KERN_MUTEX_C_ */ - #endif /* _KERNEL */ #else /* !LOCORE */ /* * Simple assembly macros to get and release mutexes. + * + * Note: All of these macros accept a "flags" argument and are analogous + * to the mtx_lock_flags and mtx_unlock_flags general macros. If one + * does not wish to pass a flag, the value 0 may be passed as second + * argument. + * + * XXX: We only have MTX_LOCK_SPIN and MTX_UNLOCK_SPIN for now, since that's + * all we use right now. We should add MTX_LOCK and MTX_UNLOCK (for sleep + * locks) in the near future, however. */ +#define MTX_LOCK_SPIN(lck, flags) \ + pushl %eax ; \ + pushl %ecx ; \ + pushl %ebx ; \ + movl $(MTX_UNOWNED) , %eax ; \ + movl PCPU(CURPROC), %ebx ; \ + pushfl ; \ + popl %ecx ; \ + cli ; \ + MPLOCKED cmpxchgl %ebx, lck+MTX_LOCK ; \ + jz 2f ; \ + cmpl lck+MTX_LOCK, %ebx ; \ + je 3f ; \ + pushl $0 ; \ + pushl $0 ; \ + pushl %ecx ; \ + pushl $flags ; \ + pushl $lck ; \ + call _mtx_lock_spin ; \ + addl $0x14, %esp ; \ + jmp 1f ; \ +3: movl lck+MTX_RECURSECNT, %ebx ; \ + incl %ebx ; \ + movl %ebx, lck+MTX_RECURSECNT ; \ + jmp 1f ; \ +2: movl %ecx, lck+MTX_SAVEINTR ; \ +1: popl %ebx ; \ + popl %ecx ; \ + popl %eax + +#define MTX_UNLOCK_SPIN(lck) \ + pushl %edx ; \ + pushl %eax ; \ + movl lck+MTX_SAVEINTR, %edx ; \ + movl lck+MTX_RECURSECNT, %eax ; \ + testl %eax, %eax ; \ + jne 2f ; \ + movl $(MTX_UNOWNED), %eax ; \ + xchgl %eax, lck+MTX_LOCK ; \ + pushl %edx ; \ + popfl ; \ + jmp 1f ; \ +2: decl %eax ; \ + movl %eax, lck+MTX_RECURSECNT ; \ +1: popl %eax ; \ + popl %edx +/* + * XXX: These two are broken right now and need to be made to work for + * XXX: sleep locks, as the above two work for spin locks. We're not in + * XXX: too much of a rush to do these as we do not use them right now.
+ */ #define MTX_ENTER(lck, type) \ pushl $0 ; /* dummy __LINE__ */ \ pushl $0 ; /* dummy __FILE__ */ \ pushl $type ; \ pushl $lck ; \ - call _mtx_enter ; \ + call _mtx_lock_XXX ; \ addl $16,%esp #define MTX_EXIT(lck, type) \ @@ -275,7 +318,7 @@ pushl $0 ; /* dummy __FILE__ */ \ pushl $type ; \ pushl $lck ; \ - call _mtx_exit ; \ + call _mtx_unlock_XXX ; \ addl $16,%esp #endif /* !LOCORE */ Index: sys/i386/include/profile.h =================================================================== RCS file: /home/ncvs/src/sys/i386/include/profile.h,v retrieving revision 1.21 diff -u -r1.21 profile.h --- sys/i386/include/profile.h 2001/01/24 12:35:48 1.21 +++ sys/i386/include/profile.h 2001/02/07 03:01:10 @@ -66,8 +66,8 @@ #ifdef SMP #define MCOUNT_ENTER(s) { s = read_eflags(); \ __asm __volatile("cli" : : : "memory"); \ - mtx_enter(&mcount_mtx, MTX_DEF); } -#define MCOUNT_EXIT(s) { mtx_exit(&mcount_mtx, MTX_DEF); write_eflags(s); } + mtx_lock(&mcount_mtx); } +#define MCOUNT_EXIT(s) { mtx_unlock(&mcount_mtx); write_eflags(s); } #else #define MCOUNT_ENTER(s) { s = read_eflags(); disable_intr(); } #define MCOUNT_EXIT(s) (write_eflags(s)) Index: sys/i386/isa/clock.c =================================================================== RCS file: /home/ncvs/src/sys/i386/isa/clock.c,v retrieving revision 1.167 diff -u -r1.167 clock.c --- sys/i386/isa/clock.c 2001/01/29 11:57:25 1.167 +++ sys/i386/isa/clock.c 2001/02/07 03:01:11 @@ -207,7 +207,7 @@ { if (timecounter->tc_get_timecount == i8254_get_timecount) { - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); if (i8254_ticked) i8254_ticked = 0; else { @@ -215,7 +215,7 @@ i8254_lastcount = 0; } clkintr_pending = 0; - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); } timer_func(&frame); switch (timer0_state) { @@ -232,14 +232,14 @@ break; case ACQUIRE_PENDING: - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); i8254_offset = i8254_get_timecount(NULL); i8254_lastcount = 0; timer0_max_count = TIMER_DIV(new_rate); outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); timer_func = new_function; timer0_state = ACQUIRED; break; @@ -247,7 +247,7 @@ case RELEASE_PENDING: if ((timer0_prescaler_count += timer0_max_count) >= hardclock_max_count) { - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); i8254_offset = i8254_get_timecount(NULL); i8254_lastcount = 0; timer0_max_count = hardclock_max_count; @@ -255,7 +255,7 @@ TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); timer0_prescaler_count = 0; timer_func = hardclock; timer0_state = RELEASED; @@ -403,7 +403,7 @@ { int high, low; - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); /* Select timer0 and latch counter value. */ outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); @@ -411,7 +411,7 @@ low = inb(TIMER_CNTR0); high = inb(TIMER_CNTR0); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); return ((high << 8) | low); } @@ -525,10 +525,10 @@ splx(x); return (-1); /* XXX Should be EBUSY, but nobody cares anyway. 
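The clock.c hunks above and below all share one shape: the i8254 is programmed with a multi-byte outb() sequence that must not interleave with the clock interrupt or with another CPU, so the whole sequence sits under the clock_lock spin mutex (which, per the MTX_LOCK_SPIN assembly above, also disables interrupts on the holding CPU). Condensed from the i8254_restore() hunk:

	mtx_lock_spin(&clock_lock);
	outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT);
	outb(TIMER_CNTR0, timer0_max_count & 0xff);	/* low byte first... */
	outb(TIMER_CNTR0, timer0_max_count >> 8);	/* ...then high byte */
	mtx_unlock_spin(&clock_lock);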
*/ } - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); outb(TIMER_CNTR2, pitch); outb(TIMER_CNTR2, (pitch>>8)); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); if (!beeping) { /* enable counter2 output to speaker */ outb(IO_PPI, inb(IO_PPI) | 3); @@ -679,7 +679,7 @@ { int new_timer0_max_count; - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); timer_freq = freq; new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq); if (new_timer0_max_count != timer0_max_count) { @@ -688,7 +688,7 @@ outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); } - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); } /* @@ -703,11 +703,11 @@ i8254_restore(void) { - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); } /* @@ -1194,7 +1194,7 @@ u_int eflags; eflags = read_eflags(); - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); /* Select timer0 and latch counter value. */ outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); @@ -1218,7 +1218,7 @@ } i8254_lastcount = count; count += i8254_offset; - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); return (count); } Index: sys/i386/isa/if_el.c =================================================================== RCS file: /home/ncvs/src/sys/i386/isa/if_el.c,v retrieving revision 1.53 diff -u -r1.53 if_el.c --- sys/i386/isa/if_el.c 2001/01/19 01:58:45 1.53 +++ sys/i386/isa/if_el.c 2001/02/07 03:01:11 @@ -110,8 +110,8 @@ #define CSR_READ_1(sc, reg) \ bus_space_read_1(sc->el_btag, sc->el_bhandle, reg) -#define EL_LOCK(_sc) mtx_enter(&(_sc)->el_mtx, MTX_DEF) -#define EL_UNLOCK(_sc) mtx_exit(&(_sc)->el_mtx, MTX_DEF) +#define EL_LOCK(_sc) mtx_lock(&(_sc)->el_mtx) +#define EL_UNLOCK(_sc) mtx_unlock(&(_sc)->el_mtx) /* Probe routine. See if the card is there and at the right place. */ static int Index: sys/i386/isa/intr_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/i386/isa/intr_machdep.c,v retrieving revision 1.49 diff -u -r1.49 intr_machdep.c --- sys/i386/isa/intr_machdep.c 2001/01/29 11:57:26 1.49 +++ sys/i386/isa/intr_machdep.c 2001/02/07 03:01:11 @@ -701,7 +701,7 @@ ithds[ithd->irq] = NULL; if ((idesc->ih_flags & INTR_FAST) == 0) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (ithd->it_proc->p_stat == SWAIT) { ithd->it_proc->p_intr_nesting_level = 0; ithd->it_proc->p_stat = SRUN; @@ -713,7 +713,7 @@ * XXX: should we lower the threads priority? */ } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } free(idesc->ih_name, M_DEVBUF); Index: sys/i386/isa/ithread.c =================================================================== RCS file: /home/ncvs/src/sys/i386/isa/ithread.c,v retrieving revision 1.11 diff -u -r1.11 ithread.c --- sys/i386/isa/ithread.c 2001/02/01 03:34:20 1.11 +++ sys/i386/isa/ithread.c 2001/02/07 03:01:11 @@ -114,7 +114,7 @@ * is higher priority than their current thread, it gets run now. 
*/ ir->it_need = 1; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (ir->it_proc->p_stat == SWAIT) { /* not on run queue */ CTR1(KTR_INTR, "sched_ithd: setrunqueue %d", ir->it_proc->p_pid); @@ -134,7 +134,7 @@ ir->it_proc->p_stat ); need_resched(); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -163,7 +163,7 @@ me->it_proc->p_pid, me->it_proc->p_comm); curproc->p_ithd = NULL; free(me, M_DEVBUF); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); kthread_exit(0); } @@ -188,10 +188,10 @@ ih->ih_flags); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } } @@ -201,7 +201,7 @@ * set again, so we have to check it again. */ mtx_assert(&Giant, MA_NOTOWNED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (!me->it_need) { INTREN (1 << me->irq); /* reset the mask bit */ @@ -217,6 +217,6 @@ CTR1(KTR_INTR, "ithd_loop pid %d: resumed", me->it_proc->p_pid); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } Index: sys/i386/isa/npx.c =================================================================== RCS file: /home/ncvs/src/sys/i386/isa/npx.c,v retrieving revision 1.90 diff -u -r1.90 npx.c --- sys/i386/isa/npx.c 2001/02/01 03:34:20 1.90 +++ sys/i386/isa/npx.c 2001/02/07 03:01:11 @@ -724,7 +724,7 @@ u_short control; struct intrframe *frame; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if (PCPU_GET(npxproc) == NULL || !npx_exists) { printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n", PCPU_GET(npxproc), curproc, npx_exists); @@ -783,7 +783,7 @@ */ psignal(curproc, SIGFPE); } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* Index: sys/ia64/ia64/interrupt.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/interrupt.c,v retrieving revision 1.6 diff -u -r1.6 interrupt.c --- sys/ia64/ia64/interrupt.c 2001/01/21 19:25:06 1.6 +++ sys/ia64/ia64/interrupt.c 2001/02/07 03:01:11 @@ -86,7 +86,7 @@ case 240: /* clock interrupt */ CTR0(KTR_INTR, "clock interrupt"); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); cnt.v_intr++; #ifdef EVCNT_COUNTERS clock_intr_evcnt.ev_count++; @@ -98,11 +98,11 @@ /* divide hz (1024) by 8 to get stathz (128) */ if((++schedclk2 & 0x7) == 0) statclock((struct clockframe *)framep); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); break; default: - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); panic("unexpected interrupt: vec %ld\n", vector); /* NOTREACHED */ } Index: sys/ia64/ia64/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/machdep.c,v retrieving revision 1.21 diff -u -r1.21 machdep.c --- sys/ia64/ia64/machdep.c 2001/02/04 07:00:46 1.21 +++ sys/ia64/ia64/machdep.c 2001/02/07 03:01:11 @@ -597,7 +597,7 @@ */ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); mtx_init(&sched_lock, "sched lock", MTX_SPIN | MTX_RECURSE); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); #if 0 /* Index: sys/ia64/ia64/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/mp_machdep.c,v retrieving revision 1.9 diff -u -r1.9 mp_machdep.c --- sys/ia64/ia64/mp_machdep.c 2001/01/24 17:12:27 1.9 +++ sys/ia64/ia64/mp_machdep.c 2001/02/07 03:01:11 @@ -628,7 +628,7 @@ { /* obtain rendezvous lock */ - mtx_enter(&smp_rv_mtx, MTX_SPIN); + 
mtx_lock_spin(&smp_rv_mtx); /* set static function pointers */ smp_rv_setup_func = setup_func; @@ -645,7 +645,7 @@ smp_rendezvous_action(); /* release lock */ - mtx_exit(&smp_rv_mtx, MTX_SPIN); + mtx_unlock_spin(&smp_rv_mtx); } /* Index: sys/ia64/ia64/procfs_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/procfs_machdep.c,v retrieving revision 1.3 diff -u -r1.3 procfs_machdep.c --- sys/ia64/ia64/procfs_machdep.c 2001/01/24 17:11:33 1.3 +++ sys/ia64/ia64/procfs_machdep.c 2001/02/07 03:01:11 @@ -86,12 +86,12 @@ struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (fill_regs(p, regs)); } @@ -101,12 +101,12 @@ struct reg *regs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_regs(p, regs)); } @@ -121,9 +121,9 @@ struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } return (fill_fpregs(p, fpregs)); @@ -135,12 +135,12 @@ struct fpreg *fpregs; { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (EIO); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (set_fpregs(p, fpregs)); } Index: sys/ia64/ia64/trap.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/trap.c,v retrieving revision 1.11 diff -u -r1.11 trap.c --- sys/ia64/ia64/trap.c 2001/01/24 17:11:09 1.11 +++ sys/ia64/ia64/trap.c 2001/02/07 03:01:11 @@ -90,10 +90,10 @@ /* take pending signals */ while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_priority = p->p_usrpri; if (want_resched) { /* @@ -109,30 +109,30 @@ setrunqueue(p); p->p_stats->p_ru.ru_nivcsw++; mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); splx(s); while ((sig = CURSIG(p)) != 0) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); postsig(sig); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } /* * If profiling, charge recent system time to the trapped pc. 
*/ if (p->p_sflag & PS_PROFIL) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, frame->tf_cr_iip, (int)(p->p_sticks - oticks) * psratio); } curpriority = p->p_priority; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } static const char *ia64_vector_names[] = { @@ -249,9 +249,9 @@ user = ((framep->tf_cr_ipsr & IA64_PSR_CPL) == IA64_PSR_CPL_USER); if (user) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_tf = framep; } else { sticks = 0; /* XXX bogus -Wuninitialized warning */ @@ -265,12 +265,12 @@ * and per-process unaligned-access-handling flags). */ if (user) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if ((i = unaligned_fixup(framep, p)) == 0) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); ucode = framep->tf_cr_ifa; /* VA */ break; } @@ -330,7 +330,7 @@ vm_prot_t ftype = 0; int rv; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * If it was caused by fuswintr or suswintr, * just punt. Note that we check the faulting @@ -345,7 +345,7 @@ p->p_addr->u_pcb.pcb_accessaddr == va) { framep->tf_cr_iip = p->p_addr->u_pcb.pcb_onfault; p->p_addr->u_pcb.pcb_onfault = 0; - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } @@ -455,11 +455,11 @@ rv = KERN_INVALID_ADDRESS; } if (rv == KERN_SUCCESS) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); goto out; } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); ucode = va; i = SIGSEGV; #ifdef DEBUG @@ -480,7 +480,7 @@ if (user) { userret(p, framep, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } return; @@ -521,11 +521,11 @@ cnt.v_syscall++; p = curproc; p->p_md.md_tf = framep; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * Skip past the break instruction. Remember old address in case * we have to restart. @@ -618,7 +618,7 @@ * is not the case, this code will need to be revisited. 
*/ STOPEVENT(p, S_SCX, code); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); #ifdef WITNESS if (witness_list(p)) { @@ -646,13 +646,13 @@ #ifdef KTRACE if (KTRPOINT(p, KTR_SYSRET)) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ktrsysret(p->p_tracep, SYS_fork, 0, 0); } #endif if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* @@ -667,9 +667,9 @@ u_quad_t sticks; p = curproc; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); sticks = p->p_sticks; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_md.md_tf = framep; if ((framep->tf_cr_ipsr & IA64_PSR_CPL) != IA64_PSR_CPL_USER) @@ -678,36 +678,36 @@ cnt.v_soft++; PCPU_SET(astpending, 0); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_OWEUPC) { p->p_sflag &= ~PS_OWEUPC; - mtx_exit(&sched_lock, MTX_SPIN); - mtx_enter(&Giant, MTX_DEF); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); + mtx_lock(&Giant); + mtx_lock_spin(&sched_lock); addupc_task(p, p->p_stats->p_prof.pr_addr, p->p_stats->p_prof.pr_ticks); } if (p->p_sflag & PS_ALRMPEND) { p->p_sflag &= ~PS_ALRMPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGVTALRM); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } if (p->p_sflag & PS_PROFPEND) { p->p_sflag &= ~PS_PROFPEND; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); psignal(p, SIGPROF); } else - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); userret(p, framep, sticks); if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } extern int ia64_unaligned_print, ia64_unaligned_fix; Index: sys/ia64/ia64/vm_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/ia64/ia64/vm_machdep.c,v retrieving revision 1.10 diff -u -r1.10 vm_machdep.c --- sys/ia64/ia64/vm_machdep.c 2000/12/23 19:43:08 1.10 +++ sys/ia64/ia64/vm_machdep.c 2001/02/07 03:01:11 @@ -303,8 +303,8 @@ ia64_fpstate_drop(p); (void) splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); - mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH); + mtx_lock_spin(&sched_lock); + mtx_unlock_flags(&Giant, MTX_NOSWITCH); mtx_assert(&Giant, MA_NOTOWNED); /* Index: sys/ia64/include/cpu.h =================================================================== RCS file: /home/ncvs/src/sys/ia64/include/cpu.h,v retrieving revision 1.9 diff -u -r1.9 cpu.h --- sys/ia64/include/cpu.h 2001/01/24 10:38:58 1.9 +++ sys/ia64/include/cpu.h 2001/02/07 03:01:11 @@ -83,10 +83,10 @@ * through trap, marking the proc as needing a profiling tick. 
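Both the i386 and ia64 ast() bodies above obey the same ordering rule: sched_lock is a spin mutex, and a sleep mutex such as Giant may not be acquired while a spin mutex is held, so each pending flag is consumed and sched_lock dropped before Giant is taken. The shape, sketched with a hypothetical PS_SOMEPEND flag and work function:

	mtx_lock_spin(&sched_lock);
	if (p->p_sflag & PS_SOMEPEND) {
		p->p_sflag &= ~PS_SOMEPEND;	/* consume the flag under sched_lock */
		mtx_unlock_spin(&sched_lock);	/* drop the spin lock first... */
		if (!mtx_owned(&Giant))
			mtx_lock(&Giant);	/* ...then take the sleep lock */
		do_pending_work(p);		/* hypothetical Giant-protected work */
	} else
		mtx_unlock_spin(&sched_lock);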
*/ #define need_proftick(p) do { \ - mtx_enter(&sched_lock, MTX_SPIN); \ + mtx_lock_spin(&sched_lock); \ (p)->p_sflag |= PS_OWEUPC; \ aston(); \ - mtx_exit(&sched_lock, MTX_SPIN); \ + mtx_unlock_spin(&sched_lock); \ } while (0) /* Index: sys/ia64/include/mutex.h =================================================================== RCS file: /home/ncvs/src/sys/ia64/include/mutex.h,v retrieving revision 1.7 diff -u -r1.7 mutex.h --- sys/ia64/include/mutex.h 2001/01/28 08:05:55 1.7 +++ sys/ia64/include/mutex.h 2001/02/07 03:01:11 @@ -42,22 +42,10 @@ * Debugging */ #ifdef MUTEX_DEBUG - -#ifdef _KERN_MUTEX_C_ -char STR_IEN[] = "psr.i"; -char STR_IDIS[] = "!psr.i"; -char STR_SIEN[] = "mpp->mtx_saveintr & IA64_PSR_I"; -#else /* _KERN_MUTEX_C_ */ -extern char STR_IEN[]; -extern char STR_IDIS[]; -extern char STR_SIEN[]; -#endif /* _KERN_MUTEX_C_ */ - -#endif /* MUTEX_DEBUG */ - -#define ASS_IEN MPASS2((save_intr() & IA64_PSR_I), STR_IEN) -#define ASS_IDIS MPASS2(!(save_intr() & IA64_PSR_I), STR_IDIS) -#define ASS_SIEN(mpp) MPASS2(((mpp)->mtx_saveintr & IA64_PSR_I), STR_SIEN) +#define ASS_IEN MPASS2((save_intr() & IA64_PSR_I), "psr.i") +#define ASS_IDIS MPASS2(!(save_intr() & IA64_PSR_I), "!psr.i") +#define ASS_SIEN(mpp) MPASS2(((mpp)->mtx_saveintr & IA64_PSR_I), \ + "mpp->mtx_saveintr & IA64_PSR_I") #define mtx_legal2block() (save_intr() & IA64_PSR_I) Index: sys/isa/sio.c =================================================================== RCS file: /home/ncvs/src/sys/isa/sio.c,v retrieving revision 1.322 diff -u -r1.322 sio.c --- sys/isa/sio.c 2001/01/31 10:54:44 1.322 +++ sys/isa/sio.c 2001/02/07 03:01:11 @@ -856,7 +856,7 @@ * but mask them in the processor as well in case there are some * (misconfigured) shared interrupts. */ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); /* EXTRA DELAY? */ /* @@ -953,7 +953,7 @@ CLR_FLAG(dev, COM_C_IIR_TXRDYBUG); } sio_setreg(com, com_cfcr, CFCR_8BITS); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); bus_release_resource(dev, SYS_RES_IOPORT, rid, port); return (iobase == siocniobase ? 0 : result); } @@ -993,7 +993,7 @@ irqmap[3] = isa_irq_pending(); failures[9] = (sio_getreg(com, com_iir) & IIR_IMASK) - IIR_NOPEND; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); irqs = irqmap[1] & ~irqmap[0]; if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 && @@ -1181,7 +1181,7 @@ } else com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED; if (siosetwater(com, com->it_in.c_ispeed) != 0) { - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); /* * Leave i/o resources allocated if this is a `cn'-level * console, so that other devices can't snarf them. @@ -1190,7 +1190,7 @@ bus_release_resource(dev, SYS_RES_IOPORT, rid, port); return (ENOMEM); } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); termioschars(&com->it_in); com->it_out = com->it_in; @@ -1485,7 +1485,7 @@ } } - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); (void) inb(com->line_status_port); (void) inb(com->data_port); com->prev_modem_status = com->last_modem_status @@ -1497,7 +1497,7 @@ outb(com->intr_ctl_port, IER_ERXRDY | IER_ETXRDY | IER_ERLS | IER_EMSC); } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); /* * Handle initial DCD. Callout devices get a fake initial * DCD (trapdoor DCD). If we are callout, then any sleeping @@ -1753,7 +1753,7 @@ * semantics instead of the save-and-disable semantics * that are used everywhere else. 
*/ - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); incc = com->iptr - buf; if (tp->t_rawq.c_cc + incc > tp->t_ihiwat && (com->state & CS_RTS_IFLOW @@ -1774,7 +1774,7 @@ tp->t_lflag &= ~FLUSHO; comstart(tp); } - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); } while (buf < com->iptr); } else { do { @@ -1783,7 +1783,7 @@ * semantics instead of the save-and-disable semantics * that are used everywhere else. */ - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); line_status = buf[com->ierroff]; recv_data = *buf++; if (line_status @@ -1798,7 +1798,7 @@ recv_data |= TTY_PE; } (*linesw[tp->t_line].l_rint)(recv_data, tp); - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); } while (buf < com->iptr); } com_events -= (com->iptr - com->ibuf); @@ -1823,9 +1823,9 @@ #ifndef COM_MULTIPORT com = (struct com_s *)arg; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); siointr1(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); #else /* COM_MULTIPORT */ bool_t possibly_more_intrs; int unit; @@ -1837,7 +1837,7 @@ * devices, then the edge from one may be lost because another is * on. */ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); do { possibly_more_intrs = FALSE; for (unit = 0; unit < sio_numunits; ++unit) { @@ -1856,7 +1856,7 @@ /* XXX COM_UNLOCK(); */ } } while (possibly_more_intrs); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); #endif /* COM_MULTIPORT */ } @@ -2264,7 +2264,7 @@ * Discard any events related to never-opened or * going-away devices. */ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); incc = com->iptr - com->ibuf; com->iptr = com->ibuf; if (com->state & CS_CHECKMSR) { @@ -2272,33 +2272,33 @@ com->state &= ~CS_CHECKMSR; } com_events -= incc; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); continue; } if (com->iptr != com->ibuf) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); sioinput(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } if (com->state & CS_CHECKMSR) { u_char delta_modem_status; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); delta_modem_status = com->last_modem_status ^ com->prev_modem_status; com->prev_modem_status = com->last_modem_status; com_events -= LOTS_OF_EVENTS; com->state &= ~CS_CHECKMSR; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (delta_modem_status & MSR_DCD) (*linesw[tp->t_line].l_modem) (tp, com->prev_modem_status & MSR_DCD); } if (com->state & CS_ODONE) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); com_events -= LOTS_OF_EVENTS; com->state &= ~CS_ODONE; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (!(com->state & CS_BUSY) && !(com->extra_state & CSE_BUSYCHECK)) { timeout(siobusycheck, com, hz / 100); @@ -2484,7 +2484,7 @@ if (com->state >= (CS_BUSY | CS_TTGO)) siointr1(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); splx(s); comstart(tp); if (com->ibufold != NULL) { @@ -2518,7 +2518,7 @@ for (ibufsize = 128; ibufsize < cp4ticks;) ibufsize <<= 1; if (ibufsize == com->ibufsize) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); return (0); } @@ -2528,7 +2528,7 @@ */ ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT); if (ibuf == NULL) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); return (ENOMEM); } @@ -2546,7 +2546,7 @@ * Read current input buffer, if any. Continue with interrupts * disabled. 
*/ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->iptr != com->ibuf) sioinput(com); @@ -2581,7 +2581,7 @@ if (com == NULL) return; s = spltty(); - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (tp->t_state & TS_TTSTOP) com->state &= ~CS_TTGO; else @@ -2594,7 +2594,7 @@ && com->state & CS_RTS_IFLOW) outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS); } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); splx(s); @@ -2610,7 +2610,7 @@ sizeof com->obuf1); com->obufs[0].l_next = NULL; com->obufs[0].l_queued = TRUE; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->state & CS_BUSY) { qp = com->obufq.l_next; while ((next = qp->l_next) != NULL) @@ -2622,7 +2622,7 @@ com->obufq.l_next = &com->obufs[0]; com->state |= CS_BUSY; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) { com->obufs[1].l_tail @@ -2630,7 +2630,7 @@ sizeof com->obuf2); com->obufs[1].l_next = NULL; com->obufs[1].l_queued = TRUE; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->state & CS_BUSY) { qp = com->obufq.l_next; while ((next = qp->l_next) != NULL) @@ -2642,14 +2642,14 @@ com->obufq.l_next = &com->obufs[1]; com->state |= CS_BUSY; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } tp->t_state |= TS_BUSY; } - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->state >= (CS_BUSY | CS_TTGO)) siointr1(com); /* fake interrupt to start output */ - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); ttwwakeup(tp); splx(s); } @@ -2664,7 +2664,7 @@ com = com_addr(DEV_TO_UNIT(tp->t_dev)); if (com == NULL || com->gone) return; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (rw & FWRITE) { if (com->hasfifo) #ifdef COM_ESP @@ -2691,7 +2691,7 @@ com_events -= (com->iptr - com->ibuf); com->iptr = com->ibuf; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); comstart(tp); } @@ -2734,7 +2734,7 @@ mcr |= MCR_RTS; if (com->gone) return(0); - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); switch (how) { case DMSET: outb(com->modem_ctl_port, @@ -2747,7 +2747,7 @@ outb(com->modem_ctl_port, com->mcr_image &= ~mcr); break; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); return (0); } @@ -2806,9 +2806,9 @@ com = com_addr(unit); if (com != NULL && !com->gone && (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); siointr1(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } } @@ -2830,10 +2830,10 @@ u_int delta; u_long total; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); delta = com->delta_error_counts[errnum]; com->delta_error_counts[errnum] = 0; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (delta == 0) continue; total = com->error_counts[errnum] += delta; Index: sys/isofs/cd9660/cd9660_node.c =================================================================== RCS file: /home/ncvs/src/sys/isofs/cd9660/cd9660_node.c,v retrieving revision 1.35 diff -u -r1.35 cd9660_node.c --- sys/isofs/cd9660/cd9660_node.c 2001/01/24 12:35:50 1.35 +++ sys/isofs/cd9660/cd9660_node.c 2001/02/07 03:01:11 @@ -102,18 +102,18 @@ struct vnode *vp; loop: - mtx_enter(&cd9660_ihash_mtx, MTX_DEF); + mtx_lock(&cd9660_ihash_mtx); for (ip = isohashtbl[INOHASH(dev, inum)]; ip; ip = ip->i_next) { if (inum == ip->i_number && dev == ip->i_dev) { 
vp = ITOV(ip); - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&cd9660_ihash_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&cd9660_ihash_mtx); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) goto loop; return (vp); } } - mtx_exit(&cd9660_ihash_mtx, MTX_DEF); + mtx_unlock(&cd9660_ihash_mtx); return (NULL); } @@ -127,14 +127,14 @@ struct proc *p = curproc; /* XXX */ struct iso_node **ipp, *iq; - mtx_enter(&cd9660_ihash_mtx, MTX_DEF); + mtx_lock(&cd9660_ihash_mtx); ipp = &isohashtbl[INOHASH(ip->i_dev, ip->i_number)]; if ((iq = *ipp) != NULL) iq->i_prev = &ip->i_next; ip->i_next = iq; ip->i_prev = ipp; *ipp = ip; - mtx_exit(&cd9660_ihash_mtx, MTX_DEF); + mtx_unlock(&cd9660_ihash_mtx); lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p); } @@ -148,7 +148,7 @@ { register struct iso_node *iq; - mtx_enter(&cd9660_ihash_mtx, MTX_DEF); + mtx_lock(&cd9660_ihash_mtx); if ((iq = ip->i_next) != NULL) iq->i_prev = ip->i_prev; *ip->i_prev = iq; @@ -156,7 +156,7 @@ ip->i_next = NULL; ip->i_prev = NULL; #endif - mtx_exit(&cd9660_ihash_mtx, MTX_DEF); + mtx_unlock(&cd9660_ihash_mtx); } /* Index: sys/kern/imgact_elf.c =================================================================== RCS file: /home/ncvs/src/sys/kern/imgact_elf.c,v retrieving revision 1.88 diff -u -r1.88 imgact_elf.c --- sys/kern/imgact_elf.c 2001/01/27 00:01:25 1.88 +++ sys/kern/imgact_elf.c 2001/02/07 03:01:11 @@ -485,9 +485,9 @@ * a context switch. Better safe than sorry; I really don't want * the file to change while it's being loaded. */ - mtx_enter(&imgp->vp->v_interlock, MTX_DEF); + mtx_lock(&imgp->vp->v_interlock); imgp->vp->v_flag |= VTEXT; - mtx_exit(&imgp->vp->v_interlock, MTX_DEF); + mtx_unlock(&imgp->vp->v_interlock); if ((error = exec_extract_strings(imgp)) != 0) goto fail; Index: sys/kern/init_main.c =================================================================== RCS file: /home/ncvs/src/sys/kern/init_main.c,v retrieving revision 1.153 diff -u -r1.153 init_main.c --- sys/kern/init_main.c 2001/01/24 10:40:56 1.153 +++ sys/kern/init_main.c 2001/02/07 03:01:11 @@ -455,7 +455,7 @@ char *ucp, **uap, *arg0, *arg1; struct proc *p; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); p = curproc; @@ -555,7 +555,7 @@ * to user mode as init! 
*/ if ((error = execve(p, &args)) == 0) { - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); return; } if (error != ENOENT) @@ -584,9 +584,9 @@ PROC_LOCK(initproc); initproc->p_flag |= P_SYSTEM; PROC_UNLOCK(initproc); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); initproc->p_sflag |= PS_INMEM; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); cpu_set_fork_handler(initproc, start_init, NULL); } SYSINIT(init, SI_SUB_CREATE_INIT, SI_ORDER_FIRST, create_init, NULL) @@ -598,9 +598,9 @@ kick_init(const void *udata __unused) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); initproc->p_stat = SRUN; setrunqueue(initproc); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } SYSINIT(kickinit, SI_SUB_KTHREAD_INIT, SI_ORDER_FIRST, kick_init, NULL) Index: sys/kern/kern_acct.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_acct.c,v retrieving revision 1.29 diff -u -r1.29 kern_acct.c --- sys/kern/kern_acct.c 2001/01/27 00:01:25 1.29 +++ sys/kern/kern_acct.c 2001/02/07 03:01:11 @@ -194,9 +194,9 @@ bcopy(p->p_comm, acct.ac_comm, sizeof acct.ac_comm); /* (2) The amount of user and system time that was used */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); calcru(p, &ut, &st, NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); acct.ac_utime = encode_comp_t(ut.tv_sec, ut.tv_usec); acct.ac_stime = encode_comp_t(st.tv_sec, st.tv_usec); Index: sys/kern/kern_clock.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_clock.c,v retrieving revision 1.119 diff -u -r1.119 kern_clock.c --- sys/kern/kern_clock.c 2001/01/24 10:43:25 1.119 +++ sys/kern/kern_clock.c 2001/02/07 03:01:11 @@ -170,17 +170,17 @@ if (CLKF_USERMODE(frame) && timevalisset(&pstats->p_timer[ITIMER_VIRTUAL].it_value) && itimerdecr(&pstats->p_timer[ITIMER_VIRTUAL], tick) == 0) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_sflag |= PS_ALRMPEND; aston(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } if (timevalisset(&pstats->p_timer[ITIMER_PROF].it_value) && itimerdecr(&pstats->p_timer[ITIMER_PROF], tick) == 0) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_sflag |= PS_PROFPEND; aston(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } @@ -200,13 +200,13 @@ * Process callouts at a very low cpu priority, so we don't keep the * relatively high clock interrupt priority any longer than necessary. */ - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); ticks++; if (TAILQ_FIRST(&callwheel[ticks & callwheelmask]) != NULL) { need_softclock = 1; } else if (softticks + 1 == ticks) ++softticks; - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); /* * sched_swi acquires sched_lock, so we don't want to call it with @@ -292,7 +292,7 @@ * it should be protected later on by a time_lock, which would * cover psdiv, etc. as well. 
*/ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_PROFIL) == 0) { p->p_sflag |= PS_PROFIL; if (++profprocs == 1 && stathz != 0) { @@ -302,7 +302,7 @@ splx(s); } } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -314,7 +314,7 @@ { int s; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_PROFIL) { p->p_sflag &= ~PS_PROFIL; if (--profprocs == 0 && stathz != 0) { @@ -324,7 +324,7 @@ splx(s); } } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -347,7 +347,7 @@ struct rusage *ru; struct vmspace *vm; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (CLKF_USERMODE(frame)) { /* @@ -362,7 +362,7 @@ forward_statclock(pscnt); #endif if (--pscnt > 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } /* @@ -392,7 +392,7 @@ forward_statclock(pscnt); #endif if (--pscnt > 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } /* @@ -435,7 +435,7 @@ ru->ru_maxrss = rss; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* Index: sys/kern/kern_condvar.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_condvar.c,v retrieving revision 1.2 diff -u -r1.2 kern_condvar.c --- sys/kern/kern_condvar.c 2001/01/24 10:44:01 1.2 +++ sys/kern/kern_condvar.c 2001/02/07 03:01:11 @@ -138,9 +138,9 @@ * stopped, p->p_wchan will be 0 upon return from CURSIG. */ p->p_sflag |= PS_SINTR; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); sig = CURSIG(p); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (sig != 0) { if (p->p_wchan != NULL) cv_waitq_remove(p); @@ -199,7 +199,7 @@ WITNESS_SLEEP(0, mp); WITNESS_SAVE(mp, mp); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (cold || panicstr) { /* * After a panic, or during autoconfiguration, just give @@ -207,25 +207,25 @@ * procs or panic below, in case this is the idle process and * already asleep. */ - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } CV_WAIT_VALIDATE(cvp, mp); DROP_GIANT_NOSWITCH(); - mtx_exit(mp, MTX_DEF | MTX_NOSWITCH); + mtx_unlock_flags(mp, MTX_NOSWITCH); cv_waitq_add(cvp, p); cv_switch(p); curpriority = p->p_usrpri; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); #ifdef KTRACE if (KTRPOINT(p, KTR_CSW)) ktrcsw(p->p_tracep, 0, 0); #endif PICKUP_GIANT(); - mtx_enter(mp, MTX_DEF); + mtx_lock(mp); WITNESS_RESTORE(mp, mp); } @@ -253,7 +253,7 @@ WITNESS_SLEEP(0, mp); WITNESS_SAVE(mp, mp); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (cold || panicstr) { /* * After a panic, or during autoconfiguration, just give @@ -261,19 +261,19 @@ * procs or panic below, in case this is the idle process and * already asleep. 
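The kern_condvar.c hunks here keep the basic contract intact: a condition variable is paired with a sleep mutex, and cv_wait() releases that mutex while the process sleeps and reacquires it before returning (hence the DROP_GIANT_NOSWITCH/unlock sequence in every wait variant). A minimal usage sketch against a hypothetical softc:

	struct softc {
		struct mtx	sc_mtx;		/* MTX_DEF; protects sc_ready */
		struct cv	sc_cv;
		int		sc_ready;
	};

	static void
	wait_for_work(struct softc *sc)
	{
		mtx_lock(&sc->sc_mtx);
		while (sc->sc_ready == 0)
			cv_wait(&sc->sc_cv, &sc->sc_mtx); /* drops, then retakes sc_mtx */
		sc->sc_ready = 0;
		mtx_unlock(&sc->sc_mtx);
	}

	static void
	post_work(struct softc *sc)
	{
		mtx_lock(&sc->sc_mtx);
		sc->sc_ready = 1;
		cv_signal(&sc->sc_cv);		/* wake one waiter */
		mtx_unlock(&sc->sc_mtx);
	}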
*/ - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return 0; } CV_WAIT_VALIDATE(cvp, mp); DROP_GIANT_NOSWITCH(); - mtx_exit(mp, MTX_DEF | MTX_NOSWITCH); + mtx_unlock_flags(mp, MTX_NOSWITCH); cv_waitq_add(cvp, p); sig = cv_switch_catch(p); curpriority = p->p_usrpri; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); /* proc_lock(p); */ @@ -291,7 +291,7 @@ if (KTRPOINT(p, KTR_CSW)) ktrcsw(p->p_tracep, 0, 0); #endif - mtx_enter(mp, MTX_DEF); + mtx_lock(mp); WITNESS_RESTORE(mp, mp); return (rval); @@ -319,7 +319,7 @@ WITNESS_SLEEP(0, mp); WITNESS_SAVE(mp, mp); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (cold || panicstr) { /* * After a panic, or during autoconfiguration, just give @@ -327,13 +327,13 @@ * procs or panic below, in case this is the idle process and * already asleep. */ - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return 0; } CV_WAIT_VALIDATE(cvp, mp); DROP_GIANT_NOSWITCH(); - mtx_exit(mp, MTX_DEF | MTX_NOSWITCH); + mtx_unlock_flags(mp, MTX_NOSWITCH); cv_waitq_add(cvp, p); callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p); @@ -346,13 +346,13 @@ } else callout_stop(&p->p_slpcallout); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); #ifdef KTRACE if (KTRPOINT(p, KTR_CSW)) ktrcsw(p->p_tracep, 0, 0); #endif PICKUP_GIANT(); - mtx_enter(mp, MTX_DEF); + mtx_lock(mp); WITNESS_RESTORE(mp, mp); return (rval); @@ -382,7 +382,7 @@ WITNESS_SLEEP(0, mp); WITNESS_SAVE(mp, mp); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (cold || panicstr) { /* * After a panic, or during autoconfiguration, just give @@ -390,13 +390,13 @@ * procs or panic below, in case this is the idle process and * already asleep. */ - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return 0; } CV_WAIT_VALIDATE(cvp, mp); DROP_GIANT_NOSWITCH(); - mtx_exit(mp, MTX_DEF | MTX_NOSWITCH); + mtx_unlock_flags(mp, MTX_NOSWITCH); cv_waitq_add(cvp, p); callout_reset(&p->p_slpcallout, timo, cv_timedwait_end, p); @@ -409,7 +409,7 @@ } else callout_stop(&p->p_slpcallout); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); /* proc_lock(p); */ @@ -427,7 +427,7 @@ if (KTRPOINT(p, KTR_CSW)) ktrcsw(p->p_tracep, 0, 0); #endif - mtx_enter(mp, MTX_DEF); + mtx_lock(mp); WITNESS_RESTORE(mp, mp); return (rval); @@ -480,12 +480,12 @@ { KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__)); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (!TAILQ_EMPTY(&cvp->cv_waitq)) { CV_SIGNAL_VALIDATE(cvp); cv_wakeup(cvp); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -497,11 +497,11 @@ { KASSERT(cvp != NULL, ("%s: cvp NULL", __FUNCTION__)); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); CV_SIGNAL_VALIDATE(cvp); while (!TAILQ_EMPTY(&cvp->cv_waitq)) cv_wakeup(cvp); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -513,13 +513,13 @@ { struct cv *cvp; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((cvp = p->p_wchan) != NULL && p->p_sflag & PS_CVWAITQ) { TAILQ_REMOVE(&cvp->cv_waitq, p, p_slpq); p->p_sflag &= ~PS_CVWAITQ; p->p_wchan = NULL; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -534,7 +534,7 @@ p = arg; CTR3(KTR_PROC, "cv_timedwait_end: proc %p (pid %d, %s)", p, p->p_pid, p->p_comm); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan != NULL) { if (p->p_stat == SSLEEP) setrunnable(p); @@ 
-542,5 +542,5 @@ cv_waitq_remove(p); p->p_sflag |= PS_TIMEOUT; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } Index: sys/kern/kern_exit.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_exit.c,v retrieving revision 1.114 diff -u -r1.114 kern_exit.c --- sys/kern/kern_exit.c 2001/01/24 00:33:44 1.114 +++ sys/kern/kern_exit.c 2001/02/07 03:01:12 @@ -314,9 +314,9 @@ */ p->p_xstat = rv; *p->p_ru = p->p_stats->p_ru; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); calcru(p, &p->p_ru->ru_utime, &p->p_ru->ru_stime, NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); ruadd(p->p_ru, &p->p_stats->p_cru); /* @@ -457,9 +457,9 @@ } nfound++; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat == SZOMB) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); PROCTREE_LOCK(PT_RELEASE); @@ -579,7 +579,7 @@ } if (p->p_stat == SSTOP && (p->p_flag & P_WAITED) == 0 && (p->p_flag & P_TRACED || uap->options & WUNTRACED)) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); p->p_flag |= P_WAITED; PROC_UNLOCK(p); PROCTREE_LOCK(PT_RELEASE); @@ -598,7 +598,7 @@ error = 0; return (error); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); } PROCTREE_LOCK(PT_RELEASE); Index: sys/kern/kern_fork.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_fork.c,v retrieving revision 1.96 diff -u -r1.96 kern_fork.c --- sys/kern/kern_fork.c 2001/01/26 23:51:40 1.96 +++ sys/kern/kern_fork.c 2001/02/07 03:01:12 @@ -380,11 +380,11 @@ * The p_stats and p_sigacts substructs are set in vm_fork. */ p2->p_flag = 0; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p2->p_sflag = PS_INMEM; if (p1->p_sflag & PS_PROFIL) startprofclock(p2); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); MALLOC(p2->p_cred, struct pcred *, sizeof(struct pcred), M_SUBPROC, M_WAITOK); bcopy(p1->p_cred, p2->p_cred, sizeof(*p2->p_cred)); @@ -554,10 +554,10 @@ p2->p_acflag = AFORK; if ((flags & RFSTOPPED) == 0) { splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p2->p_stat = SRUN; setrunqueue(p2); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); spl0(); } @@ -649,7 +649,7 @@ { struct proc *p; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); /* * XXX: We really shouldn't have to do this. 
*/ @@ -674,7 +674,7 @@ */ p = CURPROC; if (p->p_flag & P_KTHREAD) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("Kernel thread \"%s\" (pid %d) exited prematurely.\n", p->p_comm, p->p_pid); kthread_exit(0); @@ -698,11 +698,11 @@ #ifdef KTRACE if (KTRPOINT(p, KTR_SYSRET)) { if (!mtx_owned(&Giant)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ktrsysret(p->p_tracep, SYS_fork, 0, 0); } #endif if (mtx_owned(&Giant)) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); mtx_assert(&Giant, MA_NOTOWNED); } Index: sys/kern/kern_idle.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_idle.c,v retrieving revision 1.11 diff -u -r1.11 kern_idle.c --- sys/kern/kern_idle.c 2000/10/20 07:58:03 1.11 +++ sys/kern/kern_idle.c 2001/02/07 03:01:12 @@ -105,8 +105,8 @@ #endif } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } Index: sys/kern/kern_intr.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_intr.c,v retrieving revision 1.37 diff -u -r1.37 kern_intr.c --- sys/kern/kern_intr.c 2001/01/19 09:57:29 1.37 +++ sys/kern/kern_intr.c 2001/02/07 03:01:12 @@ -168,7 +168,7 @@ ih->ih_need = 1; if (!(flag & SWI_DELAY)) { it->it_need = 1; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat == SWAIT) { /* not on run queue */ CTR1(KTR_INTR, "sched_swi: setrunqueue %d", p->p_pid); /* membar_lock(); */ @@ -180,7 +180,7 @@ CTR3(KTR_INTR, "sched_swi %d: it_need %d, state %d", p->p_pid, it->it_need, p->p_stat ); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); need_resched(); } } @@ -223,10 +223,10 @@ ih->ih_flags); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); ih->ih_handler(ih->ih_argument); if ((ih->ih_flags & INTR_MPSAFE) == 0) - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } } @@ -236,14 +236,14 @@ * set again, so we have to check it again. */ mtx_assert(&Giant, MA_NOTOWNED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (!it->it_need) { p->p_stat = SWAIT; /* we're idle */ CTR1(KTR_INTR, "sithd_loop pid %d: done", p->p_pid); mi_switch(); CTR1(KTR_INTR, "sithd_loop pid %d: resumed", p->p_pid); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } } Index: sys/kern/kern_kthread.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_kthread.c,v retrieving revision 1.12 diff -u -r1.12 kern_kthread.c --- sys/kern/kern_kthread.c 2001/01/24 10:47:50 1.12 +++ sys/kern/kern_kthread.c 2001/02/07 03:01:12 @@ -103,13 +103,13 @@ cpu_set_fork_handler(p2, func, arg); /* Delay putting it on the run queue until now. 
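kern_fork.c above and the kern_kthread.c hunk that follows share the start-up idiom: the PS_INMEM flag, the SRUN state and the run-queue insertion all become visible under a single sched_lock section, so the scheduler can never observe a runnable process that is not yet on a queue. Sketched:

	mtx_lock_spin(&sched_lock);
	p2->p_sflag |= PS_INMEM;	/* in core, eligible to run */
	p2->p_stat = SRUN;
	setrunqueue(p2);		/* state change and queue insertion are atomic */
	mtx_unlock_spin(&sched_lock);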
*/ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p2->p_sflag |= PS_INMEM; if (!(flags & RFSTOPPED)) { p2->p_stat = SRUN; setrunqueue(p2); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return 0; } Index: sys/kern/kern_lock.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_lock.c,v retrieving revision 1.40 diff -u -r1.40 kern_lock.c --- sys/kern/kern_lock.c 2001/01/24 12:35:50 1.40 +++ sys/kern/kern_lock.c 2001/02/07 03:01:12 @@ -144,11 +144,11 @@ return 0; #ifdef SMP for (lock_wait = LOCK_WAIT_TIME; lock_wait > 0; lock_wait--) { - mtx_exit(lkp->lk_interlock, MTX_DEF); + mtx_unlock(lkp->lk_interlock); for (i = LOCK_SAMPLE_WAIT; i > 0; i--) if ((lkp->lk_flags & flags) == 0) break; - mtx_enter(lkp->lk_interlock, MTX_DEF); + mtx_lock(lkp->lk_interlock); if ((lkp->lk_flags & flags) == 0) return 0; } @@ -236,9 +236,9 @@ else pid = p->p_pid; - mtx_enter(lkp->lk_interlock, MTX_DEF); + mtx_lock(lkp->lk_interlock); if (flags & LK_INTERLOCK) - mtx_exit(interlkp, MTX_DEF); + mtx_unlock(interlkp); extflags = (flags | lkp->lk_flags) & LK_EXTFLG_MASK; @@ -451,7 +451,7 @@ break; default: - mtx_exit(lkp->lk_interlock, MTX_DEF); + mtx_unlock(lkp->lk_interlock); panic("lockmgr: unknown locktype request %d", flags & LK_TYPE_MASK); /* NOTREACHED */ @@ -462,7 +462,7 @@ lkp->lk_flags &= ~LK_WAITDRAIN; wakeup((void *)&lkp->lk_flags); } - mtx_exit(lkp->lk_interlock, MTX_DEF); + mtx_unlock(lkp->lk_interlock); return (error); } @@ -506,12 +506,12 @@ "timo == %d, flags = 0x%x\n", lkp, prio, wmesg, timo, flags); if (lock_mtx_array != NULL) { - mtx_enter(&lock_mtx, MTX_DEF); + mtx_lock(&lock_mtx); lkp->lk_interlock = &lock_mtx_array[lock_mtx_selector]; lock_mtx_selector++; if (lock_mtx_selector == lock_nmtx) lock_mtx_selector = 0; - mtx_exit(&lock_mtx, MTX_DEF); + mtx_unlock(&lock_mtx); } else { /* * Giving lockmgr locks that are initialized during boot a @@ -561,7 +561,7 @@ { int lock_type = 0; - mtx_enter(lkp->lk_interlock, MTX_DEF); + mtx_lock(lkp->lk_interlock); if (lkp->lk_exclusivecount != 0) { if (p == NULL || lkp->lk_lockholder == p->p_pid) lock_type = LK_EXCLUSIVE; @@ -569,7 +569,7 @@ lock_type = LK_EXCLOTHER; } else if (lkp->lk_sharecount != 0) lock_type = LK_SHARED; - mtx_exit(lkp->lk_interlock, MTX_DEF); + mtx_unlock(lkp->lk_interlock); return (lock_type); } @@ -582,9 +582,9 @@ { int count; - mtx_enter(lkp->lk_interlock, MTX_DEF); + mtx_lock(lkp->lk_interlock); count = lkp->lk_exclusivecount + lkp->lk_sharecount; - mtx_exit(lkp->lk_interlock, MTX_DEF); + mtx_unlock(lkp->lk_interlock); return (count); } Index: sys/kern/kern_malloc.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_malloc.c,v retrieving revision 1.82 diff -u -r1.82 kern_malloc.c --- sys/kern/kern_malloc.c 2001/01/31 04:50:18 1.82 +++ sys/kern/kern_malloc.c 2001/02/07 03:01:12 @@ -154,7 +154,7 @@ indx = BUCKETINDX(size); kbp = &bucket[indx]; s = splmem(); - mtx_enter(&malloc_mtx, MTX_DEF); + mtx_lock(&malloc_mtx); while (ksp->ks_memuse >= ksp->ks_limit) { if (flags & M_ASLEEP) { if (ksp->ks_limblocks < 65535) @@ -163,7 +163,7 @@ } if (flags & M_NOWAIT) { splx(s); - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); return ((void *) NULL); } if (ksp->ks_limblocks < 65535) @@ -183,7 +183,7 @@ allocsize = 1 << indx; npg = btoc(allocsize); - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); va = (caddr_t) kmem_malloc(kmem_map, (vm_size_t)ctob(npg), flags); if (va == NULL) { 
@@ -194,7 +194,7 @@ * Enter malloc_mtx after the error check to avoid having to * immediately exit it again if there is an error. */ - mtx_enter(&malloc_mtx, MTX_DEF); + mtx_lock(&malloc_mtx); kbp->kb_total += kbp->kb_elmpercl; kup = btokup(va); @@ -278,7 +278,7 @@ if (ksp->ks_memuse > ksp->ks_maxused) ksp->ks_maxused = ksp->ks_memuse; splx(s); - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); /* XXX: Do idle pre-zeroing. */ if (va != NULL && (flags & M_ZERO)) bzero(va, size); @@ -314,7 +314,7 @@ size = 1 << kup->ku_indx; kbp = &bucket[kup->ku_indx]; s = splmem(); - mtx_enter(&malloc_mtx, MTX_DEF); + mtx_lock(&malloc_mtx); #ifdef INVARIANTS /* * Check for returns of data that do not point to the @@ -329,9 +329,9 @@ (void *)addr, size, type->ks_shortdesc, alloc); #endif /* INVARIANTS */ if (size > MAXALLOCSAVE) { - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); kmem_free(kmem_map, (vm_offset_t)addr, ctob(kup->ku_pagecnt)); - mtx_enter(&malloc_mtx, MTX_DEF); + mtx_lock(&malloc_mtx); size = kup->ku_pagecnt << PAGE_SHIFT; ksp->ks_memuse -= size; @@ -343,7 +343,7 @@ ksp->ks_inuse--; kbp->kb_total -= 1; splx(s); - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); return; } freep = (struct freelist *)addr; @@ -410,7 +410,7 @@ } #endif splx(s); - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); } /* @@ -540,7 +540,7 @@ #ifdef INVARIANTS s = splmem(); - mtx_enter(&malloc_mtx, MTX_DEF); + mtx_lock(&malloc_mtx); for (indx = 0; indx < MINBUCKET + 16; indx++) { kbp = bucket + indx; freep = (struct freelist*)kbp->kb_next; @@ -551,7 +551,7 @@ } } splx(s); - mtx_exit(&malloc_mtx, MTX_DEF); + mtx_unlock(&malloc_mtx); if (type->ks_memuse != 0) printf("malloc_uninit: %ld bytes of '%s' still allocated\n", Index: sys/kern/kern_mutex.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_mutex.c,v retrieving revision 1.40 diff -u -r1.40 kern_mutex.c --- sys/kern/kern_mutex.c 2001/01/27 07:51:34 1.40 +++ sys/kern/kern_mutex.c 2001/02/07 03:01:12 @@ -31,6 +31,11 @@ */ /* + * Machine independent bits of mutex implementation and implementation of + * `witness' structure & related debugging routines. + */ + +/* * Main Entry: witness * Pronunciation: 'wit-n&s * Function: noun @@ -53,12 +58,6 @@ #include "opt_ddb.h" #include "opt_witness.h" -/* - * Cause non-inlined mtx_*() to be compiled. - * Must be defined early because other system headers may include mutex.h. - */ -#define _KERN_MUTEX_C_ - #include #include #include @@ -82,9 +81,8 @@ #include /* - * Machine independent bits of the mutex implementation + * The WITNESS-enabled mutex debug structure. */ - #ifdef WITNESS struct mtx_debug { struct witness *mtxd_witness; @@ -99,139 +97,55 @@ #define mtx_witness mtx_debug->mtxd_witness #endif /* WITNESS */ -/* - * Assembly macros - *------------------------------------------------------------------------------ - */ - -#define _V(x) __STRING(x) - /* - * Default, unoptimized mutex micro-operations + * Internal utility macros. 
*/ - -#ifndef _obtain_lock -/* Actually obtain mtx_lock */ -#define _obtain_lock(mp, tid) \ - atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid)) -#endif - -#ifndef _release_lock -/* Actually release mtx_lock */ -#define _release_lock(mp, tid) \ - atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED) -#endif +#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) -#ifndef _release_lock_quick -/* Actually release mtx_lock quickly assuming that we own it */ -#define _release_lock_quick(mp) \ - atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED) -#endif +#define mtx_owner(m) (mtx_unowned((m)) ? NULL \ + : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK)) -#ifndef _getlock_sleep -/* Get a sleep lock, deal with recursion inline. */ -#define _getlock_sleep(mp, tid, type) do { \ - if (!_obtain_lock(mp, tid)) { \ - if (((mp)->mtx_lock & MTX_FLAGMASK) != ((uintptr_t)(tid)))\ - mtx_enter_hard(mp, (type) & MTX_HARDOPTS, 0); \ - else { \ - atomic_set_ptr(&(mp)->mtx_lock, MTX_RECURSED); \ - (mp)->mtx_recurse++; \ - } \ - } \ -} while (0) -#endif - -#ifndef _getlock_spin_block -/* Get a spin lock, handle recursion inline (as the less common case) */ -#define _getlock_spin_block(mp, tid, type) do { \ - u_int _mtx_intr = save_intr(); \ - disable_intr(); \ - if (!_obtain_lock(mp, tid)) \ - mtx_enter_hard(mp, (type) & MTX_HARDOPTS, _mtx_intr); \ - else \ - (mp)->mtx_saveintr = _mtx_intr; \ -} while (0) -#endif - -#ifndef _getlock_norecurse -/* - * Get a lock without any recursion handling. Calls the hard enter function if - * we can't get it inline. - */ -#define _getlock_norecurse(mp, tid, type) do { \ - if (!_obtain_lock(mp, tid)) \ - mtx_enter_hard((mp), (type) & MTX_HARDOPTS, 0); \ -} while (0) -#endif +#define RETIP(x) *(((uintptr_t *)(&x)) - 1) +#define SET_PRIO(p, pri) (p)->p_priority = (pri) -#ifndef _exitlock_norecurse /* - * Release a sleep lock assuming we haven't recursed on it, recursion is handled - * in the hard function. + * Early WITNESS-enabled declarations. */ -#define _exitlock_norecurse(mp, tid, type) do { \ - if (!_release_lock(mp, tid)) \ - mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \ -} while (0) -#endif +#ifdef WITNESS -#ifndef _exitlock /* - * Release a sleep lock when its likely we recursed (the code to - * deal with simple recursion is inline). - */ -#define _exitlock(mp, tid, type) do { \ - if (!_release_lock(mp, tid)) { \ - if ((mp)->mtx_lock & MTX_RECURSED) { \ - if (--((mp)->mtx_recurse) == 0) \ - atomic_clear_ptr(&(mp)->mtx_lock, \ - MTX_RECURSED); \ - } else { \ - mtx_exit_hard((mp), (type) & MTX_HARDOPTS); \ - } \ - } \ -} while (0) -#endif + * Internal WITNESS routines which must be prototyped early. + * + * XXX: When/if witness code is cleaned up, it would be wise to place all + * witness prototyping early in this file. + */ +static void witness_init(struct mtx *, int flag); +static void witness_destroy(struct mtx *); +static void witness_display(void(*)(const char *fmt, ...)); -#ifndef _exitlock_spin -/* Release a spin lock (with possible recursion). 
*/ -#define _exitlock_spin(mp) do { \ - if (!mtx_recursed((mp))) { \ - int _mtx_intr = (mp)->mtx_saveintr; \ - \ - _release_lock_quick(mp); \ - restore_intr(_mtx_intr); \ - } else { \ - (mp)->mtx_recurse--; \ - } \ -} while (0) -#endif +MALLOC_DEFINE(M_WITNESS, "witness", "witness mtx_debug structure"); -#ifdef WITNESS -static void witness_init(struct mtx *, int flag); -static void witness_destroy(struct mtx *); -static void witness_display(void(*)(const char *fmt, ...)); - /* All mutexes in system (used for debug/panic) */ static struct mtx_debug all_mtx_debug = { NULL, {NULL, NULL}, NULL, 0 }; + /* - * Set to 0 once mutexes have been fully initialized so that witness code can be - * safely executed. + * This global is set to 0 once it becomes safe to use the witness code. */ static int witness_cold = 1; + #else /* WITNESS */ -/* - * flag++ is slezoid way of shutting up unused parameter warning - * in mtx_init() +/* XXX XXX XXX + * flag++ is sleazoid way of shuting up warning */ #define witness_init(m, flag) flag++ #define witness_destroy(m) #define witness_try_enter(m, t, f, l) #endif /* WITNESS */ -/* All mutexes in system (used for debug/panic) */ +/* + * All mutex locks in system are kept on the all_mtx list. + */ static struct mtx all_mtx = { MTX_UNOWNED, 0, 0, 0, "All mutexes queue head", TAILQ_HEAD_INITIALIZER(all_mtx.mtx_blocked), { NULL, NULL }, &all_mtx, &all_mtx, @@ -242,19 +156,18 @@ #endif }; +/* + * Global variables for book keeping. + */ static int mtx_cur_cnt; static int mtx_max_cnt; +/* + * Prototypes for non-exported routines. + * + * NOTE: Prototypes for witness routines are placed at the bottom of the file. + */ static void propagate_priority(struct proc *); -static void mtx_enter_hard(struct mtx *, int type, int saveintr); -static void mtx_exit_hard(struct mtx *, int type); - -#define mtx_unowned(m) ((m)->mtx_lock == MTX_UNOWNED) -#define mtx_owner(m) (mtx_unowned(m) ? NULL \ - : (struct proc *)((m)->mtx_lock & MTX_FLAGMASK)) - -#define RETIP(x) *(((uintptr_t *)(&x)) - 1) -#define SET_PRIO(p, pri) (p)->p_priority = (pri) static void propagate_priority(struct proc *p) @@ -277,6 +190,7 @@ MPASS(m->mtx_lock == MTX_CONTESTED); return; } + MPASS(p->p_magic == P_MAGIC); KASSERT(p->p_stat != SSLEEP, ("sleeping process owns a mutex")); if (p->p_priority <= pri) @@ -314,7 +228,7 @@ * quit. */ if (p->p_stat == SRUN) { - printf("XXX: moving process %d(%s) to a new run queue\n", + printf("XXX: moving proc %d(%s) to a new run queue\n", p->p_pid, p->p_comm); MPASS(p->p_blocked == NULL); remrunqueue(p); @@ -338,6 +252,7 @@ printf("XXX: process %d(%s) is blocked on %s\n", p->p_pid, p->p_comm, m->mtx_description); + /* * Check if the proc needs to be moved up on * the blocked chain @@ -346,10 +261,11 @@ printf("XXX: process at head of run queue\n"); continue; } + p1 = TAILQ_PREV(p, rq, p_procq); if (p1->p_priority <= pri) { printf( - "XXX: previous process %d(%s) has higher priority\n", + "XXX: previous process %d(%s) has higher priority\n", p->p_pid, p->p_comm); continue; } @@ -367,6 +283,7 @@ if (p1->p_priority > pri) break; } + MPASS(p1 != NULL); TAILQ_INSERT_BEFORE(p1, p, p_procq); CTR4(KTR_LOCK, @@ -376,421 +293,332 @@ } /* - * Get lock 'm', the macro handles the easy (and most common cases) and leaves - * the slow stuff to the mtx_enter_hard() function. - * - * Note: since type is usually a constant much of this code is optimized out. + * The important part of mtx_trylock{,_flags}() + * Tries to acquire lock `m.' 
We do NOT handle recursion here; we assume that + * if we're called, it's because we know we don't already own this lock. */ -void -_mtx_enter(struct mtx *mtxp, int type, const char *file, int line) +int +_mtx_trylock(struct mtx *m, int opts, const char *file, int line) { - struct mtx *mpp = mtxp; + int rval; - /* bits only valid on mtx_exit() */ - MPASS4(((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0, - STR_mtx_bad_type, file, line); + KASSERT(CURPROC != NULL, ("curproc is NULL in _mtx_trylock")); - if ((type) & MTX_SPIN) { - /* - * Easy cases of spin locks: - * - * 1) We already own the lock and will simply recurse on it (if - * RLIKELY) - * - * 2) The lock is free, we just get it - */ - if ((type) & MTX_RLIKELY) { - /* - * Check for recursion, if we already have this - * lock we just bump the recursion count. - */ - if (mpp->mtx_lock == (uintptr_t)CURTHD) { - mpp->mtx_recurse++; - goto done; - } - } - - if (((type) & MTX_TOPHALF) == 0) { - /* - * If an interrupt thread uses this we must block - * interrupts here. - */ - if ((type) & MTX_FIRST) { - ASS_IEN; - disable_intr(); - _getlock_norecurse(mpp, CURTHD, - (type) & MTX_HARDOPTS); - } else { - _getlock_spin_block(mpp, CURTHD, - (type) & MTX_HARDOPTS); - } - } else - _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS); - } else { - /* Sleep locks */ - if ((type) & MTX_RLIKELY) - _getlock_sleep(mpp, CURTHD, (type) & MTX_HARDOPTS); - else - _getlock_norecurse(mpp, CURTHD, (type) & MTX_HARDOPTS); - } -done: - WITNESS_ENTER(mpp, type, file, line); - if (((type) & MTX_QUIET) == 0) - CTR5(KTR_LOCK, STR_mtx_enter_fmt, - mpp->mtx_description, mpp, mpp->mtx_recurse, file, line); - -} + /* + * _mtx_trylock does not accept MTX_NOSWITCH option. + */ + MPASS((opts & MTX_NOSWITCH) == 0); -/* - * Attempt to get MTX_DEF lock, return non-zero if lock acquired. - * - * XXX DOES NOT HANDLE RECURSION - */ -int -_mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line) -{ - struct mtx *const mpp = mtxp; - int rval; + rval = _obtain_lock(m, CURTHD); - rval = _obtain_lock(mpp, CURTHD); #ifdef WITNESS - if (rval && mpp->mtx_witness != NULL) { - MPASS(mpp->mtx_recurse == 0); - witness_try_enter(mpp, type, file, line); + if (rval && m->mtx_witness != NULL) { + /* + * We do not handle recursion in _mtx_trylock; see the + * note at the top of the routine. + */ + MPASS(!mtx_recursed(m)); + witness_try_enter(m, (opts | m->mtx_flags), file, line); } #endif /* WITNESS */ - if (((type) & MTX_QUIET) == 0) - CTR5(KTR_LOCK, STR_mtx_try_enter_fmt, - mpp->mtx_description, mpp, rval, file, line); + + if ((opts & MTX_QUIET) == 0) + CTR5(KTR_LOCK, "TRY_ENTER %s [%p] result=%d at %s:%d", + m->mtx_description, m, rval, file, line); return rval; } /* - * Release lock m. + * _mtx_lock_sleep: the tougher part of acquiring an MTX_DEF lock. + * + * We call this if the lock is either contested (i.e. we need to go to + * sleep waiting for it), or if we need to recurse on it. 
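Usage-wise the try path is untouched by the rename; only the spelling moves from mtx_try_enter() to mtx_trylock(). A hedged consumer sketch, assuming the public mtx_trylock() macro wraps _mtx_trylock() above; the foo_* names and helpers are illustrative:

    if (mtx_trylock(&foo_cache_mtx)) {
            /* Acquired without blocking; do the quick update. */
            foo_cache_update();
            mtx_unlock(&foo_cache_mtx);
    } else {
            /* Contended; defer the work rather than sleep here. */
            foo_defer_update();
    }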
*/ void -_mtx_exit(struct mtx *mtxp, int type, const char *file, int line) +_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line) { - struct mtx *const mpp = mtxp; - - MPASS4(mtx_owned(mpp), STR_mtx_owned, file, line); - WITNESS_EXIT(mpp, type, file, line); - if (((type) & MTX_QUIET) == 0) - CTR5(KTR_LOCK, STR_mtx_exit_fmt, - mpp->mtx_description, mpp, mpp->mtx_recurse, file, line); - if ((type) & MTX_SPIN) { - if ((type) & MTX_NORECURSE) { - int mtx_intr = mpp->mtx_saveintr; + struct proc *p = CURPROC; - MPASS4(mpp->mtx_recurse == 0, STR_mtx_recurse, - file, line); - _release_lock_quick(mpp); - if (((type) & MTX_TOPHALF) == 0) { - if ((type) & MTX_FIRST) { - ASS_IDIS; - enable_intr(); - } else - restore_intr(mtx_intr); - } - } else { - if (((type & MTX_TOPHALF) == 0) && - (type & MTX_FIRST)) { - ASS_IDIS; - ASS_SIEN(mpp); - } - _exitlock_spin(mpp); - } - } else { - /* Handle sleep locks */ - if ((type) & MTX_RLIKELY) - _exitlock(mpp, CURTHD, (type) & MTX_HARDOPTS); - else { - _exitlock_norecurse(mpp, CURTHD, - (type) & MTX_HARDOPTS); - } + if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) { + m->mtx_recurse++; + atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "_mtx_lock_sleep: %p recurse", m); + return; } -} -void -mtx_enter_hard(struct mtx *m, int type, int saveintr) -{ - struct proc *p = CURPROC; + if ((opts & MTX_QUIET) == 0) + CTR3(KTR_LOCK, "mtx_lock: %p contested (lock=%p) [%p]", m, + (void *)m->mtx_lock, (void *)RETIP(m)); + + /* + * Save our priority. Even though p_nativepri is protected by + * sched_lock, we don't obtain it here as it can be expensive. + * Since this is the only place p_nativepri is set, and since two + * CPUs will not be executing the same process concurrently, we know + * that no other CPU is going to be messing with this. Also, + * p_nativepri is only read when we are blocked on a mutex, so that + * can't be happening right now either. + */ + p->p_nativepri = p->p_priority; - KASSERT(p != NULL, ("curproc is NULL in mutex")); + while (!_obtain_lock(m, p)) { + uintptr_t v; + struct proc *p1; - switch (type) { - case MTX_DEF: - if ((m->mtx_lock & MTX_FLAGMASK) == (uintptr_t)p) { - m->mtx_recurse++; - atomic_set_ptr(&m->mtx_lock, MTX_RECURSED); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_enter: %p recurse", m); - return; + mtx_lock_spin(&sched_lock); + /* + * Check if the lock has been released while spinning for + * the sched_lock. + */ + if ((v = m->mtx_lock) == MTX_UNOWNED) { + mtx_unlock_spin(&sched_lock); + continue; } - if ((type & MTX_QUIET) == 0) - CTR3(KTR_LOCK, - "mtx_enter: %p contested (lock=%p) [%p]", - m, (void *)m->mtx_lock, (void *)RETIP(m)); /* - * Save our priority. Even though p_nativepri is protected - * by sched_lock, we don't obtain it here as it can be - * expensive. Since this is the only place p_nativepri is - * set, and since two CPUs will not be executing the same - * process concurrently, we know that no other CPU is going - * to be messing with this. Also, p_nativepri is only read - * when we are blocked on a mutex, so that can't be happening - * right now either. + * The mutex was marked contested on release. This means that + * there are processes blocked on it. 
*/ - p->p_nativepri = p->p_priority; - while (!_obtain_lock(m, p)) { - uintptr_t v; - struct proc *p1; + if (v == MTX_CONTESTED) { + p1 = TAILQ_FIRST(&m->mtx_blocked); + KASSERT(p1 != NULL, + ("contested mutex has no contesters")); + m->mtx_lock = (uintptr_t)p | MTX_CONTESTED; + + if (p1->p_priority < p->p_priority) + SET_PRIO(p, p1->p_priority); + mtx_unlock_spin(&sched_lock); + return; + } - mtx_enter(&sched_lock, MTX_SPIN | MTX_RLIKELY); - /* - * check if the lock has been released while - * waiting for the schedlock. - */ - if ((v = m->mtx_lock) == MTX_UNOWNED) { - mtx_exit(&sched_lock, MTX_SPIN); - continue; - } - /* - * The mutex was marked contested on release. This - * means that there are processes blocked on it. - */ - if (v == MTX_CONTESTED) { - p1 = TAILQ_FIRST(&m->mtx_blocked); - KASSERT(p1 != NULL, ("contested mutex has no contesters")); - KASSERT(p != NULL, ("curproc is NULL for contested mutex")); - m->mtx_lock = (uintptr_t)p | MTX_CONTESTED; - if (p1->p_priority < p->p_priority) { - SET_PRIO(p, p1->p_priority); - } - mtx_exit(&sched_lock, MTX_SPIN); - return; - } - /* - * If the mutex isn't already contested and - * a failure occurs setting the contested bit the - * mutex was either release or the - * state of the RECURSION bit changed. - */ - if ((v & MTX_CONTESTED) == 0 && - !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, - (void *)(v | MTX_CONTESTED))) { - mtx_exit(&sched_lock, MTX_SPIN); - continue; - } + /* + * If the mutex isn't already contested and a failure occurs + * setting the contested bit, the mutex was either released + * or the state of the MTX_RECURSED bit changed. + */ + if ((v & MTX_CONTESTED) == 0 && + !atomic_cmpset_ptr(&m->mtx_lock, (void *)v, + (void *)(v | MTX_CONTESTED))) { + mtx_unlock_spin(&sched_lock); + continue; + } - /* We definitely have to sleep for this lock */ - mtx_assert(m, MA_NOTOWNED); + /* + * We deffinately must sleep for this lock. + */ + mtx_assert(m, MA_NOTOWNED); #ifdef notyet - /* - * If we're borrowing an interrupted thread's VM - * context must clean up before going to sleep. - */ - if (p->p_flag & (P_ITHD | P_SITHD)) { - ithd_t *it = (ithd_t *)p; + /* + * If we're borrowing an interrupted thread's VM context, we + * must clean up before going to sleep. + */ + if (p->p_flag & (P_ITHD | P_SITHD)) { + ithd_t *it = (ithd_t *)p; - if (it->it_interrupted) { - if ((type & MTX_QUIET) == 0) - CTR2(KTR_LOCK, - "mtx_enter: 0x%x interrupted 0x%x", - it, it->it_interrupted); - intr_thd_fixup(it); - } + if (it->it_interrupted) { + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, + "mtx_lock: 0x%x interrupted 0x%x", + it, it->it_interrupted); + intr_thd_fixup(it); } + } #endif - /* Put us on the list of procs blocked on this mutex */ - if (TAILQ_EMPTY(&m->mtx_blocked)) { - p1 = (struct proc *)(m->mtx_lock & - MTX_FLAGMASK); - LIST_INSERT_HEAD(&p1->p_contested, m, - mtx_contested); + /* + * Put us on the list of threads blocked on this mutex. 
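Each pass through the loop above resolves, under sched_lock, to one of three cases: the lock went free (retry the CAS), the lock is transiently MTX_CONTESTED (claim it directly and inherit the top waiter's priority), or it is owned, in which case the contested bit must be set before blocking. That last step is the subtle one; condensed, with the same atomic_cmpset_ptr() primitive used in the patch:

    /*
     * Try to set MTX_CONTESTED on an owned lock word v.  A failed
     * CAS means the word changed underneath us (a release, or a
     * recursion-bit change), so drop sched_lock and retry from the
     * top of the loop.
     */
    if ((v & MTX_CONTESTED) == 0 &&
        !atomic_cmpset_ptr(&m->mtx_lock, (void *)v,
        (void *)(v | MTX_CONTESTED))) {
            mtx_unlock_spin(&sched_lock);
            continue;
    }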
+ */ + if (TAILQ_EMPTY(&m->mtx_blocked)) { + p1 = (struct proc *)(m->mtx_lock & MTX_FLAGMASK); + LIST_INSERT_HEAD(&p1->p_contested, m, mtx_contested); + TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq); + } else { + TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) + if (p1->p_priority > p->p_priority) + break; + if (p1) + TAILQ_INSERT_BEFORE(p1, p, p_procq); + else TAILQ_INSERT_TAIL(&m->mtx_blocked, p, p_procq); - } else { - TAILQ_FOREACH(p1, &m->mtx_blocked, p_procq) - if (p1->p_priority > p->p_priority) - break; - if (p1) - TAILQ_INSERT_BEFORE(p1, p, p_procq); - else - TAILQ_INSERT_TAIL(&m->mtx_blocked, p, - p_procq); - } + } - p->p_blocked = m; /* Who we're blocked on */ - p->p_mtxname = m->mtx_description; - p->p_stat = SMTX; + /* + * Save who we're blocked on. + */ + p->p_blocked = m; + p->p_mtxname = m->mtx_description; + p->p_stat = SMTX; #if 0 - propagate_priority(p); + propagate_priority(p); #endif - if ((type & MTX_QUIET) == 0) - CTR3(KTR_LOCK, - "mtx_enter: p %p blocked on [%p] %s", - p, m, m->mtx_description); - mi_switch(); - if ((type & MTX_QUIET) == 0) - CTR3(KTR_LOCK, - "mtx_enter: p %p free from blocked on [%p] %s", - p, m, m->mtx_description); - mtx_exit(&sched_lock, MTX_SPIN); - } - return; - case MTX_SPIN: - case MTX_SPIN | MTX_FIRST: - case MTX_SPIN | MTX_TOPHALF: - { - int i = 0; - if (m->mtx_lock == (uintptr_t)p) { - m->mtx_recurse++; - return; - } - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_enter: %p spinning", m); - for (;;) { - if (_obtain_lock(m, p)) - break; - while (m->mtx_lock != MTX_UNOWNED) { - if (i++ < 1000000) - continue; - if (i++ < 6000000) - DELAY (1); + if ((opts & MTX_QUIET) == 0) + CTR3(KTR_LOCK, + "_mtx_lock_sleep: p %p blocked on [%p] %s", p, m, + m->mtx_description); + + mi_switch(); + + if ((opts & MTX_QUIET) == 0) + CTR3(KTR_LOCK, + "_mtx_lock_sleep: p %p free from blocked on [%p] %s", + p, m, m->mtx_description); + + mtx_unlock_spin(&sched_lock); + } + + return; +} + +/* + * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock. + * + * This is only called if we need to actually spin for the lock. Recursion + * is handled inline. + */ +void +_mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, const char *file, + int line) +{ + int i = 0; + + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "mtx_lock_spin: %p spinning", m); + + for (;;) { + if (_obtain_lock(m, CURPROC)) + break; + + while (m->mtx_lock != MTX_UNOWNED) { + if (i++ < 1000000) + continue; + if (i++ < 6000000) + DELAY(1); #ifdef DDB - else if (!db_active) + else if (!db_active) #else - else + else #endif - panic( - "spin lock %s held by %p for > 5 seconds", - m->mtx_description, - (void *)m->mtx_lock); - } + panic("spin lock %s held by %p for > 5 seconds", + m->mtx_description, (void *)m->mtx_lock); } - -#ifdef MUTEX_DEBUG - if (type != MTX_SPIN) - m->mtx_saveintr = 0xbeefface; - else -#endif - m->mtx_saveintr = saveintr; - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_enter: %p spin done", m); - return; - } } + + m->mtx_saveintr = mtx_intr; + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m); + + return; } +/* + * _mtx_unlock_sleep: the tougher part of releasing an MTX_DEF lock. + * + * We are only called here if the lock is recursed or contested (i.e. we + * need to wake up a blocked thread). 
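The spin path in _mtx_lock_spin() above stays deliberately simple: spin hot on the lock word, back off with DELAY() once that has gone on long enough, then panic so a wedged spin lock shows up as a diagnosable failure instead of a silent hang. Roughly (the DDB db_active special case is omitted here):

    while (m->mtx_lock != MTX_UNOWNED) {
            if (i++ < 1000000)
                    continue;               /* pure busy-wait first */
            if (i++ < 6000000)
                    DELAY(1);               /* then 1us back-off steps */
            else
                    panic("spin lock %s held by %p for > 5 seconds",
                        m->mtx_description, (void *)m->mtx_lock);
    }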
+ */ void -mtx_exit_hard(struct mtx *m, int type) +_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line) { struct proc *p, *p1; struct mtx *m1; int pri; p = CURPROC; - switch (type) { - case MTX_DEF: - case MTX_DEF | MTX_NOSWITCH: - if (mtx_recursed(m)) { - if (--(m->mtx_recurse) == 0) - atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_exit: %p unrecurse", m); - return; - } - mtx_enter(&sched_lock, MTX_SPIN); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_exit: %p contested", m); - p1 = TAILQ_FIRST(&m->mtx_blocked); - MPASS(p->p_magic == P_MAGIC); - MPASS(p1->p_magic == P_MAGIC); - TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq); - if (TAILQ_EMPTY(&m->mtx_blocked)) { - LIST_REMOVE(m, mtx_contested); - _release_lock_quick(m); - if ((type & MTX_QUIET) == 0) - CTR1(KTR_LOCK, "mtx_exit: %p not held", m); - } else - atomic_store_rel_ptr(&m->mtx_lock, - (void *)MTX_CONTESTED); - pri = MAXPRI; - LIST_FOREACH(m1, &p->p_contested, mtx_contested) { - int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority; - if (cp < pri) - pri = cp; - } - if (pri > p->p_nativepri) - pri = p->p_nativepri; - SET_PRIO(p, pri); - if ((type & MTX_QUIET) == 0) - CTR2(KTR_LOCK, - "mtx_exit: %p contested setrunqueue %p", m, p1); - p1->p_blocked = NULL; - p1->p_mtxname = NULL; - p1->p_stat = SRUN; - setrunqueue(p1); - if ((type & MTX_NOSWITCH) == 0 && p1->p_priority < pri) { + MPASS4(mtx_owned(m), "mtx_owned(mpp)", file, line); + + if ((opts & MTX_QUIET) == 0) + CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", m->mtx_description, + m, m->mtx_recurse, file, line); + + if (mtx_recursed(m)) { + if (--(m->mtx_recurse) == 0) + atomic_clear_ptr(&m->mtx_lock, MTX_RECURSED); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p unrecurse", m); + return; + } + + mtx_lock_spin(&sched_lock); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m); + + p1 = TAILQ_FIRST(&m->mtx_blocked); + MPASS(p->p_magic == P_MAGIC); + MPASS(p1->p_magic == P_MAGIC); + + TAILQ_REMOVE(&m->mtx_blocked, p1, p_procq); + + if (TAILQ_EMPTY(&m->mtx_blocked)) { + LIST_REMOVE(m, mtx_contested); + _release_lock_quick(m); + if ((opts & MTX_QUIET) == 0) + CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p not held", m); + } else + atomic_store_rel_ptr(&m->mtx_lock, (void *)MTX_CONTESTED); + + pri = MAXPRI; + LIST_FOREACH(m1, &p->p_contested, mtx_contested) { + int cp = TAILQ_FIRST(&m1->mtx_blocked)->p_priority; + if (cp < pri) + pri = cp; + } + + if (pri > p->p_nativepri) + pri = p->p_nativepri; + SET_PRIO(p, pri); + + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p contested setrunqueue %p", + m, p1); + + p1->p_blocked = NULL; + p1->p_mtxname = NULL; + p1->p_stat = SRUN; + setrunqueue(p1); + + if ((opts & MTX_NOSWITCH) == 0 && p1->p_priority < pri) { #ifdef notyet - if (p->p_flag & (P_ITHD | P_SITHD)) { - ithd_t *it = (ithd_t *)p; + if (p->p_flag & (P_ITHD | P_SITHD)) { + ithd_t *it = (ithd_t *)p; - if (it->it_interrupted) { - if ((type & MTX_QUIET) == 0) - CTR2(KTR_LOCK, - "mtx_exit: 0x%x interruped 0x%x", - it, it->it_interrupted); - intr_thd_fixup(it); - } + if (it->it_interrupted) { + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, + "_mtx_unlock_sleep: 0x%x interrupted 0x%x", + it, it->it_interrupted); + intr_thd_fixup(it); } -#endif - setrunqueue(p); - if ((type & MTX_QUIET) == 0) - CTR2(KTR_LOCK, - "mtx_exit: %p switching out lock=%p", - m, (void *)m->mtx_lock); - mi_switch(); - if ((type & MTX_QUIET) == 0) - CTR2(KTR_LOCK, - "mtx_exit: %p 
resuming lock=%p", - m, (void *)m->mtx_lock); - } - mtx_exit(&sched_lock, MTX_SPIN); - break; - case MTX_SPIN: - case MTX_SPIN | MTX_FIRST: - if (mtx_recursed(m)) { - m->mtx_recurse--; - return; } - MPASS(mtx_owned(m)); - _release_lock_quick(m); - if (type & MTX_FIRST) - enable_intr(); /* XXX is this kosher? */ - else { - MPASS(m->mtx_saveintr != 0xbeefface); - restore_intr(m->mtx_saveintr); - } - break; - case MTX_SPIN | MTX_TOPHALF: - if (mtx_recursed(m)) { - m->mtx_recurse--; - return; - } - MPASS(mtx_owned(m)); - _release_lock_quick(m); - break; - default: - panic("mtx_exit_hard: unsupported type 0x%x\n", type); +#endif + setrunqueue(p); + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, + "_mtx_unlock_sleep: %p switching out lock=%p", m, + (void *)m->mtx_lock); + + mi_switch(); + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, "_mtx_unlock_sleep: %p resuming lock=%p", + m, (void *)m->mtx_lock); } + + mtx_unlock_spin(&sched_lock); + + return; } +/* + * All the unlocking of MTX_SPIN locks is done inline. + * See the _rel_spin_lock() macro for the details. + */ + +/* + * The INVARIANTS-enabled mtx_assert() + */ #ifdef INVARIANTS void _mtx_assert(struct mtx *m, int what, const char *file, int line) @@ -822,6 +650,9 @@ } #endif +/* + * The MUTEX_DEBUG-enabled mtx_validate() + */ #define MV_DESTROY 0 /* validate before destory */ #define MV_INIT 1 /* validate before init */ @@ -843,7 +674,7 @@ if (m == &all_mtx || cold) return 0; - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); /* * XXX - When kernacc() is fixed on the alpha to handle K0_SEG memory properly * we can re-enable the kernacc() checks. @@ -887,50 +718,63 @@ retval = 1; } } - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); return (retval); } #endif +/* + * Mutex initialization routine; initialize lock `m' of type contained in + * `opts' with options contained in `opts' and description `description.' + * Place on "all_mtx" queue. + */ void -mtx_init(struct mtx *m, const char *t, int flag) +mtx_init(struct mtx *m, const char *description, int opts) { - if ((flag & MTX_QUIET) == 0) - CTR2(KTR_LOCK, "mtx_init %p (%s)", m, t); + + if ((opts & MTX_QUIET) == 0) + CTR2(KTR_LOCK, "mtx_init %p (%s)", m, description); + #ifdef MUTEX_DEBUG - if (mtx_validate(m, MV_INIT)) /* diagnostic and error correction */ + /* Diagnostic and error correction */ + if (mtx_validate(m, MV_INIT)) return; #endif bzero((void *)m, sizeof *m); TAILQ_INIT(&m->mtx_blocked); + #ifdef WITNESS if (!witness_cold) { - /* XXX - should not use DEVBUF */ m->mtx_debug = malloc(sizeof(struct mtx_debug), - M_DEVBUF, M_NOWAIT | M_ZERO); + M_WITNESS, M_NOWAIT | M_ZERO); MPASS(m->mtx_debug != NULL); } #endif - m->mtx_description = t; - m->mtx_flags = flag; + m->mtx_description = description; + m->mtx_flags = opts; m->mtx_lock = MTX_UNOWNED; + /* Put on all mutex queue */ - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); m->mtx_next = &all_mtx; m->mtx_prev = all_mtx.mtx_prev; m->mtx_prev->mtx_next = m; all_mtx.mtx_prev = m; if (++mtx_cur_cnt > mtx_max_cnt) mtx_max_cnt = mtx_cur_cnt; - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); + #ifdef WITNESS if (!witness_cold) - witness_init(m, flag); + witness_init(m, opts); #endif } +/* + * Remove lock `m' from all_mtx queue. 
+ */ void mtx_destroy(struct mtx *m) { @@ -939,7 +783,9 @@ KASSERT(!witness_cold, ("%s: Cannot destroy while still cold\n", __FUNCTION__)); #endif + CTR2(KTR_LOCK, "mtx_destroy %p (%s)", m, m->mtx_description); + #ifdef MUTEX_DEBUG if (m->mtx_next == NULL) panic("mtx_destroy: %p (%s) already destroyed", @@ -950,7 +796,9 @@ } else { MPASS((m->mtx_lock & (MTX_RECURSED|MTX_CONTESTED)) == 0); } - mtx_validate(m, MV_DESTROY); /* diagnostic */ + + /* diagnostic */ + mtx_validate(m, MV_DESTROY); #endif #ifdef WITNESS @@ -959,25 +807,27 @@ #endif /* WITNESS */ /* Remove from the all mutex queue */ - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); m->mtx_next->mtx_prev = m->mtx_prev; m->mtx_prev->mtx_next = m->mtx_next; + #ifdef MUTEX_DEBUG m->mtx_next = m->mtx_prev = NULL; #endif + #ifdef WITNESS - free(m->mtx_debug, M_DEVBUF); + free(m->mtx_debug, M_WITNESS); m->mtx_debug = NULL; #endif + mtx_cur_cnt--; - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); } + /* - * The non-inlined versions of the mtx_*() functions are always built (above), - * but the witness code depends on the WITNESS kernel option being specified. + * The WITNESS-enabled diagnostic code. */ - #ifdef WITNESS static void witness_fixup(void *dummy __unused) @@ -988,26 +838,26 @@ * We have to release Giant before initializing its witness * structure so that WITNESS doesn't get confused. */ - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); mtx_assert(&Giant, MA_NOTOWNED); - mtx_enter(&all_mtx, MTX_DEF); + mtx_lock(&all_mtx); + /* Iterate through all mutexes and finish up mutex initialization. */ for (mp = all_mtx.mtx_next; mp != &all_mtx; mp = mp->mtx_next) { - /* XXX - should not use DEVBUF */ mp->mtx_debug = malloc(sizeof(struct mtx_debug), - M_DEVBUF, M_NOWAIT | M_ZERO); + M_WITNESS, M_NOWAIT | M_ZERO); MPASS(mp->mtx_debug != NULL); witness_init(mp, mp->mtx_flags); } - mtx_exit(&all_mtx, MTX_DEF); + mtx_unlock(&all_mtx); /* Mark the witness code as being ready for use. 
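What the witness machinery initialized above buys is runtime detection of lock-order reversals. The kind of consumer bug it catches, sketched with two hypothetical sleep mutexes:

    /* Path 1 establishes the order a_mtx before b_mtx. */
    mtx_lock(&a_mtx);
    mtx_lock(&b_mtx);
    mtx_unlock(&b_mtx);
    mtx_unlock(&a_mtx);

    /*
     * Path 2 reverses it; witness flags the reversal (console
     * warning or panic, depending on configuration).
     */
    mtx_lock(&b_mtx);
    mtx_lock(&a_mtx);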
*/ atomic_store_rel_int(&witness_cold, 0); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); } SYSINIT(wtnsfxup, SI_SUB_MUTEX, SI_ORDER_FIRST, witness_fixup, NULL) @@ -1061,6 +911,9 @@ SYSCTL_INT(_debug, OID_AUTO, witness_skipspin, CTLFLAG_RD, &witness_skipspin, 0, ""); +/* + * Witness-enabled globals + */ static struct mtx w_mtx; static struct witness *w_free; static struct witness *w_all; @@ -1068,21 +921,23 @@ static int witness_dead; /* fatal error, probably no memory */ static struct witness w_data[WITNESS_COUNT]; - -static struct witness *enroll __P((const char *description, int flag)); -static int itismychild __P((struct witness *parent, struct witness *child)); -static void removechild __P((struct witness *parent, struct witness *child)); -static int isitmychild __P((struct witness *parent, struct witness *child)); -static int isitmydescendant __P((struct witness *parent, struct witness *child)); -static int dup_ok __P((struct witness *)); -static int blessed __P((struct witness *, struct witness *)); -static void witness_displaydescendants - __P((void(*)(const char *fmt, ...), struct witness *)); -static void witness_leveldescendents __P((struct witness *parent, int level)); -static void witness_levelall __P((void)); -static struct witness * witness_get __P((void)); -static void witness_free __P((struct witness *m)); +/* + * Internal witness routine prototypes + */ +static struct witness *enroll(const char *description, int flag); +static int itismychild(struct witness *parent, struct witness *child); +static void removechild(struct witness *parent, struct witness *child); +static int isitmychild(struct witness *parent, struct witness *child); +static int isitmydescendant(struct witness *parent, struct witness *child); +static int dup_ok(struct witness *); +static int blessed(struct witness *, struct witness *); +static void + witness_displaydescendants(void(*)(const char *fmt, ...), struct witness *); +static void witness_leveldescendents(struct witness *parent, int level); +static void witness_levelall(void); +static struct witness * witness_get(void); +static void witness_free(struct witness *m); static char *ignore_list[] = { "witness lock", @@ -1129,7 +984,8 @@ */ static struct witness_blessed blessed_list[] = { }; -static int blessed_count = sizeof(blessed_list) / sizeof(struct witness_blessed); +static int blessed_count = + sizeof(blessed_list) / sizeof(struct witness_blessed); static void witness_init(struct mtx *m, int flag) @@ -1211,17 +1067,17 @@ file, line); return; } - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_spin_flags(&w_mtx, MTX_QUIET); i = PCPU_GET(witness_spin_check); if (i != 0 && w->w_level < i) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); panic("mutex_enter(%s:%x, MTX_SPIN) out of order @" " %s:%d already holding %s:%x", m->mtx_description, w->w_level, file, line, spin_order_list[ffs(i)-1], i); } PCPU_SET(witness_spin_check, i | w->w_level); - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); w->w_file = file; w->w_line = line; m->mtx_line = line; @@ -1245,7 +1101,7 @@ goto out; if (!mtx_legal2block()) - panic("blockable mtx_enter() of %s when not legal @ %s:%d", + panic("blockable mtx_lock() of %s when not legal @ %s:%d", m->mtx_description, file, line); /* * Is this the first mutex acquired @@ -1267,16 +1123,16 @@ goto out; } MPASS(!mtx_owned(&w_mtx)); - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_spin_flags(&w_mtx, MTX_QUIET); /* * If we have a known higher 
number just say ok */ if (witness_watch > 1 && w->w_level > w1->w_level) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); goto out; } if (isitmydescendant(m1->mtx_witness, w)) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); goto out; } for (i = 0; m1 != NULL; m1 = LIST_NEXT(m1, mtx_held), i++) { @@ -1284,7 +1140,7 @@ MPASS(i < 200); w1 = m1->mtx_witness; if (isitmydescendant(w, w1)) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); if (blessed(w, w1)) goto out; if (m1 == &Giant) { @@ -1313,7 +1169,7 @@ } m1 = LIST_FIRST(&p->p_heldmtx); if (!itismychild(m1->mtx_witness, w)) - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); out: #ifdef DDB @@ -1356,10 +1212,10 @@ m->mtx_description, file, line); return; } - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_spin_flags(&w_mtx, MTX_QUIET); PCPU_SET(witness_spin_check, PCPU_GET(witness_spin_check) | w->w_level); - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); w->w_file = file; w->w_line = line; m->mtx_line = line; @@ -1407,10 +1263,10 @@ file, line); return; } - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_spin_flags(&w_mtx, MTX_QUIET); PCPU_SET(witness_spin_check, PCPU_GET(witness_spin_check) & ~w->w_level); - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); return; } if ((m->mtx_flags & MTX_SPIN) != 0) @@ -1426,7 +1282,7 @@ } if ((flags & MTX_NOSWITCH) == 0 && !mtx_legal2block() && !cold) - panic("switchable mtx_exit() of %s when not legal @ %s:%d", + panic("switchable mtx_unlock() of %s when not legal @ %s:%d", m->mtx_description, file, line); LIST_REMOVE(m, mtx_held); m->mtx_held.le_prev = NULL; @@ -1497,10 +1353,10 @@ } if ((flag & MTX_SPIN) && witness_skipspin) return (NULL); - mtx_enter(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_lock_spin_flags(&w_mtx, MTX_QUIET); for (w = w_all; w; w = w->w_next) { if (strcmp(description, w->w_description) == 0) { - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); return (w); } } @@ -1509,7 +1365,7 @@ w->w_next = w_all; w_all = w; w->w_description = description; - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); if (flag & MTX_SPIN) { w->w_spin = 1; @@ -1731,7 +1587,7 @@ if ((w = w_free) == NULL) { witness_dead = 1; - mtx_exit(&w_mtx, MTX_SPIN | MTX_QUIET); + mtx_unlock_spin_flags(&w_mtx, MTX_QUIET); printf("witness exhausted\n"); return (NULL); } Index: sys/kern/kern_proc.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_proc.c,v retrieving revision 1.83 diff -u -r1.83 kern_proc.c --- sys/kern/kern_proc.c 2001/01/24 12:49:50 1.83 +++ sys/kern/kern_proc.c 2001/02/07 03:01:12 @@ -403,7 +403,7 @@ kp->ki_sigignore = p->p_procsig->ps_sigignore; kp->ki_sigcatch = p->p_procsig->ps_sigcatch; } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat != SIDL && p->p_stat != SZOMB && p->p_vmspace != NULL) { struct vmspace *vm = p->p_vmspace; @@ -449,7 +449,7 @@ kp->ki_rqindex = p->p_rqindex; kp->ki_oncpu = p->p_oncpu; kp->ki_lastcpu = p->p_lastcpu; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); sp = NULL; if (p->p_pgrp) { kp->ki_pgid = p->p_pgrp->pg_id; Index: sys/kern/kern_prot.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_prot.c,v retrieving revision 1.72 
diff -u -r1.72 kern_prot.c --- sys/kern/kern_prot.c 2001/02/06 12:05:26 1.72 +++ sys/kern/kern_prot.c 2001/02/07 03:01:12 @@ -1155,9 +1155,9 @@ struct ucred *cr; { - mtx_enter(&cr->cr_mtx, MTX_DEF); + mtx_lock(&cr->cr_mtx); cr->cr_ref++; - mtx_exit(&(cr)->cr_mtx, MTX_DEF); + mtx_unlock(&(cr)->cr_mtx); } @@ -1170,7 +1170,7 @@ struct ucred *cr; { - mtx_enter(&cr->cr_mtx, MTX_DEF); + mtx_lock(&cr->cr_mtx); if (--cr->cr_ref == 0) { mtx_destroy(&cr->cr_mtx); /* @@ -1182,7 +1182,7 @@ uifree(cr->cr_uidinfo); FREE((caddr_t)cr, M_CRED); } else { - mtx_exit(&cr->cr_mtx, MTX_DEF); + mtx_unlock(&cr->cr_mtx); } } @@ -1195,12 +1195,12 @@ { struct ucred *newcr; - mtx_enter(&cr->cr_mtx, MTX_DEF); + mtx_lock(&cr->cr_mtx); if (cr->cr_ref == 1) { - mtx_exit(&cr->cr_mtx, MTX_DEF); + mtx_unlock(&cr->cr_mtx); return (cr); } - mtx_exit(&cr->cr_mtx, MTX_DEF); + mtx_unlock(&cr->cr_mtx); newcr = crdup(cr); crfree(cr); return (newcr); Index: sys/kern/kern_resource.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_resource.c,v retrieving revision 1.71 diff -u -r1.71 kern_resource.c --- sys/kern/kern_resource.c 2001/01/24 11:06:39 1.71 +++ sys/kern/kern_resource.c 2001/02/07 03:01:12 @@ -610,9 +610,9 @@ case RUSAGE_SELF: rup = &p->p_stats->p_ru; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); calcru(p, &rup->ru_utime, &rup->ru_stime, NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); break; case RUSAGE_CHILDREN: @@ -724,12 +724,12 @@ { struct uidinfo *uip; - mtx_enter(&uihashtbl_mtx, MTX_DEF); + mtx_lock(&uihashtbl_mtx); uip = uilookup(uid); if (uip == NULL) uip = uicreate(uid); uihold(uip); - mtx_exit(&uihashtbl_mtx, MTX_DEF); + mtx_unlock(&uihashtbl_mtx); return (uip); } @@ -741,9 +741,9 @@ struct uidinfo *uip; { - mtx_enter(&uip->ui_mtx, MTX_DEF); + mtx_lock(&uip->ui_mtx); uip->ui_ref++; - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); } /*- @@ -767,18 +767,18 @@ { /* Prepare for optimal case. */ - mtx_enter(&uip->ui_mtx, MTX_DEF); + mtx_lock(&uip->ui_mtx); if (--uip->ui_ref != 0) { - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); return; } /* Prepare for suboptimal case. */ uip->ui_ref++; - mtx_exit(&uip->ui_mtx, MTX_DEF); - mtx_enter(&uihashtbl_mtx, MTX_DEF); - mtx_enter(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); + mtx_lock(&uihashtbl_mtx); + mtx_lock(&uip->ui_mtx); /* * We must subtract one from the count again because we backed out @@ -788,7 +788,7 @@ */ if (--uip->ui_ref == 0) { LIST_REMOVE(uip, ui_hash); - mtx_exit(&uihashtbl_mtx, MTX_DEF); + mtx_unlock(&uihashtbl_mtx); if (uip->ui_sbsize != 0) /* XXX no %qd in kernel. Truncate. 
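The crhold()/crfree() and uihold()/uifree() conversions in this file preserve a common refcount idiom: take the object's own mutex, decrement, and tear the object down only when the count reaches zero, at which point nobody else can reach it and its mutex can be destroyed. The skeleton, with illustrative obj/o_ref names (M_TEMP stands in for whatever malloc type the real object uses):

    mtx_lock(&obj->o_mtx);
    if (--obj->o_ref == 0) {
            /* Last reference: safe to destroy the lock and free. */
            mtx_destroy(&obj->o_mtx);
            free(obj, M_TEMP);
    } else
            mtx_unlock(&obj->o_mtx);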
*/ printf("freeing uidinfo: uid = %d, sbsize = %ld\n", @@ -801,8 +801,8 @@ return; } - mtx_exit(&uihashtbl_mtx, MTX_DEF); - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uihashtbl_mtx); + mtx_unlock(&uip->ui_mtx); } /* @@ -816,16 +816,16 @@ int max; { - mtx_enter(&uip->ui_mtx, MTX_DEF); + mtx_lock(&uip->ui_mtx); /* don't allow them to exceed max, but allow subtraction */ if (diff > 0 && uip->ui_proccnt + diff > max && max != 0) { - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); return (0); } uip->ui_proccnt += diff; if (uip->ui_proccnt < 0) printf("negative proccnt for uid = %d\n", uip->ui_uid); - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); return (1); } @@ -843,12 +843,12 @@ int s; s = splnet(); - mtx_enter(&uip->ui_mtx, MTX_DEF); + mtx_lock(&uip->ui_mtx); new = uip->ui_sbsize + to - *hiwat; /* don't allow them to exceed max, but allow subtraction */ if (to > *hiwat && new > max) { splx(s); - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); return (0); } uip->ui_sbsize = new; @@ -856,6 +856,6 @@ if (uip->ui_sbsize < 0) printf("negative sbsize for uid = %d\n", uip->ui_uid); splx(s); - mtx_exit(&uip->ui_mtx, MTX_DEF); + mtx_unlock(&uip->ui_mtx); return (1); } Index: sys/kern/kern_shutdown.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_shutdown.c,v retrieving revision 1.93 diff -u -r1.93 kern_shutdown.c --- sys/kern/kern_shutdown.c 2001/02/06 11:20:36 1.93 +++ sys/kern/kern_shutdown.c 2001/02/07 03:01:12 @@ -256,10 +256,10 @@ if (curproc != NULL) { DROP_GIANT_NOSWITCH(); for (subiter = 0; subiter < 50 * iter; subiter++) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); setrunqueue(curproc); mi_switch(); /* Allow interrupt threads to run */ - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); DELAY(1000); } PICKUP_GIANT(); @@ -540,7 +540,7 @@ #ifdef SMP /* Only 1 CPU can panic at a time */ - mtx_enter(&panic_mtx, MTX_DEF); + mtx_lock(&panic_mtx); #endif bootopt = RB_AUTOBOOT | RB_DUMP; Index: sys/kern/kern_sig.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_sig.c,v retrieving revision 1.105 diff -u -r1.105 kern_sig.c --- sys/kern/kern_sig.c 2001/01/24 11:08:02 1.105 +++ sys/kern/kern_sig.c 2001/02/07 03:01:12 @@ -186,9 +186,9 @@ SIGSETNAND(tmpset, p->p_sigmask); if (SIGISEMPTY(tmpset) && (p->p_flag & P_TRACED) == 0) return (0); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); r = issignal(p); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); return (r); } @@ -1087,11 +1087,11 @@ action = SIG_DFL; } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_nice > NZERO && action == SIG_DFL && (prop & SA_KILL) && (p->p_flag & P_TRACED) == 0) p->p_nice = NZERO; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (prop & SA_CONT) SIG_STOPSIGMASK(p->p_siglist); @@ -1116,9 +1116,9 @@ * Defer further processing for signals which are held, * except that stopped processes must be continued by SIGCONT. */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (action == SIG_HOLD && (!(prop & SA_CONT) || p->p_stat != SSTOP)) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); return; } @@ -1132,7 +1132,7 @@ * trap() or syscall(). 
*/ if ((p->p_sflag & PS_SINTR) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); goto out; } /* @@ -1142,7 +1142,7 @@ */ if (p->p_flag & P_TRACED) goto run; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); /* * If SIGCONT is default (or ignored) and process is * asleep, we are finished; the process should not @@ -1182,7 +1182,7 @@ /* NOTREACHED */ case SSTOP: - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); /* * If traced process is already stopped, * then no further action is necessary. @@ -1211,11 +1211,11 @@ SIGDELSET(p->p_siglist, sig); if (action == SIG_CATCH) goto runfast; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan == NULL) goto run; p->p_stat = SSLEEP; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); goto out; } @@ -1234,14 +1234,14 @@ * runnable and can look at the signal. But don't make * the process runnable, leave it stopped. */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan && p->p_sflag & PS_SINTR) { if (p->p_sflag & PS_CVWAITQ) cv_waitq_remove(p); else unsleep(p); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); goto out; default: @@ -1251,17 +1251,17 @@ * It will either never be noticed, or noticed very soon. */ if (p == curproc) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); signotify(p); } #ifdef SMP else if (p->p_stat == SRUN) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); forward_signal(p); } #endif else - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); goto out; } /*NOTREACHED*/ @@ -1270,14 +1270,14 @@ /* * Raise priority to at least PUSER. */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_priority > PUSER) p->p_priority = PUSER; run: /* If we jump here, sched_lock has to be owned. */ mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED); setrunnable(p); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); out: /* If we jump here, sched_lock should not be owned. 
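The run:/out: labels in psignal() document their locking protocol with mtx_assert() rather than with comments alone, an idiom worth copying at any lock-protocol boundary. The two assertions used above:

    /* Caller must hold sched_lock here, exactly once. */
    mtx_assert(&sched_lock, MA_OWNED | MA_NOTRECURSED);

    /* ... and must have dropped it by here. */
    mtx_assert(&sched_lock, MA_NOTOWNED);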
*/ mtx_assert(&sched_lock, MA_NOTOWNED); @@ -1336,10 +1336,10 @@ do { stop(p); PROCTREE_LOCK(PT_RELEASE); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); DROP_GIANT_NOSWITCH(); mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); PROCTREE_LOCK(PT_SHARED); } while (!trace_req(p) @@ -1413,10 +1413,10 @@ if ((p->p_pptr->p_procsig->ps_flag & PS_NOCLDSTOP) == 0) psignal(p->p_pptr, SIGCHLD); PROCTREE_LOCK(PT_RELEASE); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); DROP_GIANT_NOSWITCH(); mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); break; } else if (prop & SA_IGNORE) { @@ -1464,11 +1464,11 @@ { PROCTREE_ASSERT(PT_SHARED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_stat = SSTOP; p->p_flag &= ~P_WAITED; wakeup((caddr_t)p->p_pptr); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* Index: sys/kern/kern_subr.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_subr.c,v retrieving revision 1.39 diff -u -r1.39 kern_subr.c --- sys/kern/kern_subr.c 2001/01/10 04:43:49 1.39 +++ sys/kern/kern_subr.c 2001/02/07 03:01:12 @@ -377,13 +377,13 @@ p = curproc; s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); DROP_GIANT_NOSWITCH(); p->p_priority = p->p_usrpri; setrunqueue(p); p->p_stats->p_ru.ru_nivcsw++; mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); splx(s); } Index: sys/kern/kern_synch.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_synch.c,v retrieving revision 1.124 diff -u -r1.124 kern_synch.c --- sys/kern/kern_synch.c 2001/01/31 04:29:51 1.124 +++ sys/kern/kern_synch.c 2001/02/07 03:01:12 @@ -295,7 +295,7 @@ if (p->p_stat == SWAIT) continue; */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); p->p_swtime++; if (p->p_stat == SSLEEP || p->p_stat == SSTOP) p->p_slptime++; @@ -305,7 +305,7 @@ * stop recalculating its priority until it wakes up. */ if (p->p_slptime > 1) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); continue; } @@ -343,7 +343,7 @@ } else p->p_priority = p->p_usrpri; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); } ALLPROC_LOCK(AP_RELEASE); @@ -427,7 +427,7 @@ ktrcsw(p->p_tracep, 1, 0); #endif WITNESS_SLEEP(0, mtx); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); s = splhigh(); if (cold || panicstr) { /* @@ -437,8 +437,8 @@ * in case this is the idle process and already asleep. 
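The kern_synch.c conversion keeps the interlocked-sleep contract intact: the caller's sleep mutex is released with MTX_NOSWITCH only after the process is safely queued on the sleep channel, and is retaken with mtx_lock() before return unless PDROP was passed. From the consumer side that is the familiar pattern below; sc and foo_mtx are illustrative, and this assumes the standard msleep() entry point over the path shown above:

    mtx_lock(&foo_mtx);
    while (!sc->sc_ready) {
            /*
             * Atomically drops foo_mtx, sleeps on &sc->sc_ready and,
             * since PDROP is not given, reacquires foo_mtx before
             * returning.
             */
            msleep(&sc->sc_ready, &foo_mtx, PRIBIO, "foordy", 0);
    }
    mtx_unlock(&foo_mtx);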
*/ if (mtx != NULL && priority & PDROP) - mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_flags(mtx, MTX_NOSWITCH); + mtx_unlock_spin(&sched_lock); splx(s); return (0); } @@ -448,7 +448,7 @@ if (mtx != NULL) { mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED); WITNESS_SAVE(mtx, mtx); - mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH); + mtx_unlock_flags(mtx, MTX_NOSWITCH); if (priority & PDROP) mtx = NULL; } @@ -485,15 +485,15 @@ "msleep caught: proc %p (pid %d, %s), schedlock %p", p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock); p->p_sflag |= PS_SINTR; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if ((sig = CURSIG(p))) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan) unsleep(p); p->p_stat = SRUN; goto resume; } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan == NULL) { catch = 0; goto resume; @@ -518,12 +518,12 @@ ktrcsw(p->p_tracep, 0, 0); #endif rval = EWOULDBLOCK; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); goto out; } } else if (timo) callout_stop(&p->p_slpcallout); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (catch && (sig != 0 || (sig = CURSIG(p)))) { #ifdef KTRACE @@ -543,7 +543,7 @@ #endif PICKUP_GIANT(); if (mtx != NULL) { - mtx_enter(mtx, MTX_DEF); + mtx_lock(mtx); WITNESS_RESTORE(mtx, mtx); } return (rval); @@ -579,7 +579,7 @@ */ s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan != NULL) unsleep(p); @@ -593,7 +593,7 @@ TAILQ_INSERT_TAIL(&slpque[LOOKUP(ident)], p, p_slpq); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); return(0); @@ -620,12 +620,12 @@ WITNESS_SAVE_DECL(mtx); WITNESS_SLEEP(0, mtx); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); DROP_GIANT_NOSWITCH(); if (mtx != NULL) { mtx_assert(mtx, MA_OWNED | MA_NOTRECURSED); WITNESS_SAVE(mtx, mtx); - mtx_exit(mtx, MTX_DEF | MTX_NOSWITCH); + mtx_unlock_flags(mtx, MTX_NOSWITCH); if (priority & PDROP) mtx = NULL; } @@ -657,15 +657,15 @@ if (catch) { p->p_sflag |= PS_SINTR; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if ((sig = CURSIG(p))) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan) unsleep(p); p->p_stat = SRUN; goto resume; } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan == NULL) { catch = 0; goto resume; @@ -687,12 +687,12 @@ ktrcsw(p->p_tracep, 0, 0); #endif rval = EWOULDBLOCK; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); goto out; } } else if (timo) callout_stop(&p->p_slpcallout); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (catch && (sig != 0 || (sig = CURSIG(p)))) { #ifdef KTRACE @@ -720,7 +720,7 @@ p->p_stats->p_ru.ru_nvcsw++; mi_switch(); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); } @@ -735,7 +735,7 @@ out: PICKUP_GIANT(); if (mtx != NULL) { - mtx_enter(mtx, MTX_DEF); + mtx_lock(mtx); WITNESS_RESTORE(mtx, mtx); } return (rval); @@ -761,7 +761,7 @@ "endtsleep: proc %p (pid %d, %s), schedlock %p", p, p->p_pid, p->p_comm, (void *) sched_lock.mtx_lock); s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan) { if (p->p_stat == SSLEEP) setrunnable(p); @@ -769,7 +769,7 @@ unsleep(p); p->p_sflag |= PS_TIMEOUT; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); } @@ -783,12 +783,12 @@ int s; s = splhigh(); - 
mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan) { TAILQ_REMOVE(&slpque[LOOKUP(p->p_wchan)], p, p_slpq); p->p_wchan = NULL; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); } @@ -804,7 +804,7 @@ int s; s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); qp = &slpque[LOOKUP(ident)]; restart: TAILQ_FOREACH(p, qp, p_slpq) { @@ -832,7 +832,7 @@ } } } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); } @@ -850,7 +850,7 @@ int s; s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); qp = &slpque[LOOKUP(ident)]; TAILQ_FOREACH(p, qp, p_slpq) { @@ -878,7 +878,7 @@ } } } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); splx(s); } @@ -947,13 +947,13 @@ p->p_runtime > p->p_limit->p_cpulimit) { rlim = &p->p_rlimit[RLIMIT_CPU]; if (p->p_runtime / (rlim_t)1000000 >= rlim->rlim_max) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); killproc(p, "exceeded maximum CPU limit"); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); psignal(p, SIGXCPU); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (rlim->rlim_cur < rlim->rlim_max) { /* XXX: we should make a private copy */ rlim->rlim_cur += 5; @@ -990,7 +990,7 @@ register int s; s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); switch (p->p_stat) { case 0: case SRUN: @@ -1022,7 +1022,7 @@ } else maybe_resched(p); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -1036,7 +1036,7 @@ { register unsigned int newpriority; - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_rtprio.type == RTP_PRIO_NORMAL) { newpriority = PUSER + p->p_estcpu / INVERSE_ESTCPU_WEIGHT + NICE_WEIGHT * (p->p_nice - PRIO_MIN); @@ -1044,7 +1044,7 @@ p->p_usrpri = newpriority; } maybe_resched(p); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* ARGSUSED */ @@ -1100,13 +1100,13 @@ p->p_retval[0] = 0; s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); DROP_GIANT_NOSWITCH(); p->p_priority = MAXPRI; setrunqueue(p); p->p_stats->p_ru.ru_nvcsw++; mi_switch(); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PICKUP_GIANT(); splx(s); Index: sys/kern/kern_timeout.c =================================================================== RCS file: /home/ncvs/src/sys/kern/kern_timeout.c,v retrieving revision 1.65 diff -u -r1.65 kern_timeout.c --- sys/kern/kern_timeout.c 2000/11/25 06:22:14 1.65 +++ sys/kern/kern_timeout.c 2001/02/07 03:01:12 @@ -91,7 +91,7 @@ steps = 0; s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); while (softticks != ticks) { softticks++; /* @@ -108,10 +108,10 @@ if (steps >= MAX_SOFTCLOCK_STEPS) { nextsoftcheck = c; /* Give interrupts a chance. 
*/ - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); c = nextsoftcheck; steps = 0; } @@ -134,22 +134,22 @@ c->c_flags = (c->c_flags & ~CALLOUT_PENDING); } - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); if (!(c_flags & CALLOUT_MPSAFE)) - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); splx(s); c_func(c_arg); s = splhigh(); if (!(c_flags & CALLOUT_MPSAFE)) - mtx_exit(&Giant, MTX_DEF); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_unlock(&Giant); + mtx_lock_spin(&callout_lock); steps = 0; c = nextsoftcheck; } } } nextsoftcheck = NULL; - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); } @@ -180,7 +180,7 @@ struct callout_handle handle; s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); /* Fill in the next free callout structure. */ new = SLIST_FIRST(&callfree); @@ -192,7 +192,7 @@ callout_reset(new, to_ticks, ftn, arg); handle.callout = new; - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); return (handle); } @@ -214,10 +214,10 @@ return; s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); if (handle.callout->c_func == ftn && handle.callout->c_arg == arg) callout_stop(handle.callout); - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); } @@ -251,7 +251,7 @@ int s; s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); if (c->c_flags & CALLOUT_PENDING) callout_stop(c); @@ -269,7 +269,7 @@ c->c_time = ticks + to_ticks; TAILQ_INSERT_TAIL(&callwheel[c->c_time & callwheelmask], c, c_links.tqe); - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); } @@ -280,13 +280,13 @@ int s; s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); /* * Don't attempt to delete a callout that's not on the queue. */ if (!(c->c_flags & CALLOUT_PENDING)) { c->c_flags &= ~CALLOUT_ACTIVE; - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); return; } @@ -301,7 +301,7 @@ if (c->c_flags & CALLOUT_LOCAL_ALLOC) { SLIST_INSERT_HEAD(&callfree, c, c_links.sle); } - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); } @@ -366,7 +366,7 @@ /* don't collide with softclock() */ s = splhigh(); - mtx_enter(&callout_lock, MTX_SPIN); + mtx_lock_spin(&callout_lock); for (p = calltodo.c_next; p != NULL; p = p->c_next) { p->c_time -= delta_ticks; @@ -377,7 +377,7 @@ /* take back the ticks the timer didn't use (p->c_time <= 0) */ delta_ticks = -p->c_time; } - mtx_exit(&callout_lock, MTX_SPIN); + mtx_unlock_spin(&callout_lock); splx(s); return; Index: sys/kern/subr_eventhandler.c =================================================================== RCS file: /home/ncvs/src/sys/kern/subr_eventhandler.c,v retrieving revision 1.10 diff -u -r1.10 subr_eventhandler.c --- sys/kern/subr_eventhandler.c 2001/01/19 01:58:53 1.10 +++ sys/kern/subr_eventhandler.c 2001/02/07 03:01:12 @@ -73,7 +73,7 @@ KASSERT(eventhandler_lists_initted, ("eventhandler registered too early")); /* lock the eventhandler lists */ - mtx_enter(&eventhandler_mutex, MTX_DEF); + mtx_lock(&eventhandler_mutex); /* Do we need to find/create the (slow) list? 
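softclock() above also illustrates the transition rule used throughout this tree: a handler is bracketed by Giant unless it declared itself MP-safe (CALLOUT_MPSAFE). For completeness, the consumer view of the callout interface whose internals were just converted; the callout_init() setup and the softc argument are assumptions of the sketch:

    static struct callout foo_ch;       /* callout_init()'d at attach */

    static void
    foo_tick(void *arg)
    {
            /*
             * Runs from softclock(), with Giant wrapped around the
             * call unless CALLOUT_MPSAFE is set on the callout.
             */
    }

    static void
    foo_start(void *sc)
    {
            callout_reset(&foo_ch, hz, foo_tick, sc);   /* ~1s from now */
    }

    static void
    foo_stop(void)
    {
            callout_stop(&foo_ch);      /* harmless if not pending */
    }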
*/ if (list == NULL) { @@ -84,7 +84,7 @@ if (list == NULL) { if ((list = malloc(sizeof(struct eventhandler_list) + strlen(name) + 1, M_EVENTHANDLER, M_NOWAIT)) == NULL) { - mtx_exit(&eventhandler_mutex, MTX_DEF); + mtx_unlock(&eventhandler_mutex); return(NULL); } list->el_flags = 0; @@ -102,7 +102,7 @@ /* allocate an entry for this handler, populate it */ if ((eg = malloc(sizeof(struct eventhandler_entry_generic), M_EVENTHANDLER, M_NOWAIT)) == NULL) { - mtx_exit(&eventhandler_mutex, MTX_DEF); + mtx_unlock(&eventhandler_mutex); return(NULL); } eg->func = func; @@ -122,7 +122,7 @@ if (ep == NULL) TAILQ_INSERT_TAIL(&list->el_entries, &eg->ee, ee_link); lockmgr(&list->el_lock, LK_RELEASE, NULL, CURPROC); - mtx_exit(&eventhandler_mutex, MTX_DEF); + mtx_unlock(&eventhandler_mutex); return(&eg->ee); } @@ -154,14 +154,14 @@ struct eventhandler_list *list; /* scan looking for the requested list */ - mtx_enter(&eventhandler_mutex, MTX_DEF); + mtx_lock(&eventhandler_mutex); for (list = TAILQ_FIRST(&eventhandler_lists); list != NULL; list = TAILQ_NEXT(list, el_link)) { if (!strcmp(name, list->el_name)) break; } - mtx_exit(&eventhandler_mutex, MTX_DEF); + mtx_unlock(&eventhandler_mutex); return(list); } Index: sys/kern/subr_prof.c =================================================================== RCS file: /home/ncvs/src/sys/kern/subr_prof.c,v retrieving revision 1.40 diff -u -r1.40 subr_prof.c --- sys/kern/subr_prof.c 2001/01/24 11:11:35 1.40 +++ sys/kern/subr_prof.c 2001/02/07 03:01:12 @@ -444,12 +444,12 @@ u_short v; /* Testing PS_PROFIL may be unnecessary, but is certainly safe. */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_PROFIL) == 0 || ticks == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); prof = &p->p_stats->p_prof; if (pc < prof->pr_off || Index: sys/kern/subr_rman.c =================================================================== RCS file: /home/ncvs/src/sys/kern/subr_rman.c,v retrieving revision 1.17 diff -u -r1.17 subr_rman.c --- sys/kern/subr_rman.c 2001/01/24 12:35:50 1.17 +++ sys/kern/subr_rman.c 2001/02/07 03:34:10 @@ -104,9 +104,9 @@ return ENOMEM; mtx_init(rm->rm_mtx, "rman", MTX_DEF); - mtx_enter(&rman_mtx, MTX_DEF); + mtx_lock(&rman_mtx); TAILQ_INSERT_TAIL(&rman_head, rm, rm_link); - mtx_exit(&rman_mtx, MTX_DEF); + mtx_unlock(&rman_mtx); return 0; } @@ -129,7 +129,7 @@ r->r_dev = 0; r->r_rm = rm; - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); for (s = TAILQ_FIRST(&rm->rm_list); s && s->r_end < r->r_start; s = TAILQ_NEXT(s, r_link)) @@ -141,7 +141,7 @@ TAILQ_INSERT_BEFORE(s, r, r_link); } - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); return 0; } @@ -150,10 +150,10 @@ { struct resource *r; - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); TAILQ_FOREACH(r, &rm->rm_list, r_link) { if (r->r_flags & RF_ALLOCATED) { - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); return EBUSY; } } @@ -167,10 +167,10 @@ TAILQ_REMOVE(&rm->rm_list, r, r_link); free(r, M_RMAN); } - mtx_exit(rm->rm_mtx, MTX_DEF); - mtx_enter(&rman_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); + mtx_lock(&rman_mtx); TAILQ_REMOVE(&rman_head, rm, rm_link); - mtx_exit(&rman_mtx, MTX_DEF); + mtx_unlock(&rman_mtx); mtx_destroy(rm->rm_mtx); free(rm->rm_mtx, M_RMAN); @@ -193,7 +193,7 @@ want_activate = (flags & RF_ACTIVE); flags &= ~RF_ACTIVE; - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); for (r = TAILQ_FIRST(&rm->rm_list); r && r->r_end 
< start; @@ -370,7 +370,7 @@ } } - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); return (rv); } @@ -417,9 +417,9 @@ struct rman *rm; rm = r->r_rm; - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); rv = int_rman_activate_resource(rm, r, &whohas); - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); return rv; } @@ -432,7 +432,7 @@ rm = r->r_rm; for (;;) { - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); rv = int_rman_activate_resource(rm, r, &whohas); if (rv != EBUSY) return (rv); /* returns with mutex held */ @@ -441,19 +441,19 @@ panic("rman_await_resource"); /* * splhigh hopefully will prevent a race between - * mtx_exit and tsleep where a process + * mtx_unlock and tsleep where a process * could conceivably get in and release the resource * before we have a chance to sleep on it. */ s = splhigh(); whohas->r_flags |= RF_WANTED; - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); rv = tsleep(r->r_sharehead, pri, "rmwait", timo); if (rv) { splx(s); return rv; } - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); splx(s); } } @@ -478,9 +478,9 @@ struct rman *rm; rm = r->r_rm; - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); int_rman_deactivate_resource(r); - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); return 0; } @@ -576,9 +576,9 @@ int rv; struct rman *rm = r->r_rm; - mtx_enter(rm->rm_mtx, MTX_DEF); + mtx_lock(rm->rm_mtx); rv = int_rman_release_resource(rm, r); - mtx_exit(rm->rm_mtx, MTX_DEF); + mtx_unlock(rm->rm_mtx); return (rv); } Index: sys/kern/sys_generic.c =================================================================== RCS file: /home/ncvs/src/sys/kern/sys_generic.c,v retrieving revision 1.70 diff -u -r1.70 sys_generic.c --- sys/kern/sys_generic.c 2001/01/24 11:12:37 1.70 +++ sys/kern/sys_generic.c 2001/02/07 03:01:12 @@ -1017,13 +1017,13 @@ if (sip->si_pid == mypid) return; if (sip->si_pid && (p = pfind(sip->si_pid))) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan == (caddr_t)&selwait) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); sip->si_flags |= SI_COLL; return; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } sip->si_pid = mypid; } @@ -1047,15 +1047,15 @@ p = pfind(sip->si_pid); sip->si_pid = 0; if (p != NULL) { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_wchan == (caddr_t)&selwait) { if (p->p_stat == SSLEEP) setrunnable(p); else unsleep(p); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_LOCK(p); p->p_flag &= ~P_SELECT; PROC_UNLOCK(p); Index: sys/kern/sys_process.c =================================================================== RCS file: /home/ncvs/src/sys/kern/sys_process.c,v retrieving revision 1.60 diff -u -r1.60 sys_process.c --- sys/kern/sys_process.c 2001/01/24 11:15:24 1.60 +++ sys/kern/sys_process.c 2001/02/07 03:01:12 @@ -284,12 +284,12 @@ PROCTREE_LOCK(PT_RELEASE); /* not currently stopped */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat != SSTOP || (p->p_flag & P_WAITED) == 0) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return EBUSY; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); /* OK */ break; @@ -377,13 +377,13 @@ sendsig: /* deliver or queue signal */ s = splhigh(); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat == SSTOP) { p->p_xstat = uap->data; setrunnable(p); - 
mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (uap->data) { mtx_assert(&Giant, MA_OWNED); psignal(p, uap->data); @@ -437,14 +437,14 @@ } error = 0; PHOLD(p); /* user had damn well better be incore! */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_INMEM) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); fill_kinfo_proc (p, &p->p_addr->u_kproc); curp->p_retval[0] = *(int *) ((uintptr_t)p->p_addr + (uintptr_t)uap->addr); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); curp->p_retval[0] = 0; error = EFAULT; } @@ -453,13 +453,13 @@ case PT_WRITE_U: PHOLD(p); /* user had damn well better be incore! */ - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_INMEM) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); fill_kinfo_proc (p, &p->p_addr->u_kproc); error = ptrace_write_u(p, (vm_offset_t)uap->addr, uap->data); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); error = EFAULT; } PRELE(p); Index: sys/kern/tty.c =================================================================== RCS file: /home/ncvs/src/sys/kern/tty.c,v retrieving revision 1.146 diff -u -r1.146 tty.c --- sys/kern/tty.c 2001/01/29 09:43:36 1.146 +++ sys/kern/tty.c 2001/02/07 03:01:12 @@ -2251,7 +2251,7 @@ else if ((p = LIST_FIRST(&tp->t_pgrp->pg_members)) == 0) ttyprintf(tp, "empty foreground process group\n"); else { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); /* Pick interesting process. */ for (pick = NULL; p != 0; p = LIST_NEXT(p, p_pglist)) @@ -2264,7 +2264,7 @@ ltmp = pick->p_stat == SIDL || pick->p_stat == SWAIT || pick->p_stat == SZOMB ? 0 : pgtok(vmspace_resident_count(pick->p_vmspace)); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); ttyprintf(tp, " cmd: %s %d [%s] ", pick->p_comm, pick->p_pid, stmp); Index: sys/kern/uipc_mbuf.c =================================================================== RCS file: /home/ncvs/src/sys/kern/uipc_mbuf.c,v retrieving revision 1.63 diff -u -r1.63 uipc_mbuf.c --- sys/kern/uipc_mbuf.c 2001/01/20 21:29:07 1.63 +++ sys/kern/uipc_mbuf.c 2001/02/07 03:01:12 @@ -152,20 +152,20 @@ /* * Perform some initial allocations. */ - mtx_enter(&mcntfree.m_mtx, MTX_DEF); + mtx_lock(&mcntfree.m_mtx); if (m_alloc_ref(REF_INIT, M_DONTWAIT) == 0) goto bad; - mtx_exit(&mcntfree.m_mtx, MTX_DEF); + mtx_unlock(&mcntfree.m_mtx); - mtx_enter(&mmbfree.m_mtx, MTX_DEF); + mtx_lock(&mmbfree.m_mtx); if (m_mballoc(NMB_INIT, M_DONTWAIT) == 0) goto bad; - mtx_exit(&mmbfree.m_mtx, MTX_DEF); + mtx_unlock(&mmbfree.m_mtx); - mtx_enter(&mclfree.m_mtx, MTX_DEF); + mtx_lock(&mclfree.m_mtx); if (m_clalloc(NCL_INIT, M_DONTWAIT) == 0) goto bad; - mtx_exit(&mclfree.m_mtx, MTX_DEF); + mtx_unlock(&mclfree.m_mtx); return; bad: @@ -204,7 +204,7 @@ */ nbytes = round_page(nmb * sizeof(union mext_refcnt)); - mtx_exit(&mcntfree.m_mtx, MTX_DEF); + mtx_unlock(&mcntfree.m_mtx); #ifdef WITNESS /* * XXX: Make sure we don't create lock order problems. @@ -216,21 +216,21 @@ KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0, ("m_alloc_ref: Giant must be owned or no locks held")); #endif - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if ((p = (caddr_t)kmem_malloc(mb_map, nbytes, how == M_TRYWAIT ? 
M_WAITOK : M_NOWAIT)) == NULL) { - mtx_exit(&Giant, MTX_DEF); - mtx_enter(&mcntfree.m_mtx, MTX_DEF); + mtx_unlock(&Giant); + mtx_lock(&mcntfree.m_mtx); return (0); } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); nmb = nbytes / sizeof(union mext_refcnt); /* * We don't let go of the mutex in order to avoid a race. * It is up to the caller to let go of the mutex. */ - mtx_enter(&mcntfree.m_mtx, MTX_DEF); + mtx_lock(&mcntfree.m_mtx); for (i = 0; i < nmb; i++) { ((union mext_refcnt *)p)->next_ref = mcntfree.m_head; mcntfree.m_head = (union mext_refcnt *)p; @@ -274,7 +274,7 @@ nbytes = round_page(nmb * MSIZE); - mtx_exit(&mmbfree.m_mtx, MTX_DEF); + mtx_unlock(&mmbfree.m_mtx); #ifdef WITNESS /* * XXX: Make sure we don't create lock order problems. @@ -286,14 +286,14 @@ KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0, ("m_mballoc: Giant must be owned or no locks held")); #endif - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); p = (caddr_t)kmem_malloc(mb_map, nbytes, M_NOWAIT); if (p == 0 && how == M_TRYWAIT) { atomic_add_long(&mbstat.m_wait, 1); p = (caddr_t)kmem_malloc(mb_map, nbytes, M_WAITOK); } - mtx_exit(&Giant, MTX_DEF); - mtx_enter(&mmbfree.m_mtx, MTX_DEF); + mtx_unlock(&Giant); + mtx_lock(&mmbfree.m_mtx); /* * Either the map is now full, or `how' is M_DONTWAIT and there @@ -345,10 +345,10 @@ * importantly, to avoid a potential lock order reversal which may * result in deadlock (See comment above m_reclaim()). */ - mtx_exit(&mmbfree.m_mtx, MTX_DEF); + mtx_unlock(&mmbfree.m_mtx); m_reclaim(); - mtx_enter(&mmbfree.m_mtx, MTX_DEF); + mtx_lock(&mmbfree.m_mtx); _MGET(p, M_DONTWAIT); if (p == NULL) { @@ -408,7 +408,7 @@ } npg = ncl; - mtx_exit(&mclfree.m_mtx, MTX_DEF); + mtx_unlock(&mclfree.m_mtx); #ifdef WITNESS /* * XXX: Make sure we don't create lock order problems. @@ -420,12 +420,12 @@ KASSERT(mtx_owned(&Giant) || witness_list(CURPROC) == 0, ("m_clalloc: Giant must be owned or no locks held")); #endif - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); p = (caddr_t)kmem_malloc(mb_map, ctob(npg), how == M_TRYWAIT ? 
M_WAITOK : M_NOWAIT); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); ncl = ncl * PAGE_SIZE / MCLBYTES; - mtx_enter(&mclfree.m_mtx, MTX_DEF); + mtx_lock(&mclfree.m_mtx); /* * Either the map is now full, or `how' is M_DONTWAIT and there Index: sys/kern/uipc_syscalls.c =================================================================== RCS file: /home/ncvs/src/sys/kern/uipc_syscalls.c,v retrieving revision 1.83 diff -u -r1.83 uipc_syscalls.c --- sys/kern/uipc_syscalls.c 2001/01/02 11:51:55 1.83 +++ sys/kern/uipc_syscalls.c 2001/02/07 03:01:13 @@ -1418,7 +1418,7 @@ int i; mtx_init(&sf_freelist.sf_lock, "sf_bufs list lock", MTX_DEF); - mtx_enter(&sf_freelist.sf_lock, MTX_DEF); + mtx_lock(&sf_freelist.sf_lock); SLIST_INIT(&sf_freelist.sf_head); sf_base = kmem_alloc_pageable(kernel_map, nsfbufs * PAGE_SIZE); sf_bufs = malloc(nsfbufs * sizeof(struct sf_buf), M_TEMP, @@ -1428,7 +1428,7 @@ SLIST_INSERT_HEAD(&sf_freelist.sf_head, &sf_bufs[i], free_list); } sf_buf_alloc_want = 0; - mtx_exit(&sf_freelist.sf_lock, MTX_DEF); + mtx_unlock(&sf_freelist.sf_lock); } /* @@ -1439,13 +1439,13 @@ { struct sf_buf *sf; - mtx_enter(&sf_freelist.sf_lock, MTX_DEF); + mtx_lock(&sf_freelist.sf_lock); while ((sf = SLIST_FIRST(&sf_freelist.sf_head)) == NULL) { sf_buf_alloc_want++; msleep(&sf_freelist, &sf_freelist.sf_lock, PVM, "sfbufa", 0); } SLIST_REMOVE_HEAD(&sf_freelist.sf_head, free_list); - mtx_exit(&sf_freelist.sf_lock, MTX_DEF); + mtx_unlock(&sf_freelist.sf_lock); return (sf); } @@ -1475,13 +1475,13 @@ vm_page_free(m); splx(s); sf->m = NULL; - mtx_enter(&sf_freelist.sf_lock, MTX_DEF); + mtx_lock(&sf_freelist.sf_lock); SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list); if (sf_buf_alloc_want) { sf_buf_alloc_want--; wakeup_one(&sf_freelist); } - mtx_exit(&sf_freelist.sf_lock, MTX_DEF); + mtx_unlock(&sf_freelist.sf_lock); } /* Index: sys/kern/vfs_aio.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_aio.c,v retrieving revision 1.90 diff -u -r1.90 vfs_aio.c --- sys/kern/vfs_aio.c 2001/02/06 09:25:03 1.90 +++ sys/kern/vfs_aio.c 2001/02/07 03:01:13 @@ -638,7 +638,7 @@ struct proc *curcp, *mycp, *userp; struct vmspace *myvm, *tmpvm; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * Local copies of curproc (cp) and vmspace (myvm) */ Index: sys/kern/vfs_bio.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_bio.c,v retrieving revision 1.271 diff -u -r1.271 vfs_bio.c --- sys/kern/vfs_bio.c 2001/02/04 06:19:26 1.271 +++ sys/kern/vfs_bio.c 2001/02/07 03:01:13 @@ -1800,7 +1800,7 @@ { int s; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* * This process needs to be suspended prior to shutdown sync. 
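The conversion is purely mechanical throughout: the lock class moves out of the per-call flag argument and into the function name, so a call site can no longer pass a flag that disagrees with how the mutex was initialized. A minimal sketch of the before/after idiom, using a hypothetical counter and mutex that are not part of this patch:

	/* Hypothetical example, not from the patch. */
	static struct mtx example_mtx;	/* mtx_init(&example_mtx, "example", MTX_DEF) */
	static int example_count;

	static void
	example_bump(void)
	{
		mtx_lock(&example_mtx);		/* was: mtx_enter(&example_mtx, MTX_DEF); */
		example_count++;
		mtx_unlock(&example_mtx);	/* was: mtx_exit(&example_mtx, MTX_DEF); */
	}

Spin mutexes convert the same way: mtx_enter(&m, MTX_SPIN) becomes mtx_lock_spin(&m) and mtx_exit(&m, MTX_SPIN) becomes mtx_unlock_spin(&m), as in the sched_lock and clock_lock hunks; mtx_try_enter(&m, MTX_DEF) becomes mtx_trylock(&m).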
Index: sys/kern/vfs_conf.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_conf.c,v retrieving revision 1.56 diff -u -r1.56 vfs_conf.c --- sys/kern/vfs_conf.c 2000/12/09 09:35:40 1.56 +++ sys/kern/vfs_conf.c 2001/02/07 03:01:13 @@ -230,9 +230,9 @@ } else { /* register with list of mounted filesystems */ - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); TAILQ_INSERT_HEAD(&mountlist, mp, mnt_list); - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); /* sanity check system clock against root filesystem timestamp */ inittodr(mp->mnt_time); Index: sys/kern/vfs_default.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_default.c,v retrieving revision 1.39 diff -u -r1.39 vfs_default.c --- sys/kern/vfs_default.c 2000/11/01 17:57:22 1.39 +++ sys/kern/vfs_default.c 2001/02/07 03:01:13 @@ -449,7 +449,7 @@ * the interlock here. */ if (ap->a_flags & LK_INTERLOCK) - mtx_exit(&ap->a_vp->v_interlock, MTX_DEF); + mtx_unlock(&ap->a_vp->v_interlock); return (0); #endif } @@ -471,7 +471,7 @@ * the interlock here. */ if (ap->a_flags & LK_INTERLOCK) - mtx_exit(&ap->a_vp->v_interlock, MTX_DEF); + mtx_unlock(&ap->a_vp->v_interlock); return (0); } Index: sys/kern/vfs_subr.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_subr.c,v retrieving revision 1.302 diff -u -r1.302 vfs_subr.c --- sys/kern/vfs_subr.c 2001/02/04 13:12:23 1.302 +++ sys/kern/vfs_subr.c 2001/02/07 03:01:13 @@ -390,15 +390,15 @@ { register struct mount *mp; - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); TAILQ_FOREACH(mp, &mountlist, mnt_list) { if (mp->mnt_stat.f_fsid.val[0] == fsid->val[0] && mp->mnt_stat.f_fsid.val[1] == fsid->val[1]) { - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); return (mp); } } - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); return ((struct mount *) 0); } @@ -422,7 +422,7 @@ fsid_t tfsid; int mtype; - mtx_enter(&mntid_mtx, MTX_DEF); + mtx_lock(&mntid_mtx); mtype = mp->mnt_vfc->vfc_typenum; tfsid.val[1] = mtype; mtype = (mtype & 0xFF) << 24; @@ -435,7 +435,7 @@ } mp->mnt_stat.f_fsid.val[0] = tfsid.val[0]; mp->mnt_stat.f_fsid.val[1] = tfsid.val[1]; - mtx_exit(&mntid_mtx, MTX_DEF); + mtx_unlock(&mntid_mtx); } /* @@ -538,7 +538,7 @@ */ s = splbio(); - mtx_enter(&vnode_free_list_mtx, MTX_DEF); + mtx_lock(&vnode_free_list_mtx); if (wantfreevnodes && freevnodes < wantfreevnodes) { vp = NULL; @@ -560,7 +560,7 @@ if (LIST_FIRST(&vp->v_cache_src) != NULL || (VOP_GETVOBJECT(vp, &object) == 0 && (object->resident_page_count || object->ref_count)) || - !mtx_try_enter(&vp->v_interlock, MTX_DEF)) { + !mtx_trylock(&vp->v_interlock)) { TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); vp = NULL; continue; @@ -570,7 +570,7 @@ */ if (vn_start_write(vp, &vnmp, V_NOWAIT) == 0) break; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); vp = NULL; } @@ -578,13 +578,13 @@ vp->v_flag |= VDOOMED; vp->v_flag &= ~VFREE; freevnodes--; - mtx_exit(&vnode_free_list_mtx, MTX_DEF); + mtx_unlock(&vnode_free_list_mtx); cache_purge(vp); vp->v_lease = NULL; if (vp->v_type != VBAD) { vgonel(vp, p); } else { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); } vn_finished_write(vnmp); @@ -609,7 +609,7 @@ vp->v_clen = 0; vp->v_socket = 0; } else { - mtx_exit(&vnode_free_list_mtx, MTX_DEF); + 
mtx_unlock(&vnode_free_list_mtx); vp = (struct vnode *) zalloc(vnode_zone); bzero((char *) vp, sizeof *vp); mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF); @@ -646,7 +646,7 @@ register struct mount *mp; { - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); /* * Delete from old mount point vnode list, if on one. */ @@ -656,11 +656,11 @@ * Insert into list of vnodes for the new mount point, if available. */ if ((vp->v_mount = mp) == NULL) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); return; } LIST_INSERT_HEAD(&mp->mnt_vnodelist, vp, v_mntvnodes); - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); } /* @@ -785,12 +785,12 @@ /* * Destroy the copy in the VM cache, too. */ - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (VOP_GETVOBJECT(vp, &object) == 0) { vm_object_page_remove(object, 0, 0, (flags & V_SAVE) ? TRUE : FALSE); } - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); if (!TAILQ_EMPTY(&vp->v_dirtyblkhd) || !TAILQ_EMPTY(&vp->v_cleanblkhd)) panic("vinvalbuf: flush failed"); @@ -1010,7 +1010,7 @@ int s; struct proc *p = updateproc; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); EVENTHANDLER_REGISTER(shutdown_pre_sync, kproc_shutdown, p, SHUTDOWN_PRI_LAST); @@ -1104,10 +1104,10 @@ speedup_syncer() { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (updateproc->p_wchan == &lbolt) setrunnable(updateproc); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); if (rushjob < syncdelay / 2) { rushjob += 1; stat_rush_requests += 1; @@ -1407,9 +1407,9 @@ KASSERT(nvp->v_type == VCHR, ("addalias on non-special vnode")); nvp->v_rdev = dev; - mtx_enter(&spechash_mtx, MTX_DEF); + mtx_lock(&spechash_mtx); SLIST_INSERT_HEAD(&dev->si_hlist, nvp, v_specnext); - mtx_exit(&spechash_mtx, MTX_DEF); + mtx_unlock(&spechash_mtx); } /* @@ -1435,7 +1435,7 @@ * the VXLOCK flag is set. */ if ((flags & LK_INTERLOCK) == 0) - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (vp->v_flag & VXLOCK) { if (vp->v_vxproc == curproc) { printf("VXLOCK interlock avoided\n"); @@ -1461,15 +1461,15 @@ * before sleeping so that multiple processes do * not try to recycle it. 
*/ - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); vp->v_usecount--; if (VSHOULDFREE(vp)) vfree(vp); - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); } return (error); } - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return (0); } @@ -1479,9 +1479,9 @@ void vref(struct vnode *vp) { - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); vp->v_usecount++; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); } /* @@ -1496,14 +1496,14 @@ KASSERT(vp != NULL, ("vrele: null vp")); - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); KASSERT(vp->v_writecount < vp->v_usecount, ("vrele: missed vn_close")); if (vp->v_usecount > 1) { vp->v_usecount--; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return; } @@ -1525,7 +1525,7 @@ } else { #ifdef DIAGNOSTIC vprint("vrele: negative ref count", vp); - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); #endif panic("vrele: negative ref cnt"); } @@ -1543,7 +1543,7 @@ struct proc *p = curproc; /* XXX */ KASSERT(vp != NULL, ("vput: null vp")); - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); KASSERT(vp->v_writecount < vp->v_usecount, ("vput: missed vn_close")); if (vp->v_usecount > 1) { @@ -1564,7 +1564,7 @@ * call VOP_INACTIVE with the node locked. So, in the case of * vrele, we explicitly lock the vnode before calling VOP_INACTIVE. */ - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); VOP_INACTIVE(vp, p); } else { @@ -1633,7 +1633,7 @@ struct vnode *vp, *nvp; int busy = 0; - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); loop: for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) { /* @@ -1649,12 +1649,12 @@ if (vp == skipvp) continue; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); /* * Skip over a vnodes marked VSYSTEM. */ if ((flags & SKIPSYSTEM) && (vp->v_flag & VSYSTEM)) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); continue; } /* @@ -1663,7 +1663,7 @@ */ if ((flags & WRITECLOSE) && (vp->v_writecount == 0 || vp->v_type != VREG)) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); continue; } @@ -1672,9 +1672,9 @@ * vnode data structures and we are done. */ if (vp->v_usecount == 0) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); vgonel(vp, p); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); continue; } @@ -1684,7 +1684,7 @@ * all other files, just kill them. */ if (flags & FORCECLOSE) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); if (vp->v_type != VCHR) { vgonel(vp, p); } else { @@ -1692,17 +1692,17 @@ vp->v_op = spec_vnodeop_p; insmntque(vp, (struct mount *) 0); } - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); continue; } #ifdef DIAGNOSTIC if (busyprt) vprint("vflush: busy vnode", vp); #endif - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); busy++; } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); if (busy) return (EBUSY); return (0); @@ -1784,7 +1784,7 @@ * Inline copy of vrele() since VOP_INACTIVE * has already been called. 
*/ - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (--vp->v_usecount <= 0) { #ifdef DIAGNOSTIC if (vp->v_usecount < 0 || vp->v_writecount != 0) { @@ -1794,7 +1794,7 @@ #endif vfree(vp); } - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); } cache_purge(vp); @@ -1847,9 +1847,9 @@ } dev = vp->v_rdev; for (;;) { - mtx_enter(&spechash_mtx, MTX_DEF); + mtx_lock(&spechash_mtx); vq = SLIST_FIRST(&dev->si_hlist); - mtx_exit(&spechash_mtx, MTX_DEF); + mtx_unlock(&spechash_mtx); if (!vq) break; vgone(vq); @@ -1868,15 +1868,15 @@ struct proc *p; { - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (vp->v_usecount == 0) { if (inter_lkp) { - mtx_exit(inter_lkp, MTX_DEF); + mtx_unlock(inter_lkp); } vgonel(vp, p); return (1); } - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return (0); } @@ -1890,7 +1890,7 @@ { struct proc *p = curproc; /* XXX */ - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); vgonel(vp, p); } @@ -1919,7 +1919,7 @@ * Clean out the filesystem specific data. */ vclean(vp, DOCLOSE, p); - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); /* * Delete from old mount point vnode list, if on one. @@ -1931,10 +1931,10 @@ * if it is on one. */ if (vp->v_type == VCHR && vp->v_rdev != NULL && vp->v_rdev != NODEV) { - mtx_enter(&spechash_mtx, MTX_DEF); + mtx_lock(&spechash_mtx); SLIST_REMOVE(&vp->v_rdev->si_hlist, vp, vnode, v_specnext); freedev(vp->v_rdev); - mtx_exit(&spechash_mtx, MTX_DEF); + mtx_unlock(&spechash_mtx); vp->v_rdev = NULL; } @@ -1950,19 +1950,19 @@ */ if (vp->v_usecount == 0 && !(vp->v_flag & VDOOMED)) { s = splbio(); - mtx_enter(&vnode_free_list_mtx, MTX_DEF); + mtx_lock(&vnode_free_list_mtx); if (vp->v_flag & VFREE) TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); else freevnodes++; vp->v_flag |= VFREE; TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); - mtx_exit(&vnode_free_list_mtx, MTX_DEF); + mtx_unlock(&vnode_free_list_mtx); splx(s); } vp->v_type = VBAD; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); } /* @@ -1976,15 +1976,15 @@ { struct vnode *vp; - mtx_enter(&spechash_mtx, MTX_DEF); + mtx_lock(&spechash_mtx); SLIST_FOREACH(vp, &dev->si_hlist, v_specnext) { if (type == vp->v_type) { *vpp = vp; - mtx_exit(&spechash_mtx, MTX_DEF); + mtx_unlock(&spechash_mtx); return (1); } } - mtx_exit(&spechash_mtx, MTX_DEF); + mtx_unlock(&spechash_mtx); return (0); } @@ -1999,10 +1999,10 @@ int count; count = 0; - mtx_enter(&spechash_mtx, MTX_DEF); + mtx_lock(&spechash_mtx); SLIST_FOREACH(vq, &vp->v_rdev->si_hlist, v_specnext) count += vq->v_usecount; - mtx_exit(&spechash_mtx, MTX_DEF); + mtx_unlock(&spechash_mtx); return (count); } @@ -2083,7 +2083,7 @@ struct vnode *vp; printf("Locked vnodes\n"); - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) { nmp = TAILQ_NEXT(mp, mnt_list); @@ -2093,11 +2093,11 @@ if (VOP_ISLOCKED(vp, NULL)) vprint((char *)0, vp); } - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); nmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); } #endif @@ -2202,14 +2202,14 @@ return (SYSCTL_OUT(req, 0, (numvnodes + KINFO_VNODESLOP) * (VPTRSZ + VNODESZ))); - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, 
p)) { nmp = TAILQ_NEXT(mp, mnt_list); continue; } again: - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { @@ -2219,22 +2219,22 @@ * recycled onto the same filesystem. */ if (vp->v_mount != mp) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); goto again; } nvp = LIST_NEXT(vp, v_mntvnodes); - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); if ((error = SYSCTL_OUT(req, &vp, VPTRSZ)) || (error = SYSCTL_OUT(req, vp, VNODESZ))) return (error); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } - mtx_exit(&mntvnode_mtx, MTX_DEF); - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); + mtx_lock(&mountlist_mtx); nmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); return (0); } @@ -2592,7 +2592,7 @@ continue; } - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (VOP_GETVOBJECT(vp, &obj) == 0 && (obj->flags & OBJ_MIGHTBEDIRTY)) { if (!vget(vp, @@ -2604,7 +2604,7 @@ vput(vp); } } else { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); } } if (anyio && (--tries > 0)) @@ -2638,7 +2638,7 @@ int s; s = splbio(); - mtx_enter(&vnode_free_list_mtx, MTX_DEF); + mtx_lock(&vnode_free_list_mtx); KASSERT((vp->v_flag & VFREE) == 0, ("vnode already free")); if (vp->v_flag & VAGE) { TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist); @@ -2646,7 +2646,7 @@ TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist); } freevnodes++; - mtx_exit(&vnode_free_list_mtx, MTX_DEF); + mtx_unlock(&vnode_free_list_mtx); vp->v_flag &= ~VAGE; vp->v_flag |= VFREE; splx(s); @@ -2662,11 +2662,11 @@ int s; s = splbio(); - mtx_enter(&vnode_free_list_mtx, MTX_DEF); + mtx_lock(&vnode_free_list_mtx); KASSERT((vp->v_flag & VFREE) != 0, ("vnode not free")); TAILQ_REMOVE(&vnode_free_list, vp, v_freelist); freevnodes--; - mtx_exit(&vnode_free_list_mtx, MTX_DEF); + mtx_unlock(&vnode_free_list_mtx); vp->v_flag &= ~(VFREE|VAGE); splx(s); } @@ -2685,7 +2685,7 @@ struct proc *p; short events; { - mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_lock(&vp->v_pollinfo.vpi_lock); if (vp->v_pollinfo.vpi_revents & events) { /* * This leaves events we are not interested @@ -2697,12 +2697,12 @@ events &= vp->v_pollinfo.vpi_revents; vp->v_pollinfo.vpi_revents &= ~events; - mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_unlock(&vp->v_pollinfo.vpi_lock); return events; } vp->v_pollinfo.vpi_events |= events; selrecord(p, &vp->v_pollinfo.vpi_selinfo); - mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_unlock(&vp->v_pollinfo.vpi_lock); return 0; } @@ -2717,7 +2717,7 @@ struct vnode *vp; short events; { - mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_lock(&vp->v_pollinfo.vpi_lock); if (vp->v_pollinfo.vpi_events & events) { /* * We clear vpi_events so that we don't @@ -2734,7 +2734,7 @@ vp->v_pollinfo.vpi_revents |= events; selwakeup(&vp->v_pollinfo.vpi_selinfo); } - mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_unlock(&vp->v_pollinfo.vpi_lock); } /* @@ -2746,12 +2746,12 @@ vn_pollgone(vp) struct vnode *vp; { - mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_lock(&vp->v_pollinfo.vpi_lock); if (vp->v_pollinfo.vpi_events) { vp->v_pollinfo.vpi_events = 0; selwakeup(&vp->v_pollinfo.vpi_selinfo); } - mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_unlock(&vp->v_pollinfo.vpi_lock); } @@ -2856,9 +2856,9 @@ * Walk the list of vnodes pushing all that are dirty and * not already on the sync list. 
*/ - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); if (vfs_busy(mp, LK_EXCLUSIVE | LK_NOWAIT, &mountlist_mtx, p) != 0) { - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); return (0); } if (vn_start_write(NULL, &mp, V_NOWAIT) != 0) { Index: sys/kern/vfs_syscalls.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_syscalls.c,v retrieving revision 1.175 diff -u -r1.175 vfs_syscalls.c --- sys/kern/vfs_syscalls.c 2000/12/13 00:17:01 1.175 +++ sys/kern/vfs_syscalls.c 2001/02/07 03:01:13 @@ -176,16 +176,16 @@ vput(vp); return (EBUSY); } - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); vfs_unbusy(mp, p); vput(vp); return (EBUSY); } vp->v_flag |= VMOUNT; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); mp->mnt_flag |= SCARG(uap, flags) & (MNT_RELOAD | MNT_FORCE | MNT_UPDATE | MNT_SNAPSHOT); VOP_UNLOCK(vp, 0, p); @@ -243,15 +243,15 @@ return (ENODEV); } } - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if ((vp->v_flag & VMOUNT) != 0 || vp->v_mountedhere != NULL) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); vput(vp); return (EBUSY); } vp->v_flag |= VMOUNT; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); /* * Allocate and initialize the filesystem. @@ -310,9 +310,9 @@ mp->mnt_syncer = NULL; } vfs_unbusy(mp, p); - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); vp->v_flag &= ~VMOUNT; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); vrele(vp); return (error); } @@ -322,13 +322,13 @@ */ cache_purge(vp); if (!error) { - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); vp->v_flag &= ~VMOUNT; vp->v_mountedhere = mp; - mtx_exit(&vp->v_interlock, MTX_DEF); - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_unlock(&vp->v_interlock); + mtx_lock(&mountlist_mtx); TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); checkdirs(vp); VOP_UNLOCK(vp, 0, p); if ((mp->mnt_flag & MNT_RDONLY) == 0) @@ -337,9 +337,9 @@ if ((error = VFS_START(mp, 0, p)) != 0) vrele(vp); } else { - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); vp->v_flag &= ~VMOUNT; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); mp->mnt_vfc->vfc_refcount--; vfs_unbusy(mp, p); free((caddr_t)mp, M_MOUNT); @@ -464,7 +464,7 @@ int error; int async_flag; - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); mp->mnt_kern_flag |= MNTK_UNMOUNT; lockmgr(&mp->mnt_lock, LK_DRAIN | LK_INTERLOCK, &mountlist_mtx, p); vn_start_write(NULL, &mp, V_WAIT); @@ -484,7 +484,7 @@ error = VFS_UNMOUNT(mp, flags, p); } vn_finished_write(mp); - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); if (error) { if ((mp->mnt_flag & MNT_RDONLY) == 0 && mp->mnt_syncer == NULL) (void) vfs_allocate_syncvnode(mp); @@ -535,7 +535,7 @@ struct mount *mp, *nmp; int asyncflag; - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) { nmp = TAILQ_NEXT(mp, mnt_list); @@ -551,11 +551,11 @@ mp->mnt_flag |= asyncflag; vn_finished_write(mp); } - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); nmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } - 
mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); #if 0 /* * XXX don't call vfs_bufstats() yet because that routine @@ -727,7 +727,7 @@ maxcount = SCARG(uap, bufsize) / sizeof(struct statfs); sfsp = (caddr_t)SCARG(uap, buf); count = 0; - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) { nmp = TAILQ_NEXT(mp, mnt_list); @@ -743,7 +743,7 @@ if (((SCARG(uap, flags) & (MNT_LAZY|MNT_NOWAIT)) == 0 || (SCARG(uap, flags) & MNT_WAIT)) && (error = VFS_STATFS(mp, sp, p))) { - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); nmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); continue; @@ -757,11 +757,11 @@ sfsp += sizeof(*sp); } count++; - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); nmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); if (sfsp && count > maxcount) p->p_retval[0] = maxcount; else Index: sys/kern/vfs_vnops.c =================================================================== RCS file: /home/ncvs/src/sys/kern/vfs_vnops.c,v retrieving revision 1.107 diff -u -r1.107 vfs_vnops.c --- sys/kern/vfs_vnops.c 2001/01/24 12:35:50 1.107 +++ sys/kern/vfs_vnops.c 2001/02/07 03:01:13 @@ -641,10 +641,10 @@ do { if ((flags & LK_INTERLOCK) == 0) - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if ((vp->v_flag & VXLOCK) && vp->v_vxproc != curproc) { vp->v_flag |= VXWANT; - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); tsleep((caddr_t)vp, PINOD, "vn_lock", 0); error = ENOENT; } else { @@ -833,9 +833,9 @@ if ((vp)->v_tag != VT_UFS) return (EOPNOTSUPP); - mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_lock(&vp->v_pollinfo.vpi_lock); SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext); - mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_unlock(&vp->v_pollinfo.vpi_lock); return (0); } @@ -845,10 +845,10 @@ { struct vnode *vp = (struct vnode *)kn->kn_fp->f_data; - mtx_enter(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_lock(&vp->v_pollinfo.vpi_lock); SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note, kn, knote, kn_selnext); - mtx_exit(&vp->v_pollinfo.vpi_lock, MTX_DEF); + mtx_unlock(&vp->v_pollinfo.vpi_lock); } static int Index: sys/miscfs/deadfs/dead_vnops.c =================================================================== RCS file: /home/ncvs/src/sys/miscfs/deadfs/dead_vnops.c,v retrieving revision 1.29 diff -u -r1.29 dead_vnops.c --- sys/miscfs/deadfs/dead_vnops.c 2000/11/01 17:57:23 1.29 +++ sys/miscfs/deadfs/dead_vnops.c 2001/02/07 03:01:13 @@ -211,7 +211,7 @@ * the interlock here. 
*/ if (ap->a_flags & LK_INTERLOCK) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); ap->a_flags &= ~LK_INTERLOCK; } if (!chkvnlock(vp)) Index: sys/miscfs/nullfs/null_vnops.c =================================================================== RCS file: /home/ncvs/src/sys/miscfs/nullfs/null_vnops.c,v retrieving revision 1.45 diff -u -r1.45 null_vnops.c --- sys/miscfs/nullfs/null_vnops.c 2000/10/29 13:56:56 1.45 +++ sys/miscfs/nullfs/null_vnops.c 2001/02/07 03:01:13 @@ -624,7 +624,7 @@ if (lvp == NULL) return (lockmgr(&vp->v_lock, flags, &vp->v_interlock, p)); if (flags & LK_INTERLOCK) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); flags &= ~LK_INTERLOCK; } if ((flags & LK_TYPE_MASK) == LK_DRAIN) { @@ -671,7 +671,7 @@ return (lockmgr(&vp->v_lock, flags | LK_RELEASE, &vp->v_interlock, p)); if ((flags & LK_THISLAYER) == 0) { if (flags & LK_INTERLOCK) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); flags &= ~LK_INTERLOCK; } VOP_UNLOCK(lvp, flags & ~LK_INTERLOCK, p); Index: sys/miscfs/procfs/procfs_ctl.c =================================================================== RCS file: /home/ncvs/src/sys/miscfs/procfs/procfs_ctl.c,v retrieving revision 1.23 diff -u -r1.23 procfs_ctl.c --- sys/miscfs/procfs/procfs_ctl.c 2000/12/23 19:43:10 1.23 +++ sys/miscfs/procfs/procfs_ctl.c 2001/02/07 03:01:13 @@ -167,13 +167,13 @@ default: PROCTREE_LOCK(PT_SHARED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (!TRACE_WAIT_P(curp, p)) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_RELEASE); return (EBUSY); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_RELEASE); } @@ -252,31 +252,31 @@ error = 0; if (p->p_flag & P_TRACED) { PROCTREE_LOCK(PT_SHARED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); while (error == 0 && (p->p_stat != SSTOP) && (p->p_flag & P_TRACED) && (p->p_pptr == curp)) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_RELEASE); error = tsleep((caddr_t) p, PWAIT|PCATCH, "procfsx", 0); PROCTREE_LOCK(PT_SHARED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } if (error == 0 && !TRACE_WAIT_P(curp, p)) error = EBUSY; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_RELEASE); } else { - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); while (error == 0 && p->p_stat != SSTOP) { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); error = tsleep((caddr_t) p, PWAIT|PCATCH, "procfs", 0); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } return (error); @@ -284,10 +284,10 @@ panic("procfs_control"); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat == SSTOP) setrunnable(p); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); return (0); } @@ -329,17 +329,17 @@ nm = vfs_findname(signames, msg, xlen); if (nm) { PROCTREE_LOCK(PT_SHARED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (TRACE_WAIT_P(curp, p)) { p->p_xstat = nm->nm_val; #ifdef FIX_SSTEP FIX_SSTEP(p); #endif setrunnable(p); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_RELEASE); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROCTREE_LOCK(PT_RELEASE); psignal(p, nm->nm_val); } }
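The procfs_ctl.c hunks above preserve a pattern that recurs wherever sched_lock is taken in this patch: sched_lock is a spin mutex, so it cannot be held across tsleep(); each wait loop drops it before sleeping and retakes it before re-checking the condition. A condensed sketch of that loop shape, lifted from the hunk above (the predicate is illustrative):

	mtx_lock_spin(&sched_lock);
	while (error == 0 && p->p_stat != SSTOP) {
		mtx_unlock_spin(&sched_lock);	/* cannot sleep holding a spin mutex */
		error = tsleep((caddr_t) p, PWAIT | PCATCH, "procfs", 0);
		mtx_lock_spin(&sched_lock);	/* retake before testing p_stat again */
	}
	mtx_unlock_spin(&sched_lock);

Index: sys/miscfs/procfs/procfs_status.c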
=================================================================== RCS file: /home/ncvs/src/sys/miscfs/procfs/procfs_status.c,v retrieving revision 1.25 diff -u -r1.25 procfs_status.c --- sys/miscfs/procfs/procfs_status.c 2001/01/24 11:17:35 1.25 +++ sys/miscfs/procfs/procfs_status.c 2001/02/07 03:01:13 @@ -123,12 +123,12 @@ DOCHECK(); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_sflag & PS_INMEM) { struct timeval ut, st; calcru(p, &ut, &st, (struct timeval *) NULL); - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); ps += snprintf(ps, psbuf + sizeof(psbuf) - ps, " %ld,%ld %ld,%ld %ld,%ld", p->p_stats->p_start.tv_sec, @@ -136,7 +136,7 @@ ut.tv_sec, ut.tv_usec, st.tv_sec, st.tv_usec); } else { - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); ps += snprintf(ps, psbuf + sizeof(psbuf) - ps, " -1,-1 -1,-1 -1,-1"); } Index: sys/msdosfs/msdosfs_denode.c =================================================================== RCS file: /home/ncvs/src/sys/msdosfs/msdosfs_denode.c,v retrieving revision 1.53 diff -u -r1.53 msdosfs_denode.c --- sys/msdosfs/msdosfs_denode.c 2001/01/24 12:35:51 1.53 +++ sys/msdosfs/msdosfs_denode.c 2001/02/07 03:01:13 @@ -130,21 +130,21 @@ struct vnode *vp; loop: - mtx_enter(&dehash_mtx, MTX_DEF); + mtx_lock(&dehash_mtx); for (dep = DEHASH(dev, dirclust, diroff); dep; dep = dep->de_next) { if (dirclust == dep->de_dirclust && diroff == dep->de_diroffset && dev == dep->de_dev && dep->de_refcnt != 0) { vp = DETOV(dep); - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&dehash_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&dehash_mtx); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) goto loop; return (dep); } } - mtx_exit(&dehash_mtx, MTX_DEF); + mtx_unlock(&dehash_mtx); return (NULL); } @@ -154,7 +154,7 @@ { struct denode **depp, *deq; - mtx_enter(&dehash_mtx, MTX_DEF); + mtx_lock(&dehash_mtx); depp = &DEHASH(dep->de_dev, dep->de_dirclust, dep->de_diroffset); deq = *depp; if (deq) @@ -162,7 +162,7 @@ dep->de_next = deq; dep->de_prev = depp; *depp = dep; - mtx_exit(&dehash_mtx, MTX_DEF); + mtx_unlock(&dehash_mtx); } static void @@ -171,7 +171,7 @@ { struct denode *deq; - mtx_enter(&dehash_mtx, MTX_DEF); + mtx_lock(&dehash_mtx); deq = dep->de_next; if (deq) deq->de_prev = dep->de_prev; @@ -180,7 +180,7 @@ dep->de_next = NULL; dep->de_prev = NULL; #endif - mtx_exit(&dehash_mtx, MTX_DEF); + mtx_unlock(&dehash_mtx); } /* Index: sys/msdosfs/msdosfs_vfsops.c =================================================================== RCS file: /home/ncvs/src/sys/msdosfs/msdosfs_vfsops.c,v retrieving revision 1.70 diff -u -r1.70 msdosfs_vfsops.c --- sys/msdosfs/msdosfs_vfsops.c 2001/02/04 13:12:54 1.70 +++ sys/msdosfs/msdosfs_vfsops.c 2001/02/07 03:01:13 @@ -862,7 +862,7 @@ /* * Write back each (modified) denode. 
*/ - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); loop: for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { /* @@ -872,20 +872,20 @@ if (vp->v_mount != mp) goto loop; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); nvp = LIST_NEXT(vp, v_mntvnodes); dep = VTODE(vp); if (vp->v_type == VNON || ((dep->de_flag & (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 && (TAILQ_EMPTY(&vp->v_dirtyblkhd) || waitfor == MNT_LAZY))) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); continue; } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); if (error) { - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); if (error == ENOENT) goto loop; continue; @@ -895,9 +895,9 @@ allerror = error; VOP_UNLOCK(vp, 0, p); vrele(vp); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); /* * Flush filesystem control info. Index: sys/msdosfs/msdosfs_vnops.c =================================================================== RCS file: /home/ncvs/src/sys/msdosfs/msdosfs_vnops.c,v retrieving revision 1.106 diff -u -r1.106 msdosfs_vnops.c --- sys/msdosfs/msdosfs_vnops.c 2000/10/22 14:24:30 1.106 +++ sys/msdosfs/msdosfs_vnops.c 2001/02/07 03:01:13 @@ -233,12 +233,12 @@ struct denode *dep = VTODE(vp); struct timespec ts; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (vp->v_usecount > 1) { getnanotime(&ts); DETIMES(dep, &ts, &ts, &ts); } - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return 0; } Index: sys/net/if_var.h =================================================================== RCS file: /home/ncvs/src/sys/net/if_var.h,v retrieving revision 1.31 diff -u -r1.31 if_var.h --- sys/net/if_var.h 2001/02/06 10:11:37 1.31 +++ sys/net/if_var.h 2001/02/07 03:01:13 @@ -191,8 +191,8 @@ * (defined above). Entries are added to and deleted from these structures * by these macros, which should be called with ipl raised to splimp(). 
*/ -#define IF_LOCK(ifq) mtx_enter(&(ifq)->ifq_mtx, MTX_DEF) -#define IF_UNLOCK(ifq) mtx_exit(&(ifq)->ifq_mtx, MTX_DEF) +#define IF_LOCK(ifq) mtx_lock(&(ifq)->ifq_mtx) +#define IF_UNLOCK(ifq) mtx_unlock(&(ifq)->ifq_mtx) #define _IF_QFULL(ifq) ((ifq)->ifq_len >= (ifq)->ifq_maxlen) #define _IF_DROP(ifq) ((ifq)->ifq_drops++) #define _IF_QLEN(ifq) ((ifq)->ifq_len) Index: sys/netgraph/ng_base.c =================================================================== RCS file: /home/ncvs/src/sys/netgraph/ng_base.c,v retrieving revision 1.46 diff -u -r1.46 ng_base.c --- sys/netgraph/ng_base.c 2001/02/05 18:57:11 1.46 +++ sys/netgraph/ng_base.c 2001/02/07 03:01:13 @@ -239,23 +239,23 @@ { hook_p hook; SLIST_ENTRY(ng_hook) temp; - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); hook = LIST_FIRST(&ng_freehooks); if (hook) { LIST_REMOVE(hook, hk_hooks); bcopy(&hook->hk_all, &temp, sizeof(temp)); bzero(hook, sizeof(struct ng_hook)); bcopy(&temp, &hook->hk_all, sizeof(temp)); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); hook->hk_magic = HK_MAGIC; } else { - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); _NG_ALLOC_HOOK(hook); if (hook) { hook->hk_magic = HK_MAGIC; - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); SLIST_INSERT_HEAD(&ng_allhooks, hook, hk_all); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); } } return (hook); @@ -266,23 +266,23 @@ { node_p node; SLIST_ENTRY(ng_node) temp; - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); node = LIST_FIRST(&ng_freenodes); if (node) { LIST_REMOVE(node, nd_nodes); bcopy(&node->nd_all, &temp, sizeof(temp)); bzero(node, sizeof(struct ng_node)); bcopy(&temp, &node->nd_all, sizeof(temp)); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); node->nd_magic = ND_MAGIC; } else { - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); _NG_ALLOC_NODE(node); if (node) { node->nd_magic = ND_MAGIC; - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); SLIST_INSERT_HEAD(&ng_allnodes, node, nd_all); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); } } return (node); @@ -294,18 +294,18 @@ #define NG_FREE_HOOK(hook) \ do { \ - mtx_enter(&ng_nodelist_mtx, MTX_DEF); \ + mtx_lock(&ng_nodelist_mtx); \ LIST_INSERT_HEAD(&ng_freehooks, hook, hk_hooks); \ hook->hk_magic = 0; \ - mtx_exit(&ng_nodelist_mtx, MTX_DEF); \ + mtx_unlock(&ng_nodelist_mtx); \ } while (0) #define NG_FREE_NODE(node) \ do { \ - mtx_enter(&ng_nodelist_mtx, MTX_DEF); \ + mtx_lock(&ng_nodelist_mtx); \ LIST_INSERT_HEAD(&ng_freenodes, node, nd_nodes); \ node->nd_magic = 0; \ - mtx_exit(&ng_nodelist_mtx, MTX_DEF); \ + mtx_unlock(&ng_nodelist_mtx); \ } while (0) #else /* NETGRAPH_DEBUG */ /*----------------------------------------------*/ @@ -625,13 +625,13 @@ LIST_INIT(&node->nd_hooks); /* Link us into the node linked list */ - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); LIST_INSERT_HEAD(&ng_nodelist, node, nd_nodes); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); /* get an ID and put us in the hash chain */ - mtx_enter(&ng_idhash_mtx, MTX_DEF); + mtx_lock(&ng_idhash_mtx); for (;;) { /* wrap protection, even if silly */ node_p node2 = NULL; node->nd_ID = nextID++; /* 137/second for 1 year before wrap */ @@ -644,7 +644,7 @@ } LIST_INSERT_HEAD(&ng_ID_hash[NG_IDHASH_FN(node->nd_ID)], node, nd_idnodes); - mtx_exit(&ng_idhash_mtx, MTX_DEF); + mtx_unlock(&ng_idhash_mtx); /* Done */ 
*nodepp = node; @@ -757,14 +757,14 @@ if (v == 1) { /* we were the last */ - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); node->nd_type->refs--; /* XXX maybe should get types lock? */ LIST_REMOVE(node, nd_nodes); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); - mtx_enter(&ng_idhash_mtx, MTX_DEF); + mtx_lock(&ng_idhash_mtx); LIST_REMOVE(node, nd_idnodes); - mtx_exit(&ng_idhash_mtx, MTX_DEF); + mtx_unlock(&ng_idhash_mtx); mtx_destroy(&node->nd_input_queue.q_mtx); NG_FREE_NODE(node); @@ -778,11 +778,11 @@ ng_ID2noderef(ng_ID_t ID) { node_p node; - mtx_enter(&ng_idhash_mtx, MTX_DEF); + mtx_lock(&ng_idhash_mtx); NG_IDHASH_FIND(ID, node); if(node) NG_NODE_REF(node); - mtx_exit(&ng_idhash_mtx, MTX_DEF); + mtx_unlock(&ng_idhash_mtx); return(node); } @@ -859,7 +859,7 @@ } /* Find node by name */ - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); LIST_FOREACH(node, &ng_nodelist, nd_nodes) { if (NG_NODE_IS_VALID(node) && NG_NODE_HAS_NAME(node) @@ -869,7 +869,7 @@ } if (node) NG_NODE_REF(node); - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); return (node); } @@ -1148,10 +1148,10 @@ /* Link in new type */ - mtx_enter(&ng_typelist_mtx, MTX_DEF); + mtx_lock(&ng_typelist_mtx); LIST_INSERT_HEAD(&ng_typelist, tp, types); tp->refs = 1; /* first ref is linked list */ - mtx_exit(&ng_typelist_mtx, MTX_DEF); + mtx_unlock(&ng_typelist_mtx); return (0); } @@ -1163,12 +1163,12 @@ { struct ng_type *type; - mtx_enter(&ng_typelist_mtx, MTX_DEF); + mtx_lock(&ng_typelist_mtx); LIST_FOREACH(type, &ng_typelist, types) { if (strcmp(type->name, typename) == 0) break; } - mtx_exit(&ng_typelist_mtx, MTX_DEF); + mtx_unlock(&ng_typelist_mtx); return (type); } @@ -1933,7 +1933,7 @@ atomic_subtract_long(&ngq->q_flags, READER_INCREMENT); /* ######### End Hack alert ######### */ - mtx_enter((&ngq->q_mtx), MTX_SPIN); + mtx_lock_spin((&ngq->q_mtx)); /* * Try again. Another processor (or interrupt for that matter) may * have removed the last queued item that was stopping us from @@ -1942,7 +1942,7 @@ */ if ((ngq->q_flags & NGQ_RMASK) == 0) { atomic_add_long(&ngq->q_flags, READER_INCREMENT); - mtx_exit((&ngq->q_mtx), MTX_SPIN); + mtx_unlock_spin((&ngq->q_mtx)); return (item); } @@ -1957,7 +1957,7 @@ * see if we can dequeue something to run instead. */ item = ng_dequeue(ngq); - mtx_exit(&(ngq->q_mtx), MTX_SPIN); + mtx_unlock_spin(&(ngq->q_mtx)); return (item); } @@ -1965,7 +1965,7 @@ ng_acquire_write(struct ng_queue *ngq, item_p item) { restart: - mtx_enter(&(ngq->q_mtx), MTX_SPIN); + mtx_lock_spin(&(ngq->q_mtx)); /* * If there are no readers, no writer, and no pending packets, then * we can just go ahead. In all other situations we need to queue the @@ -1973,7 +1973,7 @@ */ if ((ngq->q_flags & NGQ_WMASK) == 0) { atomic_add_long(&ngq->q_flags, WRITER_ACTIVE); - mtx_exit((&ngq->q_mtx), MTX_SPIN); + mtx_unlock_spin((&ngq->q_mtx)); if (ngq->q_flags & READER_MASK) { /* Collision with fast-track reader */ atomic_subtract_long(&ngq->q_flags, WRITER_ACTIVE); @@ -1993,7 +1993,7 @@ * see if we can dequeue something to run instead. 
*/ item = ng_dequeue(ngq); - mtx_exit(&(ngq->q_mtx), MTX_SPIN); + mtx_unlock_spin(&(ngq->q_mtx)); return (item); } @@ -2014,7 +2014,7 @@ { item_p item; u_int add_arg; - mtx_enter(&ngq->q_mtx, MTX_SPIN); + mtx_lock_spin(&ngq->q_mtx); for (;;) { /* Now take a look at what's on the queue */ if (ngq->q_flags & READ_PENDING) { @@ -2038,16 +2038,16 @@ } atomic_add_long(&ngq->q_flags, add_arg); - mtx_exit(&ngq->q_mtx, MTX_SPIN); + mtx_unlock_spin(&ngq->q_mtx); NG_FREE_ITEM(item); - mtx_enter(&ngq->q_mtx, MTX_SPIN); + mtx_lock_spin(&ngq->q_mtx); } /* * Take us off the work queue if we are there. * We definatly have no work to be done. */ ng_worklist_remove(ngq->q_node); - mtx_exit(&ngq->q_mtx, MTX_SPIN); + mtx_unlock_spin(&ngq->q_mtx); } /*********************************************************************** @@ -2167,7 +2167,7 @@ #ifdef NETGRAPH_DEBUG _ngi_check(item, __FILE__, __LINE__); #endif - mtx_enter(&(ngq->q_mtx), MTX_SPIN); + mtx_lock_spin(&(ngq->q_mtx)); ng_queue_rw(ngq, item, rw); /* * If there are active elements then we can rely on @@ -2180,7 +2180,7 @@ if (CAN_GET_WORK(ngq->q_flags)) { ng_setisr(node); } - mtx_exit(&(ngq->q_mtx), MTX_SPIN); + mtx_unlock_spin(&(ngq->q_mtx)); return (0); } /* @@ -2234,13 +2234,13 @@ * dequeue acquires and adjusts the input_queue as it dequeues * packets. It acquires the rw lock as needed. */ - mtx_enter(&ngq->q_mtx, MTX_SPIN); + mtx_lock_spin(&ngq->q_mtx); item = ng_dequeue(ngq); /* fixes worklist too*/ if (!item) { - mtx_exit(&ngq->q_mtx, MTX_SPIN); + mtx_unlock_spin(&ngq->q_mtx); return (error); } - mtx_exit(&ngq->q_mtx, MTX_SPIN); + mtx_unlock_spin(&ngq->q_mtx); /* * We have the appropriate lock, so run the item. @@ -2559,7 +2559,7 @@ node_p node; int num = 0; - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); /* Count number of nodes */ LIST_FOREACH(node, &ng_nodelist, nd_nodes) { if (NG_NODE_IS_VALID(node) @@ -2567,7 +2567,7 @@ num++; } } - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); /* Get response struct */ NG_MKRESPONSE(resp, msg, sizeof(*nl) @@ -2580,7 +2580,7 @@ /* Cycle through the linked list of nodes */ nl->numnames = 0; - mtx_enter(&ng_nodelist_mtx, MTX_DEF); + mtx_lock(&ng_nodelist_mtx); LIST_FOREACH(node, &ng_nodelist, nd_nodes) { struct nodeinfo *const np = &nl->nodeinfo[nl->numnames]; @@ -2600,7 +2600,7 @@ np->hooks = node->nd_numhooks; nl->numnames++; } - mtx_exit(&ng_nodelist_mtx, MTX_DEF); + mtx_unlock(&ng_nodelist_mtx); break; } @@ -2610,12 +2610,12 @@ { struct ng_type *type; int num = 0; - mtx_enter(&ng_typelist_mtx, MTX_DEF); + mtx_lock(&ng_typelist_mtx); /* Count number of types */ LIST_FOREACH(type, &ng_typelist, types) { num++; } - mtx_exit(&ng_typelist_mtx, MTX_DEF); + mtx_unlock(&ng_typelist_mtx); /* Get response struct */ NG_MKRESPONSE(resp, msg, sizeof(*tl) @@ -2628,7 +2628,7 @@ /* Cycle through the linked list of types */ tl->numtypes = 0; - mtx_enter(&ng_typelist_mtx, MTX_DEF); + mtx_lock(&ng_typelist_mtx); LIST_FOREACH(type, &ng_typelist, types) { struct typeinfo *const tp = &tl->typeinfo[tl->numtypes]; @@ -2641,7 +2641,7 @@ tp->numnodes = type->refs - 1; /* don't count list */ tl->numtypes++; } - mtx_exit(&ng_typelist_mtx, MTX_DEF); + mtx_unlock(&ng_typelist_mtx); break; } @@ -2868,10 +2868,10 @@ /* Call type specific code */ if (type->mod_event != NULL) if ((error = (*type->mod_event)(mod, event, data))) { - mtx_enter(&ng_typelist_mtx, MTX_DEF); + mtx_lock(&ng_typelist_mtx); type->refs--; /* undo it */ LIST_REMOVE(type, types); - mtx_exit(&ng_typelist_mtx, MTX_DEF); + 
mtx_unlock(&ng_typelist_mtx); } splx(s); break; @@ -2893,9 +2893,9 @@ break; } } - mtx_enter(&ng_typelist_mtx, MTX_DEF); + mtx_lock(&ng_typelist_mtx); LIST_REMOVE(type, types); - mtx_exit(&ng_typelist_mtx, MTX_DEF); + mtx_unlock(&ng_typelist_mtx); } splx(s); break; @@ -3238,15 +3238,15 @@ node_p node = NULL; for (;;) { - mtx_enter(&ng_worklist_mtx, MTX_SPIN); + mtx_lock_spin(&ng_worklist_mtx); node = TAILQ_FIRST(&ng_worklist); if (!node) { - mtx_exit(&ng_worklist_mtx, MTX_SPIN); + mtx_unlock_spin(&ng_worklist_mtx); break; } node->nd_flags &= ~NG_WORKQ; TAILQ_REMOVE(&ng_worklist, node, nd_work); - mtx_exit(&ng_worklist_mtx, MTX_SPIN); + mtx_unlock_spin(&ng_worklist_mtx); /* * We have the node. We also take over the reference * that the list had on it. @@ -3261,14 +3261,14 @@ * future. */ for (;;) { - mtx_enter(&node->nd_input_queue.q_mtx, MTX_SPIN); + mtx_lock_spin(&node->nd_input_queue.q_mtx); item = ng_dequeue(&node->nd_input_queue); if (item == NULL) { - mtx_exit(&node->nd_input_queue.q_mtx, MTX_SPIN); + mtx_unlock_spin(&node->nd_input_queue.q_mtx); NG_NODE_UNREF(node); break; /* go look for another node */ } else { - mtx_exit(&node->nd_input_queue.q_mtx, MTX_SPIN); + mtx_unlock_spin(&node->nd_input_queue.q_mtx); ng_apply_item(item); } } @@ -3278,19 +3278,19 @@ static void ng_worklist_remove(node_p node) { - mtx_enter(&ng_worklist_mtx, MTX_SPIN); + mtx_lock_spin(&ng_worklist_mtx); if (node->nd_flags & NG_WORKQ) { TAILQ_REMOVE(&ng_worklist, node, nd_work); NG_NODE_UNREF(node); } node->nd_flags &= ~NG_WORKQ; - mtx_exit(&ng_worklist_mtx, MTX_SPIN); + mtx_unlock_spin(&ng_worklist_mtx); } static void ng_setisr(node_p node) { - mtx_enter(&ng_worklist_mtx, MTX_SPIN); + mtx_lock_spin(&ng_worklist_mtx); if ((node->nd_flags & NG_WORKQ) == 0) { /* * If we are not already on the work queue, @@ -3300,7 +3300,7 @@ TAILQ_INSERT_TAIL(&ng_worklist, node, nd_work); NG_NODE_REF(node); } - mtx_exit(&ng_worklist_mtx, MTX_SPIN); + mtx_unlock_spin(&ng_worklist_mtx); schednetisr(NETISR_NETGRAPH); } Index: sys/nfs/nfs_nqlease.c =================================================================== RCS file: /home/ncvs/src/sys/nfs/nfs_nqlease.c,v retrieving revision 1.55 diff -u -r1.55 nfs_nqlease.c --- sys/nfs/nfs_nqlease.c 2000/12/21 21:44:24 1.55 +++ sys/nfs/nfs_nqlease.c 2001/02/07 03:01:13 @@ -1194,7 +1194,7 @@ * Search the mount list for all nqnfs mounts and do their timer * queues. 
*/ - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nxtmp) { if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, p)) { nxtmp = TAILQ_NEXT(mp, mnt_list); @@ -1208,11 +1208,11 @@ } } } - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); nxtmp = TAILQ_NEXT(mp, mnt_list); vfs_unbusy(mp, p); } - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); } #ifndef NFS_NOSERVER Index: sys/ntfs/ntfs_ihash.c =================================================================== RCS file: /home/ncvs/src/sys/ntfs/ntfs_ihash.c,v retrieving revision 1.14 diff -u -r1.14 ntfs_ihash.c --- sys/ntfs/ntfs_ihash.c 2001/02/04 12:37:47 1.14 +++ sys/ntfs/ntfs_ihash.c 2001/02/07 03:01:13 @@ -93,11 +93,11 @@ { struct ntnode *ip; - mtx_enter(&ntfs_nthash_mtx, MTX_DEF); + mtx_lock(&ntfs_nthash_mtx); LIST_FOREACH(ip, NTNOHASH(dev, inum), i_hash) if (inum == ip->i_number && dev == ip->i_dev) break; - mtx_exit(&ntfs_nthash_mtx, MTX_DEF); + mtx_unlock(&ntfs_nthash_mtx); return (ip); } @@ -111,11 +111,11 @@ { struct nthashhead *ipp; - mtx_enter(&ntfs_nthash_mtx, MTX_DEF); + mtx_lock(&ntfs_nthash_mtx); ipp = NTNOHASH(ip->i_dev, ip->i_number); LIST_INSERT_HEAD(ipp, ip, i_hash); ip->i_flag |= IN_HASHED; - mtx_exit(&ntfs_nthash_mtx, MTX_DEF); + mtx_unlock(&ntfs_nthash_mtx); } /* @@ -125,10 +125,10 @@ ntfs_nthashrem(ip) struct ntnode *ip; { - mtx_enter(&ntfs_nthash_mtx, MTX_DEF); + mtx_lock(&ntfs_nthash_mtx); if (ip->i_flag & IN_HASHED) { ip->i_flag &= ~IN_HASHED; LIST_REMOVE(ip, i_hash); } - mtx_exit(&ntfs_nthash_mtx, MTX_DEF); + mtx_unlock(&ntfs_nthash_mtx); } Index: sys/ntfs/ntfs_subr.c =================================================================== RCS file: /home/ncvs/src/sys/ntfs/ntfs_subr.c,v retrieving revision 1.12 diff -u -r1.12 ntfs_subr.c --- sys/ntfs/ntfs_subr.c 2001/02/04 13:13:14 1.12 +++ sys/ntfs/ntfs_subr.c 2001/02/07 03:01:13 @@ -360,7 +360,7 @@ dprintf(("ntfs_ntget: get ntnode %d: %p, usecount: %d\n", ip->i_number, ip, ip->i_usecount)); - mtx_enter(&ip->i_interlock, MTX_DEF); + mtx_lock(&ip->i_interlock); ip->i_usecount++; LOCKMGR(&ip->i_lock, LK_EXCLUSIVE | LK_INTERLOCK, &ip->i_interlock); @@ -438,7 +438,7 @@ dprintf(("ntfs_ntput: rele ntnode %d: %p, usecount: %d\n", ip->i_number, ip, ip->i_usecount)); - mtx_enter(&ip->i_interlock, MTX_DEF); + mtx_lock(&ip->i_interlock); ip->i_usecount--; #ifdef DIAGNOSTIC @@ -462,7 +462,7 @@ LIST_REMOVE(vap,va_list); ntfs_freentvattr(vap); } - mtx_exit(&ip->i_interlock, MTX_DEF); + mtx_unlock(&ip->i_interlock); mtx_destroy(&ip->i_interlock); lockdestroy(&ip->i_lock); @@ -479,9 +479,9 @@ ntfs_ntref(ip) struct ntnode *ip; { - mtx_enter(&ip->i_interlock, MTX_DEF); + mtx_lock(&ip->i_interlock); ip->i_usecount++; - mtx_exit(&ip->i_interlock, MTX_DEF); + mtx_unlock(&ip->i_interlock); dprintf(("ntfs_ntref: ino %d, usecount: %d\n", ip->i_number, ip->i_usecount)); @@ -498,13 +498,13 @@ dprintf(("ntfs_ntrele: rele ntnode %d: %p, usecount: %d\n", ip->i_number, ip, ip->i_usecount)); - mtx_enter(&ip->i_interlock, MTX_DEF); + mtx_lock(&ip->i_interlock); ip->i_usecount--; if (ip->i_usecount < 0) panic("ntfs_ntrele: ino: %d usecount: %d \n", ip->i_number,ip->i_usecount); - mtx_exit(&ip->i_interlock, MTX_DEF); + mtx_unlock(&ip->i_interlock); } /* Index: sys/ntfs/ntfs_vfsops.c =================================================================== RCS file: /home/ncvs/src/sys/ntfs/ntfs_vfsops.c,v retrieving revision 1.26 diff -u -r1.26 ntfs_vfsops.c --- sys/ntfs/ntfs_vfsops.c 2000/12/08 21:50:51 1.26 +++ 
sys/ntfs/ntfs_vfsops.c 2001/02/07 03:01:13 @@ -196,9 +196,9 @@ return (error); } - mtx_enter(&mountlist_mtx, MTX_DEF); + mtx_lock(&mountlist_mtx); TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list); - mtx_exit(&mountlist_mtx, MTX_DEF); + mtx_unlock(&mountlist_mtx); (void)ntfs_statfs(mp, &mp->mnt_stat, p); vfs_unbusy(mp); return (0); Index: sys/nwfs/nwfs_node.c =================================================================== RCS file: /home/ncvs/src/sys/nwfs/nwfs_node.c,v retrieving revision 1.14 diff -u -r1.14 nwfs_node.c --- sys/nwfs/nwfs_node.c 2001/02/04 13:13:15 1.14 +++ sys/nwfs/nwfs_node.c 2001/02/07 03:01:13 @@ -149,7 +149,7 @@ rescan: if (nwfs_hashlookup(nmp, fid, &np) == 0) { vp = NWTOV(np); - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); lockmgr(&nwhashlock, LK_RELEASE, NULL, p); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) goto loop; Index: sys/nwfs/nwfs_vnops.c =================================================================== RCS file: /home/ncvs/src/sys/nwfs/nwfs_vnops.c,v retrieving revision 1.13 diff -u -r1.13 nwfs_vnops.c --- sys/nwfs/nwfs_vnops.c 2000/10/29 14:54:49 1.13 +++ sys/nwfs/nwfs_vnops.c 2001/02/07 03:01:13 @@ -256,24 +256,24 @@ if (vp->v_type == VDIR) return 0; /* nothing to do now */ error = 0; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (np->opened == 0) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return 0; } - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); error = nwfs_vinvalbuf(vp, V_SAVE, ap->a_cred, ap->a_p, 1); - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (np->opened == 0) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return 0; } if (--np->opened == 0) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); error = ncp_close_file(NWFSTOCONN(VTONWFS(vp)), &np->n_fh, ap->a_p, ap->a_cred); } else - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); np->n_atime = 0; return (error); } Index: sys/pc98/i386/machdep.c =================================================================== RCS file: /home/ncvs/src/sys/pc98/i386/machdep.c,v retrieving revision 1.203 diff -u -r1.203 machdep.c --- sys/pc98/i386/machdep.c 2001/02/04 07:00:47 1.203 +++ sys/pc98/i386/machdep.c 2001/02/07 03:01:13 @@ -2219,7 +2219,7 @@ * Giant is used early for at least debugger traps and unexpected traps. 
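The machdep hunks in this patch all touch the same early-boot step: Giant is created recursive and acquired before the first trap can occur. A condensed sketch of that sequence with the new spelling (illustrative only):

	mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE);
	mtx_lock(&Giant);			/* was: mtx_enter(&Giant, MTX_DEF) */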
*/ mtx_init(&Giant, "Giant", MTX_DEF | MTX_RECURSE); - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); /* make ldt memory segments */ /* Index: sys/pc98/pc98/clock.c =================================================================== RCS file: /home/ncvs/src/sys/pc98/pc98/clock.c,v retrieving revision 1.98 diff -u -r1.98 clock.c --- sys/pc98/pc98/clock.c 2001/01/29 11:57:26 1.98 +++ sys/pc98/pc98/clock.c 2001/02/07 03:01:13 @@ -232,7 +232,7 @@ { if (timecounter->tc_get_timecount == i8254_get_timecount) { - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); if (i8254_ticked) i8254_ticked = 0; else { @@ -240,7 +240,7 @@ i8254_lastcount = 0; } clkintr_pending = 0; - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); } timer_func(&frame); switch (timer0_state) { @@ -257,14 +257,14 @@ break; case ACQUIRE_PENDING: - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); i8254_offset = i8254_get_timecount(NULL); i8254_lastcount = 0; timer0_max_count = TIMER_DIV(new_rate); outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); timer_func = new_function; timer0_state = ACQUIRED; break; @@ -272,7 +272,7 @@ case RELEASE_PENDING: if ((timer0_prescaler_count += timer0_max_count) >= hardclock_max_count) { - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); i8254_offset = i8254_get_timecount(NULL); i8254_lastcount = 0; timer0_max_count = hardclock_max_count; @@ -280,7 +280,7 @@ TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); timer0_prescaler_count = 0; timer_func = hardclock; timer0_state = RELEASED; @@ -465,7 +465,7 @@ { int high, low; - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); /* Select timer0 and latch counter value. */ outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); @@ -473,7 +473,7 @@ low = inb(TIMER_CNTR0); high = inb(TIMER_CNTR0); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); return ((high << 8) | low); } @@ -610,10 +610,10 @@ splx(x); return (-1); /* XXX Should be EBUSY, but nobody cares anyway. */ } - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); outb(TIMER_CNTR2, pitch); outb(TIMER_CNTR2, (pitch>>8)); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); if (!beeping) { /* enable counter2 output to speaker */ outb(IO_PPI, inb(IO_PPI) | 3); @@ -861,7 +861,7 @@ { int new_timer0_max_count; - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); timer_freq = freq; new_timer0_max_count = hardclock_max_count = TIMER_DIV(intr_freq); if (new_timer0_max_count != timer0_max_count) { @@ -870,7 +870,7 @@ outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); } - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); } /* @@ -885,11 +885,11 @@ i8254_restore(void) { - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); outb(TIMER_MODE, TIMER_SEL0 | TIMER_RATEGEN | TIMER_16BIT); outb(TIMER_CNTR0, timer0_max_count & 0xff); outb(TIMER_CNTR0, timer0_max_count >> 8); - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); } /* @@ -1540,7 +1540,7 @@ u_int eflags; eflags = read_eflags(); - mtx_enter(&clock_lock, MTX_SPIN); + mtx_lock_spin(&clock_lock); /* Select timer0 and latch counter value. 
*/ outb(TIMER_MODE, TIMER_SEL0 | TIMER_LATCH); @@ -1564,7 +1564,7 @@ } i8254_lastcount = count; count += i8254_offset; - mtx_exit(&clock_lock, MTX_SPIN); + mtx_unlock_spin(&clock_lock); return (count); } Index: sys/pc98/pc98/npx.c =================================================================== RCS file: /home/ncvs/src/sys/pc98/pc98/npx.c,v retrieving revision 1.64 diff -u -r1.64 npx.c --- sys/pc98/pc98/npx.c 2001/01/28 12:31:03 1.64 +++ sys/pc98/pc98/npx.c 2001/02/07 03:01:13 @@ -774,7 +774,7 @@ u_short control; struct intrframe *frame; - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); if (PCPU_GET(npxproc) == NULL || !npx_exists) { printf("npxintr: npxproc = %p, curproc = %p, npx_exists = %d\n", PCPU_GET(npxproc), curproc, npx_exists); @@ -837,7 +837,7 @@ */ psignal(curproc, SIGFPE); } - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } /* Index: sys/pc98/pc98/sio.c =================================================================== RCS file: /home/ncvs/src/sys/pc98/pc98/sio.c,v retrieving revision 1.143 diff -u -r1.143 sio.c --- sys/pc98/pc98/sio.c 2001/01/31 10:54:44 1.143 +++ sys/pc98/pc98/sio.c 2001/02/07 03:01:13 @@ -1418,7 +1418,7 @@ * but mask them in the processor as well in case there are some * (misconfigured) shared interrupts. */ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); /* EXTRA DELAY? */ /* @@ -1528,7 +1528,7 @@ CLR_FLAG(dev, COM_C_IIR_TXRDYBUG); } sio_setreg(com, com_cfcr, CFCR_8BITS); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); bus_release_resource(dev, SYS_RES_IOPORT, rid, port); return (iobase == siocniobase ? 0 : result); } @@ -1586,7 +1586,7 @@ } #endif - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); irqs = irqmap[1] & ~irqmap[0]; if (bus_get_resource(idev, SYS_RES_IRQ, 0, &xirq, NULL) == 0 && @@ -1864,7 +1864,7 @@ } else com->it_in.c_ispeed = com->it_in.c_ospeed = TTYDEF_SPEED; if (siosetwater(com, com->it_in.c_ispeed) != 0) { - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); /* * Leave i/o resources allocated if this is a `cn'-level * console, so that other devices can't snarf them. @@ -1873,7 +1873,7 @@ bus_release_resource(dev, SYS_RES_IOPORT, rid, port); return (ENOMEM); } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); termioschars(&com->it_in); com->it_out = com->it_in; @@ -2274,7 +2274,7 @@ } } - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); #ifdef PC98 if (IS_8251(com->pc98_if_type)) { com_tiocm_bis(com, TIOCM_LE); @@ -2302,7 +2302,7 @@ #ifdef PC98 } #endif - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); /* * Handle initial DCD. Callout devices get a fake initial * DCD (trapdoor DCD). If we are callout, then any sleeping @@ -2625,7 +2625,7 @@ * semantics instead of the save-and-disable semantics * that are used everywhere else. */ - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); incc = com->iptr - buf; if (tp->t_rawq.c_cc + incc > tp->t_ihiwat && (com->state & CS_RTS_IFLOW @@ -2646,7 +2646,7 @@ tp->t_lflag &= ~FLUSHO; comstart(tp); } - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); } while (buf < com->iptr); } else { do { @@ -2655,7 +2655,7 @@ * semantics instead of the save-and-disable semantics * that are used everywhere else. 
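The sio hunks around this point preserve the rule the comments allude to: sio_lock is a spin mutex, so interrupts are disabled while it is held, and it must be dropped around any call that may take sleep locks or run at length, such as the line discipline input routine. A sketch of the loop's shape (illustrative; have_input() is a hypothetical placeholder for the real buffer test):

	mtx_lock_spin(&sio_lock);
	while (have_input(com)) {		/* hypothetical predicate */
		mtx_unlock_spin(&sio_lock);	/* l_rint may take sleep locks */
		(*linesw[tp->t_line].l_rint)(recv_data, tp);
		mtx_lock_spin(&sio_lock);
	}
	mtx_unlock_spin(&sio_lock);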
*/ - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); line_status = buf[com->ierroff]; recv_data = *buf++; if (line_status @@ -2670,7 +2670,7 @@ recv_data |= TTY_PE; } (*linesw[tp->t_line].l_rint)(recv_data, tp); - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); } while (buf < com->iptr); } com_events -= (com->iptr - com->ibuf); @@ -2712,9 +2712,9 @@ #ifndef COM_MULTIPORT com = (struct com_s *)arg; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); siointr1(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); #else /* COM_MULTIPORT */ bool_t possibly_more_intrs; int unit; @@ -2726,7 +2726,7 @@ * devices, then the edge from one may be lost because another is * on. */ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); do { possibly_more_intrs = FALSE; for (unit = 0; unit < sio_numunits; ++unit) { @@ -2764,7 +2764,7 @@ /* XXX COM_UNLOCK(); */ } } while (possibly_more_intrs); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); #endif /* COM_MULTIPORT */ } @@ -3361,7 +3361,7 @@ * Discard any events related to never-opened or * going-away devices. */ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); incc = com->iptr - com->ibuf; com->iptr = com->ibuf; if (com->state & CS_CHECKMSR) { @@ -3369,13 +3369,13 @@ com->state &= ~CS_CHECKMSR; } com_events -= incc; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); continue; } if (com->iptr != com->ibuf) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); sioinput(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } if (com->state & CS_CHECKMSR) { u_char delta_modem_status; @@ -3383,13 +3383,13 @@ #ifdef PC98 if (!IS_8251(com->pc98_if_type)) { #endif - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); delta_modem_status = com->last_modem_status ^ com->prev_modem_status; com->prev_modem_status = com->last_modem_status; com_events -= LOTS_OF_EVENTS; com->state &= ~CS_CHECKMSR; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (delta_modem_status & MSR_DCD) (*linesw[tp->t_line].l_modem) (tp, com->prev_modem_status & MSR_DCD); @@ -3398,10 +3398,10 @@ #endif } if (com->state & CS_ODONE) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); com_events -= LOTS_OF_EVENTS; com->state &= ~CS_ODONE; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (!(com->state & CS_BUSY) && !(com->extra_state & CSE_BUSYCHECK)) { timeout(siobusycheck, com, hz / 100); @@ -3665,7 +3665,7 @@ if (com->state >= (CS_BUSY | CS_TTGO)) siointr1(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); splx(s); comstart(tp); if (com->ibufold != NULL) { @@ -3703,7 +3703,7 @@ ibufsize = 2048; #endif if (ibufsize == com->ibufsize) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); return (0); } @@ -3713,7 +3713,7 @@ */ ibuf = malloc(2 * ibufsize, M_DEVBUF, M_NOWAIT); if (ibuf == NULL) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); return (ENOMEM); } @@ -3731,7 +3731,7 @@ * Read current input buffer, if any. Continue with interrupts * disabled. 
*/ - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->iptr != com->ibuf) sioinput(com); @@ -3766,7 +3766,7 @@ if (com == NULL) return; s = spltty(); - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (tp->t_state & TS_TTSTOP) com->state &= ~CS_TTGO; else @@ -3805,7 +3805,7 @@ outb(com->modem_ctl_port, com->mcr_image |= MCR_RTS); #endif } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (tp->t_state & (TS_TIMEOUT | TS_TTSTOP)) { ttwwakeup(tp); splx(s); @@ -3825,7 +3825,7 @@ #endif com->obufs[0].l_next = NULL; com->obufs[0].l_queued = TRUE; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->state & CS_BUSY) { qp = com->obufq.l_next; while ((next = qp->l_next) != NULL) @@ -3837,7 +3837,7 @@ com->obufq.l_next = &com->obufs[0]; com->state |= CS_BUSY; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } if (tp->t_outq.c_cc != 0 && !com->obufs[1].l_queued) { com->obufs[1].l_tail @@ -3849,7 +3849,7 @@ #endif com->obufs[1].l_next = NULL; com->obufs[1].l_queued = TRUE; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->state & CS_BUSY) { qp = com->obufq.l_next; while ((next = qp->l_next) != NULL) @@ -3861,14 +3861,14 @@ com->obufq.l_next = &com->obufs[1]; com->state |= CS_BUSY; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } tp->t_state |= TS_BUSY; } - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (com->state >= (CS_BUSY | CS_TTGO)) siointr1(com); /* fake interrupt to start output */ - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); ttwwakeup(tp); splx(s); } @@ -3886,7 +3886,7 @@ com = com_addr(DEV_TO_UNIT(tp->t_dev)); if (com == NULL || com->gone) return; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); if (rw & FWRITE) { #ifdef PC98 if (!IS_8251(com->pc98_if_type)) { @@ -3932,7 +3932,7 @@ com_events -= (com->iptr - com->ibuf); com->iptr = com->ibuf; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); comstart(tp); } @@ -3975,7 +3975,7 @@ mcr |= MCR_RTS; if (com->gone) return(0); - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); switch (how) { case DMSET: outb(com->modem_ctl_port, @@ -3988,7 +3988,7 @@ outb(com->modem_ctl_port, com->mcr_image &= ~mcr); break; } - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); return (0); } @@ -4047,9 +4047,9 @@ com = com_addr(unit); if (com != NULL && !com->gone && (com->state >= (CS_BUSY | CS_TTGO) || com->poll)) { - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); siointr1(com); - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); } } @@ -4071,10 +4071,10 @@ u_int delta; u_long total; - mtx_enter(&sio_lock, MTX_SPIN); + mtx_lock_spin(&sio_lock); delta = com->delta_error_counts[errnum]; com->delta_error_counts[errnum] = 0; - mtx_exit(&sio_lock, MTX_SPIN); + mtx_unlock_spin(&sio_lock); if (delta == 0) continue; total = com->error_counts[errnum] += delta; Index: sys/pci/if_dcreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_dcreg.h,v retrieving revision 1.17 diff -u -r1.17 if_dcreg.h --- sys/pci/if_dcreg.h 2000/11/25 08:00:10 1.17 +++ sys/pci/if_dcreg.h 2001/02/07 03:01:14 @@ -702,8 +702,8 @@ }; -#define DC_LOCK(_sc) mtx_enter(&(_sc)->dc_mtx, MTX_DEF) -#define DC_UNLOCK(_sc) mtx_exit(&(_sc)->dc_mtx, MTX_DEF) +#define DC_LOCK(_sc) mtx_lock(&(_sc)->dc_mtx) +#define DC_UNLOCK(_sc) mtx_unlock(&(_sc)->dc_mtx) #define DC_TX_POLL 0x00000001 #define DC_TX_COALESCE 0x00000002 Index: 
sys/pci/if_fxpvar.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_fxpvar.h,v retrieving revision 1.14 diff -u -r1.14 if_fxpvar.h --- sys/pci/if_fxpvar.h 2001/01/23 23:22:17 1.14 +++ sys/pci/if_fxpvar.h 2001/02/07 03:01:14 @@ -86,5 +86,5 @@ #define sc_if arpcom.ac_if #define FXP_UNIT(_sc) (_sc)->arpcom.ac_if.if_unit -#define FXP_LOCK(_sc) mtx_enter(&(_sc)->sc_mtx, MTX_DEF) -#define FXP_UNLOCK(_sc) mtx_exit(&(_sc)->sc_mtx, MTX_DEF) +#define FXP_LOCK(_sc) mtx_lock(&(_sc)->sc_mtx) +#define FXP_UNLOCK(_sc) mtx_unlock(&(_sc)->sc_mtx) Index: sys/pci/if_pcnreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_pcnreg.h,v retrieving revision 1.6 diff -u -r1.6 if_pcnreg.h --- sys/pci/if_pcnreg.h 2000/11/23 00:28:43 1.6 +++ sys/pci/if_pcnreg.h 2001/02/07 03:01:14 @@ -451,8 +451,8 @@ struct mtx pcn_mtx; }; -#define PCN_LOCK(_sc) mtx_enter(&(_sc)->pcn_mtx, MTX_DEF) -#define PCN_UNLOCK(_sc) mtx_exit(&(_sc)->pcn_mtx, MTX_DEF) +#define PCN_LOCK(_sc) mtx_lock(&(_sc)->pcn_mtx) +#define PCN_UNLOCK(_sc) mtx_unlock(&(_sc)->pcn_mtx) /* * register space access macros Index: sys/pci/if_rlreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_rlreg.h,v retrieving revision 1.17 diff -u -r1.17 if_rlreg.h --- sys/pci/if_rlreg.h 2000/10/30 07:54:38 1.17 +++ sys/pci/if_rlreg.h 2001/02/07 03:01:14 @@ -373,8 +373,8 @@ struct mtx rl_mtx; }; -#define RL_LOCK(_sc) mtx_enter(&(_sc)->rl_mtx, MTX_DEF) -#define RL_UNLOCK(_sc) mtx_exit(&(_sc)->rl_mtx, MTX_DEF) +#define RL_LOCK(_sc) mtx_lock(&(_sc)->rl_mtx) +#define RL_UNLOCK(_sc) mtx_unlock(&(_sc)->rl_mtx) /* * register space access macros Index: sys/pci/if_sfreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_sfreg.h,v retrieving revision 1.7 diff -u -r1.7 if_sfreg.h --- sys/pci/if_sfreg.h 2000/10/13 17:54:18 1.7 +++ sys/pci/if_sfreg.h 2001/02/07 03:01:14 @@ -1048,8 +1048,8 @@ }; -#define SF_LOCK(_sc) mtx_enter(&(_sc)->sf_mtx, MTX_DEF) -#define SF_UNLOCK(_sc) mtx_exit(&(_sc)->sf_mtx, MTX_DEF) +#define SF_LOCK(_sc) mtx_lock(&(_sc)->sf_mtx) +#define SF_UNLOCK(_sc) mtx_unlock(&(_sc)->sf_mtx) #define SF_TIMEOUT 1000 Index: sys/pci/if_sisreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_sisreg.h,v retrieving revision 1.4 diff -u -r1.4 if_sisreg.h --- sys/pci/if_sisreg.h 2000/10/13 17:54:18 1.4 +++ sys/pci/if_sisreg.h 2001/02/07 03:01:14 @@ -392,8 +392,8 @@ struct mtx sis_mtx; }; -#define SIS_LOCK(_sc) mtx_enter(&(_sc)->sis_mtx, MTX_DEF) -#define SIS_UNLOCK(_sc) mtx_exit(&(_sc)->sis_mtx, MTX_DEF) +#define SIS_LOCK(_sc) mtx_lock(&(_sc)->sis_mtx) +#define SIS_UNLOCK(_sc) mtx_unlock(&(_sc)->sis_mtx) /* * register space access macros Index: sys/pci/if_skreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_skreg.h,v retrieving revision 1.14 diff -u -r1.14 if_skreg.h --- sys/pci/if_skreg.h 2000/10/13 17:54:18 1.14 +++ sys/pci/if_skreg.h 2001/02/07 03:01:14 @@ -1182,10 +1182,10 @@ struct mtx sk_mtx; }; -#define SK_LOCK(_sc) mtx_enter(&(_sc)->sk_mtx, MTX_DEF) -#define SK_UNLOCK(_sc) mtx_exit(&(_sc)->sk_mtx, MTX_DEF) -#define SK_IF_LOCK(_sc) mtx_enter(&(_sc)->sk_softc->sk_mtx, MTX_DEF) -#define SK_IF_UNLOCK(_sc) mtx_exit(&(_sc)->sk_softc->sk_mtx, MTX_DEF) +#define SK_LOCK(_sc) mtx_lock(&(_sc)->sk_mtx) +#define SK_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_mtx) 
+#define SK_IF_LOCK(_sc) mtx_lock(&(_sc)->sk_softc->sk_mtx) +#define SK_IF_UNLOCK(_sc) mtx_unlock(&(_sc)->sk_softc->sk_mtx) /* Softc for each logical interface */ struct sk_if_softc { Index: sys/pci/if_stereg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_stereg.h,v retrieving revision 1.6 diff -u -r1.6 if_stereg.h --- sys/pci/if_stereg.h 2000/10/13 18:35:49 1.6 +++ sys/pci/if_stereg.h 2001/02/07 03:01:14 @@ -517,8 +517,8 @@ struct mtx ste_mtx; }; -#define STE_LOCK(_sc) mtx_enter(&(_sc)->ste_mtx, MTX_DEF) -#define STE_UNLOCK(_sc) mtx_exit(&(_sc)->ste_mtx, MTX_DEF) +#define STE_LOCK(_sc) mtx_lock(&(_sc)->ste_mtx) +#define STE_UNLOCK(_sc) mtx_unlock(&(_sc)->ste_mtx) struct ste_mii_frame { u_int8_t mii_stdelim; Index: sys/pci/if_tireg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_tireg.h,v retrieving revision 1.22 diff -u -r1.22 if_tireg.h --- sys/pci/if_tireg.h 2000/10/21 00:13:35 1.22 +++ sys/pci/if_tireg.h 2001/02/07 03:01:14 @@ -1147,8 +1147,8 @@ struct mtx ti_mtx; }; -#define TI_LOCK(_sc) mtx_enter(&(_sc)->ti_mtx, MTX_DEF) -#define TI_UNLOCK(_sc) mtx_exit(&(_sc)->ti_mtx, MTX_DEF) +#define TI_LOCK(_sc) mtx_lock(&(_sc)->ti_mtx) +#define TI_UNLOCK(_sc) mtx_unlock(&(_sc)->ti_mtx) /* * Microchip Technology 24Cxx EEPROM control bytes Index: sys/pci/if_tlreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_tlreg.h,v retrieving revision 1.16 diff -u -r1.16 if_tlreg.h --- sys/pci/if_tlreg.h 2000/10/13 17:54:19 1.16 +++ sys/pci/if_tlreg.h 2001/02/07 03:01:14 @@ -129,8 +129,8 @@ struct mtx tl_mtx; }; -#define TL_LOCK(_sc) mtx_enter(&(_sc)->tl_mtx, MTX_DEF) -#define TL_UNLOCK(_sc) mtx_exit(&(_sc)->tl_mtx, MTX_DEF) +#define TL_LOCK(_sc) mtx_lock(&(_sc)->tl_mtx) +#define TL_UNLOCK(_sc) mtx_unlock(&(_sc)->tl_mtx) /* * Transmit interrupt threshold. 
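The network driver headers in this patch change only what their per-device lock macros expand to, so the driver bodies compile unmodified. A minimal sketch of how such a macro pair is consumed, using a hypothetical xx(4) driver rather than any file above:

	struct xx_softc {
		struct mtx	xx_mtx;		/* per-device lock */
		int		xx_pending;	/* protected by xx_mtx */
	};

	#define XX_LOCK(_sc)	mtx_lock(&(_sc)->xx_mtx)	/* was mtx_enter(..., MTX_DEF) */
	#define XX_UNLOCK(_sc)	mtx_unlock(&(_sc)->xx_mtx)	/* was mtx_exit(..., MTX_DEF) */

	static void
	xx_intr(void *arg)
	{
		struct xx_softc *sc = arg;

		XX_LOCK(sc);
		sc->xx_pending = 1;		/* touch softc state under the lock */
		XX_UNLOCK(sc);
	}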
Index: sys/pci/if_vrreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_vrreg.h,v retrieving revision 1.9 diff -u -r1.9 if_vrreg.h --- sys/pci/if_vrreg.h 2000/10/13 17:54:19 1.9 +++ sys/pci/if_vrreg.h 2001/02/07 03:01:14 @@ -414,8 +414,8 @@ struct mtx vr_mtx; }; -#define VR_LOCK(_sc) mtx_enter(&(_sc)->vr_mtx, MTX_DEF) -#define VR_UNLOCK(_sc) mtx_exit(&(_sc)->vr_mtx, MTX_DEF) +#define VR_LOCK(_sc) mtx_lock(&(_sc)->vr_mtx) +#define VR_UNLOCK(_sc) mtx_unlock(&(_sc)->vr_mtx) /* * register space access macros Index: sys/pci/if_wbreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_wbreg.h,v retrieving revision 1.8 diff -u -r1.8 if_wbreg.h --- sys/pci/if_wbreg.h 2000/10/13 17:54:19 1.8 +++ sys/pci/if_wbreg.h 2001/02/07 03:01:14 @@ -381,8 +381,8 @@ struct mtx wb_mtx; }; -#define WB_LOCK(_sc) mtx_enter(&(_sc)->wb_mtx, MTX_DEF) -#define WB_UNLOCK(_sc) mtx_exit(&(_sc)->wb_mtx, MTX_DEF) +#define WB_LOCK(_sc) mtx_lock(&(_sc)->wb_mtx) +#define WB_UNLOCK(_sc) mtx_unlock(&(_sc)->wb_mtx) /* * register space access macros Index: sys/pci/if_wxvar.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_wxvar.h,v retrieving revision 1.8 diff -u -r1.8 if_wxvar.h --- sys/pci/if_wxvar.h 2000/12/06 00:52:28 1.8 +++ sys/pci/if_wxvar.h 2001/02/07 03:01:14 @@ -214,10 +214,10 @@ #define UNTIMEOUT(f, arg, sc) untimeout(f, arg, (sc)->w.sch) #define INLINE __inline #ifdef SMPNG -#define WX_LOCK(_sc) mtx_enter(&(_sc)->wx_mtx, MTX_DEF) -#define WX_UNLOCK(_sc) mtx_exit(&(_sc)->wx_mtx, MTX_DEF) -#define WX_ILOCK(_sc) mtx_enter(&(_sc)->wx_mtx, MTX_DEF) -#define WX_IUNLK(_sc) mtx_exit(&(_sc)->wx_mtx, MTX_DEF) +#define WX_LOCK(_sc) mtx_lock(&(_sc)->wx_mtx) +#define WX_UNLOCK(_sc) mtx_unlock(&(_sc)->wx_mtx) +#define WX_ILOCK(_sc) mtx_lock(&(_sc)->wx_mtx) +#define WX_IUNLK(_sc) mtx_unlock(&(_sc)->wx_mtx) #else #define WX_LOCK(_sc) _sc->w.spl = splimp() #define WX_UNLOCK(_sc) splx(_sc->w.spl) Index: sys/pci/if_xlreg.h =================================================================== RCS file: /home/ncvs/src/sys/pci/if_xlreg.h,v retrieving revision 1.30 diff -u -r1.30 if_xlreg.h --- sys/pci/if_xlreg.h 2000/12/01 19:41:14 1.30 +++ sys/pci/if_xlreg.h 2001/02/07 03:01:14 @@ -588,8 +588,8 @@ struct mtx xl_mtx; }; -#define XL_LOCK(_sc) mtx_enter(&(_sc)->xl_mtx, MTX_DEF) -#define XL_UNLOCK(_sc) mtx_exit(&(_sc)->xl_mtx, MTX_DEF) +#define XL_LOCK(_sc) mtx_lock(&(_sc)->xl_mtx) +#define XL_UNLOCK(_sc) mtx_unlock(&(_sc)->xl_mtx) #define xl_rx_goodframes(x) \ ((x.xl_upper_frames_ok & 0x03) << 8) | x.xl_rx_frames_ok Index: sys/powerpc/powerpc/mp_machdep.c =================================================================== RCS file: /home/ncvs/src/sys/powerpc/powerpc/mp_machdep.c,v retrieving revision 1.2 diff -u -r1.2 mp_machdep.c --- sys/powerpc/powerpc/mp_machdep.c 2001/01/24 12:35:52 1.2 +++ sys/powerpc/powerpc/mp_machdep.c 2001/02/07 03:01:14 @@ -150,7 +150,7 @@ smp_init_secondary(void) { - mtx_enter(&Giant, MTX_DEF); + mtx_lock(&Giant); printf("smp_init_secondary: called\n"); CTR0(KTR_SMP, "smp_init_secondary"); @@ -163,7 +163,7 @@ mp_ncpus = PCPU_GET(cpuno) + 1; spl0(); - mtx_exit(&Giant, MTX_DEF); + mtx_unlock(&Giant); } extern void smp_init_secondary_glue(void); @@ -379,7 +379,7 @@ { /* obtain rendezvous lock */ - mtx_enter(&smp_rv_mtx, MTX_SPIN); + mtx_lock_spin(&smp_rv_mtx); /* set static function pointers */ smp_rv_setup_func = setup_func; @@ -393,7 +393,7 @@ 
smp_rendezvous_action(); /* release lock */ - mtx_exit(&smp_rv_mtx, MTX_SPIN); + mtx_unlock_spin(&smp_rv_mtx); } static u_int64_t Index: sys/sys/buf.h =================================================================== RCS file: /home/ncvs/src/sys/sys/buf.h,v retrieving revision 1.114 diff -u -r1.114 buf.h --- sys/sys/buf.h 2001/01/10 04:43:51 1.114 +++ sys/sys/buf.h 2001/02/07 03:01:14 @@ -252,7 +252,7 @@ int s, ret; s = splbio(); - mtx_enter(&buftimelock, MTX_DEF); + mtx_lock(&buftimelock); locktype |= LK_INTERLOCK; bp->b_lock.lk_wmesg = buf_wmesg; bp->b_lock.lk_prio = PRIBIO + 4; @@ -271,7 +271,7 @@ int s, ret; s = splbio(); - mtx_enter(&buftimelock, MTX_DEF); + mtx_lock(&buftimelock); locktype |= LK_INTERLOCK; bp->b_lock.lk_wmesg = wmesg; bp->b_lock.lk_prio = (PRIBIO + 4) | catch; Index: sys/sys/mbuf.h =================================================================== RCS file: /home/ncvs/src/sys/sys/mbuf.h,v retrieving revision 1.67 diff -u -r1.67 mbuf.h --- sys/sys/mbuf.h 2001/01/20 21:29:10 1.67 +++ sys/sys/mbuf.h 2001/02/07 03:01:14 @@ -300,7 +300,7 @@ #define _MEXT_ALLOC_CNT(m_cnt, how) do { \ union mext_refcnt *__mcnt; \ \ - mtx_enter(&mcntfree.m_mtx, MTX_DEF); \ + mtx_lock(&mcntfree.m_mtx); \ if (mcntfree.m_head == NULL) \ m_alloc_ref(1, (how)); \ __mcnt = mcntfree.m_head; \ @@ -309,18 +309,18 @@ mbstat.m_refree--; \ __mcnt->refcnt = 0; \ } \ - mtx_exit(&mcntfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mcntfree.m_mtx); \ (m_cnt) = __mcnt; \ } while (0) #define _MEXT_DEALLOC_CNT(m_cnt) do { \ union mext_refcnt *__mcnt = (m_cnt); \ \ - mtx_enter(&mcntfree.m_mtx, MTX_DEF); \ + mtx_lock(&mcntfree.m_mtx); \ __mcnt->next_ref = mcntfree.m_head; \ mcntfree.m_head = __mcnt; \ mbstat.m_refree++; \ - mtx_exit(&mcntfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mcntfree.m_mtx); \ } while (0) #define MEXT_INIT_REF(m, how) do { \ @@ -371,14 +371,14 @@ int _mhow = (how); \ int _mtype = (type); \ \ - mtx_enter(&mmbfree.m_mtx, MTX_DEF); \ + mtx_lock(&mmbfree.m_mtx); \ _MGET(_mm, _mhow); \ if (_mm != NULL) { \ mbtypes[_mtype]++; \ - mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mmbfree.m_mtx); \ _MGET_SETUP(_mm, _mtype); \ } else \ - mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mmbfree.m_mtx); \ (m) = _mm; \ } while (0) @@ -398,14 +398,14 @@ int _mhow = (how); \ int _mtype = (type); \ \ - mtx_enter(&mmbfree.m_mtx, MTX_DEF); \ + mtx_lock(&mmbfree.m_mtx); \ _MGET(_mm, _mhow); \ if (_mm != NULL) { \ mbtypes[_mtype]++; \ - mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mmbfree.m_mtx); \ _MGETHDR_SETUP(_mm, _mtype); \ } else \ - mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mmbfree.m_mtx); \ (m) = _mm; \ } while (0) @@ -437,9 +437,9 @@ #define MCLGET(m, how) do { \ struct mbuf *_mm = (m); \ \ - mtx_enter(&mclfree.m_mtx, MTX_DEF); \ + mtx_lock(&mclfree.m_mtx); \ _MCLALLOC(_mm->m_ext.ext_buf, (how)); \ - mtx_exit(&mclfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mclfree.m_mtx); \ if (_mm->m_ext.ext_buf != NULL) { \ MEXT_INIT_REF(_mm, (how)); \ if (_mm->m_ext.ref_cnt == NULL) { \ @@ -474,12 +474,12 @@ #define _MCLFREE(p) do { \ union mcluster *_mp = (union mcluster *)(p); \ \ - mtx_enter(&mclfree.m_mtx, MTX_DEF); \ + mtx_lock(&mclfree.m_mtx); \ _mp->mcl_next = mclfree.m_head; \ mclfree.m_head = _mp; \ mbstat.m_clfree++; \ MBWAKEUP(m_clalloc_wid); \ - mtx_exit(&mclfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mclfree.m_mtx); \ } while (0) /* MEXTFREE: @@ -514,7 +514,7 @@ KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \ if (_mm->m_flags & M_EXT) \ MEXTFREE(_mm); \ - mtx_enter(&mmbfree.m_mtx, MTX_DEF); \ + 
mtx_lock(&mmbfree.m_mtx); \ mbtypes[_mm->m_type]--; \ _mm->m_type = MT_FREE; \ mbtypes[MT_FREE]++; \ @@ -522,7 +522,7 @@ _mm->m_next = mmbfree.m_head; \ mmbfree.m_head = _mm; \ MBWAKEUP(m_mballoc_wid); \ - mtx_exit(&mmbfree.m_mtx, MTX_DEF); \ + mtx_unlock(&mmbfree.m_mtx); \ } while (0) /* Index: sys/sys/mutex.h =================================================================== RCS file: /home/ncvs/src/sys/sys/mutex.h,v retrieving revision 1.22 diff -u -r1.22 mutex.h --- sys/sys/mutex.h 2001/01/24 10:57:01 1.22 +++ sys/sys/mutex.h 2001/02/07 03:01:14 @@ -48,31 +48,34 @@ #ifdef _KERNEL /* - * Mutex flags + * Mutex types and options stored in mutex->mtx_flags + */ +#define MTX_DEF 0x00000000 /* DEFAULT (sleep) lock */ +#define MTX_SPIN 0x00000001 /* Spin lock (disables interrupts) */ +#define MTX_RECURSE 0x00000002 /* Option: lock allowed to recurse */ + +/* + * Option flags passed to certain lock/unlock routines, through the use + * of corresponding mtx_{lock,unlock}_flags() interface macros. * - * Types + * XXX: The only reason we make these bits not interfere with the above "types + * and options" bits is because we have to pass both to the witness + * routines right now; if/when we clean up the witness interface to + * not check for mutex type from the passed in flag, but rather from + * the mutex lock's mtx_flags field, then we can change these values to + * 0x1, 0x2, ... */ -#define MTX_DEF 0x0 /* Default (spin/sleep) */ -#define MTX_SPIN 0x1 /* Spin only lock */ +#define MTX_NOSWITCH 0x00000004 /* Do not switch on release */ +#define MTX_QUIET 0x00000008 /* Don't log a mutex event */ -/* Options */ -#define MTX_RECURSE 0x2 /* Recursive lock (for mtx_init) */ -#define MTX_RLIKELY 0x4 /* Recursion likely */ -#define MTX_NORECURSE 0x8 /* No recursion possible */ -#define MTX_NOSPIN 0x10 /* Don't spin before sleeping */ -#define MTX_NOSWITCH 0x20 /* Do not switch on release */ -#define MTX_FIRST 0x40 /* First spin lock holder */ -#define MTX_TOPHALF 0x80 /* Interrupts not disabled on spin */ -#define MTX_QUIET 0x100 /* Don't log a mutex event */ - -/* options that should be passed on to mtx_enter_hard, mtx_exit_hard */ -#define MTX_HARDOPTS (MTX_SPIN | MTX_FIRST | MTX_TOPHALF | MTX_NOSWITCH) - -/* Flags/value used in mtx_lock */ -#define MTX_RECURSED 0x01 /* (non-spin) lock held recursively */ -#define MTX_CONTESTED 0x02 /* (non-spin) lock contested */ +/* + * State bits kept in mutex->mtx_lock, for the DEFAULT lock type. None of this, + * with the exception of MTX_UNOWNED, applies to spin locks. 
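With the constants reorganized as above, a lock's type (MTX_DEF or MTX_SPIN) and options such as MTX_RECURSE are stated once, at mtx_init() time, and recorded in mtx_flags rather than repeated at every enter and exit site. A short sketch (illustrative; foo_mtx and foo_spin_mtx are hypothetical):

	struct mtx foo_mtx;		/* hypothetical sleep mutex */
	struct mtx foo_spin_mtx;	/* hypothetical spin mutex */

	mtx_init(&foo_mtx, "foo", MTX_DEF);
	mtx_init(&foo_spin_mtx, "foo spin", MTX_SPIN);

	mtx_lock(&foo_mtx);		/* the type is no longer passed here */
	mtx_unlock(&foo_mtx);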
+ */ +#define MTX_RECURSED 0x00000001 /* lock recursed (for MTX_DEF only) */ +#define MTX_CONTESTED 0x00000002 /* lock contested (for MTX_DEF only) */ +#define MTX_UNOWNED 0x00000004 /* Cookie for free mutex */ #define MTX_FLAGMASK ~(MTX_RECURSED | MTX_CONTESTED) -#define MTX_UNOWNED 0x8 /* Cookie for free mutex */ #endif /* _KERNEL */ @@ -84,62 +87,243 @@ * Sleep/spin mutex */ struct mtx { - volatile uintptr_t mtx_lock; /* lock owner/gate/flags */ + volatile uintptr_t mtx_lock; /* owner (and state for sleep locks) */ volatile u_int mtx_recurse; /* number of recursive holds */ u_int mtx_saveintr; /* saved flags (for spin locks) */ int mtx_flags; /* flags passed to mtx_init() */ const char *mtx_description; - TAILQ_HEAD(, proc) mtx_blocked; - LIST_ENTRY(mtx) mtx_contested; - struct mtx *mtx_next; /* all locks in system */ - struct mtx *mtx_prev; - struct mtx_debug *mtx_debug; + TAILQ_HEAD(, proc) mtx_blocked; /* threads blocked on this lock */ + LIST_ENTRY(mtx) mtx_contested; /* list of all contested locks */ + struct mtx *mtx_next; /* all existing locks */ + struct mtx *mtx_prev; /* in system... */ + struct mtx_debug *mtx_debug; /* debugging information... */ }; +/* + * XXX: Friendly reminder to fix things in MP code that is presently being + * XXX: worked on. + */ #define mp_fixme(string) #ifdef _KERNEL -/* Prototypes */ -void mtx_init(struct mtx *m, const char *description, int flag); + +/* + * Prototypes + * + * NOTE: Functions prepended with `_' (underscore) are exported to other parts + * of the kernel via macros, thus allowing us to use the cpp __FILE__ + * and __LINE__. These functions should not be called directly by any + * code using the API. Their macros cover their functionality. + * + * [See below for descriptions] + * + */ void mtx_init(struct mtx *m, const char *description, int opts); void mtx_destroy(struct mtx *m); +void _mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line); +void _mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line); +void _mtx_lock_spin(struct mtx *m, int opts, u_int mtx_intr, + const char *file, int line); +void _mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line); +int _mtx_trylock(struct mtx *m, int opts, const char *file, int line); /* - * Wrap the following functions with cpp macros so that filenames and line - * numbers are embedded in the code correctly. + * We define our machine-independent (unoptimized) mutex micro-operations + * here, if they are not already defined in the machine-dependent mutex.h */ -void _mtx_enter(struct mtx *mtxp, int type, const char *file, int line); -int _mtx_try_enter(struct mtx *mtxp, int type, const char *file, int line); -void _mtx_exit(struct mtx *mtxp, int type, const char *file, int line); -#define mtx_enter(mtxp, type) \ - _mtx_enter((mtxp), (type), __FILE__, __LINE__) +/* Actually obtain mtx_lock */ +#ifndef _obtain_lock +#define _obtain_lock(mp, tid) \ + atomic_cmpset_acq_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED, (tid)) +#endif + +/* Actually release mtx_lock */ +#ifndef _release_lock +#define _release_lock(mp, tid) \ + atomic_cmpset_rel_ptr(&(mp)->mtx_lock, (tid), (void *)MTX_UNOWNED) +#endif + +/* Actually release mtx_lock quickly, assuming we own it.
*/ +#ifndef _release_lock_quick +#define _release_lock_quick(mp) \ + atomic_store_rel_ptr(&(mp)->mtx_lock, (void *)MTX_UNOWNED) +#endif -#define mtx_try_enter(mtxp, type) \ - _mtx_try_enter((mtxp), (type), __FILE__, __LINE__) +/* + * Obtain a sleep lock inline, or call the "hard" function if we can't get it + * easy. + */ +#ifndef _get_sleep_lock +#define _get_sleep_lock(mp, tid, opts) do { \ + if (!_obtain_lock((mp), (tid))) \ + _mtx_lock_sleep((mp), (opts), __FILE__, __LINE__); \ +} while (0) +#endif -#define mtx_exit(mtxp, type) \ - _mtx_exit((mtxp), (type), __FILE__, __LINE__) +/* + * Obtain a spin lock inline, or call the "hard" function if we can't get it + * easy. For spinlocks, we handle recursion inline (it turns out that function + * calls can be significantly expensive on some architectures). + * Since spin locks are not _too_ common, inlining this code is not too big + * a deal. + */ +#ifndef _get_spin_lock +#define _get_spin_lock(mp, tid, opts) do { \ + u_int _mtx_intr = save_intr(); \ + disable_intr(); \ + if (!_obtain_lock((mp), (tid))) { \ + if ((mp)->mtx_lock == (uintptr_t)(tid)) \ + (mp)->mtx_recurse++; \ + else \ + _mtx_lock_spin((mp), (opts), _mtx_intr, \ + __FILE__, __LINE__); \ + } else \ + (mp)->mtx_saveintr = _mtx_intr; \ +} while (0) +#endif -/* Global locks */ -extern struct mtx sched_lock; -extern struct mtx Giant; +/* + * Release a sleep lock inline, or call the "hard" function if we can't do it + * easy. + */ +#ifndef _rel_sleep_lock +#define _rel_sleep_lock(mp, tid, opts) do { \ + if (!_release_lock((mp), (tid))) \ + _mtx_unlock_sleep((mp), (opts), __FILE__, __LINE__); \ +} while (0) +#endif /* - * Used to replace return with an exit Giant and return. + * For spinlocks, we can handle everything inline, as it's pretty simple and + * a function call would be too expensive (at least on some architectures). + * Since spin locks are not _too_ common, inlining this code is not too big + * a deal. */ +#ifndef _rel_spin_lock +#define _rel_spin_lock(mp) do { \ + u_int _mtx_intr = (mp)->mtx_saveintr; \ + if (mtx_recursed((mp))) \ + (mp)->mtx_recurse--; \ + else { \ + _release_lock_quick((mp)); \ + restore_intr(_mtx_intr); \ + } \ +} while (0) +#endif -#define EGAR(a) \ -do { \ - mtx_exit(&Giant, MTX_DEF); \ - return (a); \ +/* + * Exported lock manipulation interface. + * + * mtx_lock(m) locks MTX_DEF mutex `m' + * + * mtx_lock_spin(m) locks MTX_SPIN mutex `m' + * + * mtx_unlock(m) unlocks MTX_DEF mutex `m' + * + * mtx_unlock_spin(m) unlocks MTX_SPIN mutex `m' + * + * mtx_lock_spin_flags(m, opts) and mtx_lock_flags(m, opts) locks mutex `m' + * and passes option flags `opts' to the "hard" function, if required. + * With these routines, it is possible to pass flags such as MTX_QUIET + * and/or MTX_NOSWITCH to the appropriate lock manipulation routines. + * + * mtx_trylock(m) attempts to acquire MTX_DEF mutex `m' but doesn't sleep if + * it cannot. Rather, it returns 0 on failure and non-zero on success. + * It does NOT handle recursion as we assume that if a caller is properly + * using this part of the interface, he will know that the lock in question + * is _not_ recursed. + * + * mtx_trylock_flags(m, opts) is used the same way as mtx_trylock() but accepts + * relevant option flags `opts.' + * + * mtx_owned(m) returns non-zero if the current thread owns the lock `m' + * + * mtx_recursed(m) returns non-zero if the lock `m' is presently recursed. 
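Given the interface described above, the conversion performed throughout this patch is mechanical: the type argument moves into the function name. A side-by-side sketch (illustrative, not lifted from any hunk):

	mtx_lock(&Giant);		/* was: mtx_enter(&Giant, MTX_DEF) */
	mtx_unlock(&Giant);		/* was: mtx_exit(&Giant, MTX_DEF) */

	mtx_lock_spin(&sched_lock);	/* was: mtx_enter(&sched_lock, MTX_SPIN) */
	mtx_unlock_spin(&sched_lock);	/* was: mtx_exit(&sched_lock, MTX_SPIN) */

	if (mtx_trylock(&Giant)) {	/* was: mtx_try_enter(&Giant, MTX_DEF) */
		/* acquired without blocking */
		mtx_unlock(&Giant);
	}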
+ */ +#define mtx_lock(m) do { \ + MPASS(CURPROC != NULL); \ + _get_sleep_lock((m), CURTHD, 0); \ + WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__); \ } while (0) -#define VEGAR \ -do { \ - mtx_exit(&Giant, MTX_DEF); \ - return; \ +#define mtx_lock_spin(m) do { \ + MPASS(CURPROC != NULL); \ + _get_spin_lock((m), CURTHD, 0); \ + WITNESS_ENTER((m), (m)->mtx_flags, __FILE__, __LINE__); \ +} while (0) + +#define mtx_unlock(m) do { \ + MPASS(CURPROC != NULL); \ + WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__); \ + _rel_sleep_lock((m), CURTHD, 0); \ +} while (0) + +#define mtx_unlock_spin(m) do { \ + MPASS(CURPROC != NULL); \ + WITNESS_EXIT((m), (m)->mtx_flags, __FILE__, __LINE__); \ + _rel_spin_lock((m)); \ +} while (0) + +#define mtx_lock_flags(m, opts) do { \ + MPASS(CURPROC != NULL); \ + _get_sleep_lock((m), CURTHD, (opts)); \ + WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__, \ + __LINE__); \ } while (0) +#define mtx_lock_spin_flags(m, opts) do { \ + MPASS(CURPROC != NULL); \ + _get_spin_lock((m), CURTHD, (opts)); \ + WITNESS_ENTER((m), ((m)->mtx_flags | (opts)), __FILE__, \ + __LINE__); \ +} while (0) + +#define mtx_unlock_flags(m, opts) do { \ + MPASS(CURPROC != NULL); \ + WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__, \ + __LINE__); \ + _rel_sleep_lock((m), CURTHD, (opts)); \ +} while (0) + +/* + * The MTX_SPIN unlock case is all inlined, so we handle the MTX_QUIET + * flag right in the macro. Not a problem as if we don't have KTR_LOCK, this + * check will be optimized out. + */ +#define mtx_unlock_spin_flags(m, opts) do { \ + MPASS(CURPROC != NULL); \ + WITNESS_EXIT((m), ((m)->mtx_flags | (opts)), __FILE__, \ + __LINE__); \ + if (((opts) & MTX_QUIET) == 0) \ + CTR5(KTR_LOCK, "REL %s [%p] r=%d at %s:%d", \ + (m)->mtx_description, (m), (m)->mtx_recurse, \ + __FILE__, __LINE__); \ + _rel_spin_lock((m)); \ +} while (0) + +#define mtx_trylock(m) \ + _mtx_trylock((m), 0, __FILE__, __LINE__) + +#define mtx_trylock_flags(m, opts) \ + _mtx_trylock((m), (opts), __FILE__, __LINE__) + +#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD) + +#define mtx_recursed(m) ((m)->mtx_recurse != 0) + +/* + * Global locks. + */ +extern struct mtx sched_lock; +extern struct mtx Giant; + +/* + * Giant lock manipulation and clean exit macros. + * Used to replace return with an exit Giant and return. + * + * Note that DROP_GIANT*() needs to be paired with PICKUP_GIANT() + */ #define DROP_GIANT_NOSWITCH() \ do { \ int _giantcnt; \ @@ -148,7 +332,7 @@ if (mtx_owned(&Giant)) \ WITNESS_SAVE(&Giant, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \ - mtx_exit(&Giant, MTX_DEF | MTX_NOSWITCH) + mtx_unlock_flags(&Giant, MTX_NOSWITCH) #define DROP_GIANT() \ do { \ @@ -158,12 +342,12 @@ if (mtx_owned(&Giant)) \ WITNESS_SAVE(&Giant, Giant); \ for (_giantcnt = 0; mtx_owned(&Giant); _giantcnt++) \ - mtx_exit(&Giant, MTX_DEF) + mtx_unlock(&Giant) #define PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ while (_giantcnt--) \ - mtx_enter(&Giant, MTX_DEF); \ + mtx_lock(&Giant); \ if (mtx_owned(&Giant)) \ WITNESS_RESTORE(&Giant, Giant); \ } while (0) @@ -171,37 +355,49 @@ #define PARTIAL_PICKUP_GIANT() \ mtx_assert(&Giant, MA_NOTOWNED); \ while (_giantcnt--) \ - mtx_enter(&Giant, MTX_DEF); \ + mtx_lock(&Giant); \ if (mtx_owned(&Giant)) \ WITNESS_RESTORE(&Giant, Giant) /* - * Debugging + * The INVARIANTS-enabled mtx_assert() functionality. 
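Two usage notes on the macros above, as sketches rather than code from the patch: DROP_GIANT() opens a brace scope that PICKUP_GIANT() must close in the same function, and mtx_assert() compiles to nothing unless INVARIANTS is defined.

	DROP_GIANT();				/* releases Giant, saving the recursion depth */
	(void) tsleep(ident, PVM, "sketch", 0);	/* 'ident' is a placeholder wait channel */
	PICKUP_GIANT();				/* reacquires Giant to the saved depth */

	mtx_assert(&sched_lock, MA_OWNED);	/* caller must hold sched_lock here */
	mtx_assert(&Giant, MA_NOTOWNED);	/* and must not hold Giant */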
*/ #ifdef INVARIANTS -#define MA_OWNED 1 -#define MA_NOTOWNED 2 -#define MA_RECURSED 4 -#define MA_NOTRECURSED 8 +#define MA_OWNED 0x01 +#define MA_NOTOWNED 0x02 +#define MA_RECURSED 0x04 +#define MA_NOTRECURSED 0x08 + void _mtx_assert(struct mtx *m, int what, const char *file, int line); -#define mtx_assert(m, what) _mtx_assert((m), (what), __FILE__, __LINE__) +#define mtx_assert(m, what) \ + _mtx_assert((m), (what), __FILE__, __LINE__) + #else /* INVARIANTS */ #define mtx_assert(m, what) #endif /* INVARIANTS */ +/* + * The MUTEX_DEBUG-enabled MPASS*() extra sanity-check macros. + */ #ifdef MUTEX_DEBUG #define MPASS(ex) \ if (!(ex)) \ - panic("Assertion %s failed at %s:%d", #ex, __FILE__, __LINE__) + panic("Assertion %s failed at %s:%d", #ex, __FILE__, \ + __LINE__) + #define MPASS2(ex, what) \ if (!(ex)) \ - panic("Assertion %s failed at %s:%d", what, __FILE__, __LINE__) + panic("Assertion %s failed at %s:%d", what, __FILE__, \ + __LINE__) + #define MPASS3(ex, file, line) \ if (!(ex)) \ panic("Assertion %s failed at %s:%d", #ex, file, line) + #define MPASS4(ex, what, file, line) \ if (!(ex)) \ panic("Assertion %s failed at %s:%d", what, file, line) + #else /* MUTEX_DEBUG */ #define MPASS(ex) #define MPASS2(ex, what) @@ -210,37 +406,8 @@ #endif /* MUTEX_DEBUG */ /* - * Externally visible mutex functions. - *------------------------------------------------------------------------------ + * Exported WITNESS-enabled functions and corresponding wrapper macros. */ - -/* - * Return non-zero if a mutex is already owned by the current thread. - */ -#define mtx_owned(m) (((m)->mtx_lock & MTX_FLAGMASK) == (uintptr_t)CURTHD) - -/* - * Return non-zero if a mutex has been recursively acquired. - */ -#define mtx_recursed(m) ((m)->mtx_recurse != 0) - -/* Common strings */ -#ifdef _KERN_MUTEX_C_ -char STR_mtx_enter_fmt[] = "GOT %s [%p] r=%d at %s:%d"; -char STR_mtx_exit_fmt[] = "REL %s [%p] r=%d at %s:%d"; -char STR_mtx_try_enter_fmt[] = "TRY_ENTER %s [%p] result=%d at %s:%d"; -char STR_mtx_bad_type[] = "((type) & (MTX_NORECURSE | MTX_NOSWITCH)) == 0"; -char STR_mtx_owned[] = "mtx_owned(mpp)"; -char STR_mtx_recurse[] = "mpp->mtx_recurse == 0"; -#else /* _KERN_MUTEX_C_ */ -extern char STR_mtx_enter_fmt[]; -extern char STR_mtx_bad_type[]; -extern char STR_mtx_exit_fmt[]; -extern char STR_mtx_owned[]; -extern char STR_mtx_recurse[]; -extern char STR_mtx_try_enter_fmt[]; -#endif /* _KERN_MUTEX_C_ */ - #ifdef WITNESS void witness_save(struct mtx *, const char **, int *); void witness_restore(struct mtx *, const char *, int); @@ -249,17 +416,26 @@ void witness_exit(struct mtx *, int, const char *, int); int witness_list(struct proc *); int witness_sleep(int, struct mtx *, const char *, int); + +#define WITNESS_ENTER(m, t, f, l) \ + witness_enter((m), (t), (f), (l)) -#define WITNESS_ENTER(m, t, f, l) witness_enter((m), (t), (f), (l)) -#define WITNESS_EXIT(m, t, f, l) witness_exit((m), (t), (f), (l)) -#define WITNESS_SLEEP(check, m) witness_sleep(check, (m), __FILE__, __LINE__) +#define WITNESS_EXIT(m, t, f, l) \ + witness_exit((m), (t), (f), (l)) + +#define WITNESS_SLEEP(check, m) \ + witness_sleep(check, (m), __FILE__, __LINE__) + #define WITNESS_SAVE_DECL(n) \ const char * __CONCAT(n, __wf); \ int __CONCAT(n, __wl) + #define WITNESS_SAVE(m, n) \ witness_save(m, &__CONCAT(n, __wf), &__CONCAT(n, __wl)) + #define WITNESS_RESTORE(m, n) \ witness_restore(m, __CONCAT(n, __wf), __CONCAT(n, __wl)) + #else /* WITNESS */ #define witness_enter(m, t, f, l) #define witness_tryenter(m, t, f, l) Index: sys/sys/proc.h 
=================================================================== RCS file: /home/ncvs/src/sys/sys/proc.h,v retrieving revision 1.145 diff -u -r1.145 proc.h --- sys/sys/proc.h 2001/01/31 04:29:52 1.145 +++ sys/sys/proc.h 2001/02/07 03:01:14 @@ -419,8 +419,8 @@ } while (0) /* Lock and unlock a process. */ -#define PROC_LOCK(p) mtx_enter(&(p)->p_mtx, MTX_DEF) -#define PROC_UNLOCK(p) mtx_exit(&(p)->p_mtx, MTX_DEF) +#define PROC_LOCK(p) mtx_lock(&(p)->p_mtx) +#define PROC_UNLOCK(p) mtx_unlock(&(p)->p_mtx) /* Lock and unlock the proc lists. */ #define ALLPROC_LOCK(how) \ Index: sys/ufs/ffs/ffs_vfsops.c =================================================================== RCS file: /home/ncvs/src/sys/ufs/ffs/ffs_vfsops.c,v retrieving revision 1.137 diff -u -r1.137 ffs_vfsops.c --- sys/ufs/ffs/ffs_vfsops.c 2001/02/04 13:13:21 1.137 +++ sys/ufs/ffs/ffs_vfsops.c 2001/02/07 03:01:14 @@ -393,7 +393,7 @@ if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) { vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); vfs_object_create(devvp, p, p->p_ucred); - mtx_enter(&devvp->v_interlock, MTX_DEF); + mtx_lock(&devvp->v_interlock); VOP_UNLOCK(devvp, LK_INTERLOCK, p); } @@ -454,10 +454,10 @@ } loop: - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { if (vp->v_mount != mp) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); goto loop; } nvp = LIST_NEXT(vp, v_mntvnodes); @@ -469,8 +469,8 @@ /* * Step 5: invalidate all cached file data. */ - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&mntvnode_mtx); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) { goto loop; } @@ -492,9 +492,9 @@ ip->i_effnlink = ip->i_nlink; brelse(bp); vput(vp); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); return (0); } @@ -551,7 +551,7 @@ if (devvp->v_tag != VT_MFS && vn_isdisk(devvp, NULL)) { vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, p); vfs_object_create(devvp, p, cred); - mtx_enter(&devvp->v_interlock, MTX_DEF); + mtx_lock(&devvp->v_interlock); VOP_UNLOCK(devvp, LK_INTERLOCK, p); } @@ -937,7 +937,7 @@ wait = 1; lockreq = LK_EXCLUSIVE | LK_INTERLOCK; } - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); loop: for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) { /* @@ -946,19 +946,19 @@ */ if (vp->v_mount != mp) goto loop; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); nvp = LIST_NEXT(vp, v_mntvnodes); ip = VTOI(vp); if (vp->v_type == VNON || ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 && TAILQ_EMPTY(&vp->v_dirtyblkhd))) { - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); continue; } if (vp->v_type != VCHR) { - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); if ((error = vget(vp, lockreq, p)) != 0) { - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); if (error == ENOENT) goto loop; continue; @@ -967,15 +967,15 @@ allerror = error; VOP_UNLOCK(vp, 0, p); vrele(vp); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } else { - mtx_exit(&mntvnode_mtx, MTX_DEF); - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&mntvnode_mtx); + mtx_unlock(&vp->v_interlock); UFS_UPDATE(vp, wait); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); } } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); /* * Force stale file system control information to be 
flushed. */ @@ -984,7 +984,7 @@ allerror = error; /* Flushed work items may create new vnodes to clean */ if (count) { - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); goto loop; } } @@ -1055,17 +1055,17 @@ * case getnewvnode() or MALLOC() blocks, otherwise a duplicate * may occur! */ - mtx_enter(&ffs_inode_hash_mtx, MTX_DEF); + mtx_lock(&ffs_inode_hash_mtx); if (ffs_inode_hash_lock) { while (ffs_inode_hash_lock) { ffs_inode_hash_lock = -1; msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx, PVM, "ffsvgt", 0); } - mtx_exit(&ffs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ffs_inode_hash_mtx); goto restart; } ffs_inode_hash_lock = 1; - mtx_exit(&ffs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ffs_inode_hash_mtx); /* * If this MALLOC() is performed after the getnewvnode() @@ -1085,10 +1085,10 @@ * otherwise the processes waken up immediately hit * themselves into the mutex. */ - mtx_enter(&ffs_inode_hash_mtx, MTX_DEF); + mtx_lock(&ffs_inode_hash_mtx); want_wakeup = ffs_inode_hash_lock < 0; ffs_inode_hash_lock = 0; - mtx_exit(&ffs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ffs_inode_hash_mtx); if (want_wakeup) wakeup(&ffs_inode_hash_lock); *vpp = NULL; @@ -1126,10 +1126,10 @@ * otherwise the processes waken up immediately hit * themselves into the mutex. */ - mtx_enter(&ffs_inode_hash_mtx, MTX_DEF); + mtx_lock(&ffs_inode_hash_mtx); want_wakeup = ffs_inode_hash_lock < 0; ffs_inode_hash_lock = 0; - mtx_exit(&ffs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ffs_inode_hash_mtx); if (want_wakeup) wakeup(&ffs_inode_hash_lock); Index: sys/ufs/ifs/ifs_vfsops.c =================================================================== RCS file: /home/ncvs/src/sys/ufs/ifs/ifs_vfsops.c,v retrieving revision 1.4 diff -u -r1.4 ifs_vfsops.c --- sys/ufs/ifs/ifs_vfsops.c 2000/12/14 09:15:27 1.4 +++ sys/ufs/ifs/ifs_vfsops.c 2001/02/07 03:01:14 @@ -176,17 +176,17 @@ * case getnewvnode() or MALLOC() blocks, otherwise a duplicate * may occur! */ - mtx_enter(&ifs_inode_hash_mtx, MTX_DEF); + mtx_lock(&ifs_inode_hash_mtx); if (ifs_inode_hash_lock) { while (ifs_inode_hash_lock) { ifs_inode_hash_lock = -1; msleep(&ifs_inode_hash_lock, &ifs_inode_hash_mtx, PVM, "ifsvgt", 0); } - mtx_exit(&ifs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ifs_inode_hash_mtx); goto restart; } ifs_inode_hash_lock = 1; - mtx_exit(&ifs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ifs_inode_hash_mtx); /* * If this MALLOC() is performed after the getnewvnode() @@ -206,10 +206,10 @@ * otherwise the processes waken up immediately hit * themselves into the mutex. */ - mtx_enter(&ifs_inode_hash_mtx, MTX_DEF); + mtx_lock(&ifs_inode_hash_mtx); want_wakeup = ifs_inode_hash_lock < 0; ifs_inode_hash_lock = 0; - mtx_exit(&ifs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ifs_inode_hash_mtx); if (want_wakeup) wakeup(&ifs_inode_hash_lock); *vpp = NULL; @@ -247,10 +247,10 @@ * otherwise the processes waken up immediately hit * themselves into the mutex. 
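The ffs and ifs hunks convert a hand-rolled flag lock whose correctness rests on msleep() atomically dropping the supplied mutex while asleep and retaking it before returning. A condensed sketch of the acquire side (illustrative):

	mtx_lock(&ffs_inode_hash_mtx);
	while (ffs_inode_hash_lock) {
		ffs_inode_hash_lock = -1;	/* ask the holder for a wakeup */
		msleep(&ffs_inode_hash_lock, &ffs_inode_hash_mtx,
		    PVM, "ffsvgt", 0);		/* mutex dropped while sleeping */
	}
	ffs_inode_hash_lock = 1;		/* acquired */
	mtx_unlock(&ffs_inode_hash_mtx);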
*/ - mtx_enter(&ifs_inode_hash_mtx, MTX_DEF); + mtx_lock(&ifs_inode_hash_mtx); want_wakeup = ifs_inode_hash_lock < 0; ifs_inode_hash_lock = 0; - mtx_exit(&ifs_inode_hash_mtx, MTX_DEF); + mtx_unlock(&ifs_inode_hash_mtx); if (want_wakeup) wakeup(&ifs_inode_hash_lock); Index: sys/ufs/ufs/ufs_ihash.c =================================================================== RCS file: /home/ncvs/src/sys/ufs/ufs/ufs_ihash.c,v retrieving revision 1.28 diff -u -r1.28 ufs_ihash.c --- sys/ufs/ufs/ufs_ihash.c 2001/02/04 12:37:48 1.28 +++ sys/ufs/ufs/ufs_ihash.c 2001/02/07 03:01:14 @@ -77,11 +77,11 @@ { struct inode *ip; - mtx_enter(&ufs_ihash_mtx, MTX_DEF); + mtx_lock(&ufs_ihash_mtx); LIST_FOREACH(ip, INOHASH(dev, inum), i_hash) if (inum == ip->i_number && dev == ip->i_dev) break; - mtx_exit(&ufs_ihash_mtx, MTX_DEF); + mtx_unlock(&ufs_ihash_mtx); if (ip) return (ITOV(ip)); @@ -102,18 +102,18 @@ struct vnode *vp; loop: - mtx_enter(&ufs_ihash_mtx, MTX_DEF); + mtx_lock(&ufs_ihash_mtx); LIST_FOREACH(ip, INOHASH(dev, inum), i_hash) { if (inum == ip->i_number && dev == ip->i_dev) { vp = ITOV(ip); - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&ufs_ihash_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&ufs_ihash_mtx); if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, p)) goto loop; return (vp); } } - mtx_exit(&ufs_ihash_mtx, MTX_DEF); + mtx_unlock(&ufs_ihash_mtx); return (NULL); } @@ -130,11 +130,11 @@ /* lock the inode, then put it on the appropriate hash list */ lockmgr(&ip->i_vnode->v_lock, LK_EXCLUSIVE, (struct mtx *)0, p); - mtx_enter(&ufs_ihash_mtx, MTX_DEF); + mtx_lock(&ufs_ihash_mtx); ipp = INOHASH(ip->i_dev, ip->i_number); LIST_INSERT_HEAD(ipp, ip, i_hash); ip->i_flag |= IN_HASHED; - mtx_exit(&ufs_ihash_mtx, MTX_DEF); + mtx_unlock(&ufs_ihash_mtx); } /* @@ -144,10 +144,10 @@ ufs_ihashrem(ip) struct inode *ip; { - mtx_enter(&ufs_ihash_mtx, MTX_DEF); + mtx_lock(&ufs_ihash_mtx); if (ip->i_flag & IN_HASHED) { ip->i_flag &= ~IN_HASHED; LIST_REMOVE(ip, i_hash); } - mtx_exit(&ufs_ihash_mtx, MTX_DEF); + mtx_unlock(&ufs_ihash_mtx); } Index: sys/ufs/ufs/ufs_quota.c =================================================================== RCS file: /home/ncvs/src/sys/ufs/ufs/ufs_quota.c,v retrieving revision 1.41 diff -u -r1.41 ufs_quota.c --- sys/ufs/ufs/ufs_quota.c 2001/02/04 13:13:23 1.41 +++ sys/ufs/ufs/ufs_quota.c 2001/02/07 03:01:14 @@ -666,7 +666,7 @@ * Search vnodes associated with this mount point, * synchronizing any modified dquot structures. 
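The quota sync loop below uses the standard vnode handoff: take the vnode interlock while the list mutex is still held, drop the list mutex, then let vget() consume and release the interlock via LK_INTERLOCK. A condensed sketch (illustrative):

	mtx_lock(&mntvnode_mtx);
	mtx_lock(&vp->v_interlock);	/* pin vp before dropping the list lock */
	mtx_unlock(&mntvnode_mtx);
	error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p);
	/* on return, vget() has released v_interlock, success or not */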
*/ - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); again: for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nextvp) { if (vp->v_mount != mp) @@ -674,11 +674,11 @@ nextvp = LIST_NEXT(vp, v_mntvnodes); if (vp->v_type == VNON) continue; - mtx_enter(&vp->v_interlock, MTX_DEF); - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_lock(&vp->v_interlock); + mtx_unlock(&mntvnode_mtx); error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, p); if (error) { - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); if (error == ENOENT) goto again; continue; @@ -689,11 +689,11 @@ dqsync(vp, dq); } vput(vp); - mtx_enter(&mntvnode_mtx, MTX_DEF); + mtx_lock(&mntvnode_mtx); if (LIST_NEXT(vp, v_mntvnodes) != nextvp) goto again; } - mtx_exit(&mntvnode_mtx, MTX_DEF); + mtx_unlock(&mntvnode_mtx); return (0); } Index: sys/ufs/ufs/ufs_vnops.c =================================================================== RCS file: /home/ncvs/src/sys/ufs/ufs/ufs_vnops.c,v retrieving revision 1.154 diff -u -r1.154 ufs_vnops.c --- sys/ufs/ufs/ufs_vnops.c 2000/11/04 08:10:56 1.154 +++ sys/ufs/ufs/ufs_vnops.c 2001/02/07 03:01:14 @@ -284,10 +284,10 @@ { register struct vnode *vp = ap->a_vp; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (vp->v_usecount > 1) ufs_itimes(vp); - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return (0); } @@ -1863,10 +1863,10 @@ { struct vnode *vp = ap->a_vp; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (vp->v_usecount > 1) ufs_itimes(vp); - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return (VOCALL(spec_vnodeop_p, VOFFSET(vop_close), ap)); } @@ -1937,10 +1937,10 @@ { struct vnode *vp = ap->a_vp; - mtx_enter(&vp->v_interlock, MTX_DEF); + mtx_lock(&vp->v_interlock); if (vp->v_usecount > 1) ufs_itimes(vp); - mtx_exit(&vp->v_interlock, MTX_DEF); + mtx_unlock(&vp->v_interlock); return (VOCALL(fifo_vnodeop_p, VOFFSET(vop_close), ap)); } Index: sys/vm/vm_fault.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_fault.c,v retrieving revision 1.114 diff -u -r1.114 vm_fault.c --- sys/vm/vm_fault.c 2001/01/24 11:20:05 1.114 +++ sys/vm/vm_fault.c 2001/02/07 03:01:14 @@ -854,7 +854,7 @@ vm_page_activate(fs.m); } - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (curproc && (curproc->p_sflag & PS_INMEM) && curproc->p_stats) { if (hardfault) { curproc->p_stats->p_ru.ru_majflt++; @@ -862,7 +862,7 @@ curproc->p_stats->p_ru.ru_minflt++; } } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); /* * Unlock everything, and return Index: sys/vm/vm_glue.c =================================================================== RCS file: /home/ncvs/src/sys/vm/vm_glue.c,v retrieving revision 1.106 diff -u -r1.106 vm_glue.c --- sys/vm/vm_glue.c 2001/01/25 01:38:09 1.106 +++ sys/vm/vm_glue.c 2001/02/07 03:01:14 @@ -313,18 +313,18 @@ { mtx_assert(&p->p_mtx, MA_OWNED); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if ((p->p_sflag & PS_INMEM) == 0) { ++p->p_lock; - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); PROC_UNLOCK(p); mtx_assert(&Giant, MA_OWNED); pmap_swapin_proc(p); PROC_LOCK(p); - mtx_enter(&sched_lock, MTX_SPIN); + mtx_lock_spin(&sched_lock); if (p->p_stat == SRUN) { setrunqueue(p); } @@ -334,7 +334,7 @@ /* undo the effect of setting SLOCK above */ --p->p_lock; } - mtx_exit(&sched_lock, MTX_SPIN); + mtx_unlock_spin(&sched_lock); } /* @@ -366,7 +366,7 @@ ppri = INT_MIN; 
 	ALLPROC_LOCK(AP_SHARED);
 	LIST_FOREACH(p, &allproc, p_list) {
-		mtx_enter(&sched_lock, MTX_SPIN);
+		mtx_lock_spin(&sched_lock);
 		if (p->p_stat == SRUN &&
 		    (p->p_sflag & (PS_INMEM | PS_SWAPPING)) == 0) {
@@ -385,7 +385,7 @@
 				ppri = pri;
 			}
 		}
-		mtx_exit(&sched_lock, MTX_SPIN);
+		mtx_unlock_spin(&sched_lock);
 	}
 	ALLPROC_LOCK(AP_RELEASE);
@@ -396,9 +396,9 @@
 		tsleep(&proc0, PVM, "sched", 0);
 		goto loop;
 	}
-	mtx_enter(&sched_lock, MTX_SPIN);
+	mtx_lock_spin(&sched_lock);
 	p->p_sflag &= ~PS_SWAPINREQ;
-	mtx_exit(&sched_lock, MTX_SPIN);
+	mtx_unlock_spin(&sched_lock);
 
 	/*
 	 * We would like to bring someone in. (only if there is space).
@@ -406,9 +406,9 @@
 	PROC_LOCK(p);
 	faultin(p);
 	PROC_UNLOCK(p);
-	mtx_enter(&sched_lock, MTX_SPIN);
+	mtx_lock_spin(&sched_lock);
 	p->p_swtime = 0;
-	mtx_exit(&sched_lock, MTX_SPIN);
+	mtx_unlock_spin(&sched_lock);
 	goto loop;
 }
@@ -461,15 +461,15 @@
 			}
 			vm = p->p_vmspace;
 			PROC_UNLOCK(p);
-			mtx_enter(&sched_lock, MTX_SPIN);
+			mtx_lock_spin(&sched_lock);
 			if ((p->p_sflag & (PS_INMEM|PS_SWAPPING)) != PS_INMEM) {
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 				continue;
 			}
 
 			switch (p->p_stat) {
 			default:
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 				continue;
 
 			case SSLEEP:
@@ -478,7 +478,7 @@
 				 * do not swapout a realtime process
 				 */
 				if (RTP_PRIO_IS_REALTIME(p->p_rtprio.type)) {
-					mtx_exit(&sched_lock, MTX_SPIN);
+					mtx_unlock_spin(&sched_lock);
 					continue;
 				}
@@ -489,7 +489,7 @@
 				 */
 				if (((p->p_priority & 0x7f) < PSOCK) ||
 				    (p->p_slptime < swap_idle_threshold1)) {
-					mtx_exit(&sched_lock, MTX_SPIN);
+					mtx_unlock_spin(&sched_lock);
 					continue;
 				}
@@ -501,10 +501,10 @@
 				if (((action & VM_SWAP_NORMAL) == 0) &&
 				    (((action & VM_SWAP_IDLE) == 0) ||
 				    (p->p_slptime < swap_idle_threshold2))) {
-					mtx_exit(&sched_lock, MTX_SPIN);
+					mtx_unlock_spin(&sched_lock);
 					continue;
 				}
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 
 				++vm->vm_refcnt;
 				/*
@@ -522,17 +522,17 @@
 				 * If the process has been asleep for awhile and had
 				 * most of its pages taken away already, swap it out.
 				 */
-				mtx_enter(&sched_lock, MTX_SPIN);
+				mtx_lock_spin(&sched_lock);
 				if ((action & VM_SWAP_NORMAL) ||
 				    ((action & VM_SWAP_IDLE) &&
 				    (p->p_slptime > swap_idle_threshold2))) {
-					mtx_exit(&sched_lock, MTX_SPIN);
+					mtx_unlock_spin(&sched_lock);
 					swapout(p);
 					vmspace_free(vm);
 					didswap++;
 					goto retry;
 				} else
-					mtx_exit(&sched_lock, MTX_SPIN);
+					mtx_unlock_spin(&sched_lock);
 			}
 		}
 	ALLPROC_LOCK(AP_RELEASE);
@@ -559,19 +559,19 @@
 	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
 
 	(void) splhigh();
-	mtx_enter(&sched_lock, MTX_SPIN);
+	mtx_lock_spin(&sched_lock);
 	p->p_sflag &= ~PS_INMEM;
 	p->p_sflag |= PS_SWAPPING;
 	if (p->p_stat == SRUN)
 		remrunqueue(p);
-	mtx_exit(&sched_lock, MTX_SPIN);
+	mtx_unlock_spin(&sched_lock);
 	(void) spl0();
 
 	pmap_swapout_proc(p);
 
-	mtx_enter(&sched_lock, MTX_SPIN);
+	mtx_lock_spin(&sched_lock);
 	p->p_sflag &= ~PS_SWAPPING;
 	p->p_swtime = 0;
-	mtx_exit(&sched_lock, MTX_SPIN);
+	mtx_unlock_spin(&sched_lock);
 }
 #endif /* !NO_SWAPPING */
Index: sys/vm/vm_map.h
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_map.h,v
retrieving revision 1.56
diff -u -r1.56 vm_map.h
--- sys/vm/vm_map.h	2000/10/12 22:37:28	1.56
+++ sys/vm/vm_map.h	2001/02/07 03:01:14
@@ -291,15 +291,15 @@
 
 #define vm_map_set_recursive(map) \
 	do { \
-		mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
+		mtx_lock((map)->lock.lk_interlock); \
 		(map)->lock.lk_flags |= LK_CANRECURSE; \
-		mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
+		mtx_unlock((map)->lock.lk_interlock); \
 	} while(0)
 
 #define vm_map_clear_recursive(map) \
 	do { \
-		mtx_enter((map)->lock.lk_interlock, MTX_DEF); \
+		mtx_lock((map)->lock.lk_interlock); \
 		(map)->lock.lk_flags &= ~LK_CANRECURSE; \
-		mtx_exit((map)->lock.lk_interlock, MTX_DEF); \
+		mtx_unlock((map)->lock.lk_interlock); \
 	} while(0)
 
 /*
Index: sys/vm/vm_meter.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_meter.c,v
retrieving revision 1.46
diff -u -r1.46 vm_meter.c
--- sys/vm/vm_meter.c	2001/02/04 13:13:24	1.46
+++ sys/vm/vm_meter.c	2001/02/07 03:01:14
@@ -153,10 +153,10 @@
 	LIST_FOREACH(p, &allproc, p_list) {
 		if (p->p_flag & P_SYSTEM)
 			continue;
-		mtx_enter(&sched_lock, MTX_SPIN);
+		mtx_lock_spin(&sched_lock);
 		switch (p->p_stat) {
 		case 0:
-			mtx_exit(&sched_lock, MTX_SPIN);
+			mtx_unlock_spin(&sched_lock);
 			continue;
 
 		case SMTX:
@@ -170,7 +170,7 @@
 			} else if (p->p_slptime < maxslp)
 				totalp->t_sw++;
 			if (p->p_slptime >= maxslp) {
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 				continue;
 			}
 			break;
@@ -186,12 +186,12 @@
 			else
 				totalp->t_sw++;
 			if (p->p_stat == SIDL) {
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 				continue;
 			}
 			break;
 		}
-		mtx_exit(&sched_lock, MTX_SPIN);
+		mtx_unlock_spin(&sched_lock);
 		/*
 		 * Note active objects.
 		 */
Index: sys/vm/vm_object.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_object.c,v
retrieving revision 1.182
diff -u -r1.182 vm_object.c
--- sys/vm/vm_object.c	2001/02/04 13:13:24	1.182
+++ sys/vm/vm_object.c	2001/02/07 03:01:14
@@ -458,9 +458,9 @@
 	/*
 	 * Remove the object from the global object list.
 	 */
-	mtx_enter(&vm_object_list_mtx, MTX_DEF);
+	mtx_lock(&vm_object_list_mtx);
 	TAILQ_REMOVE(&vm_object_list, object, object_list);
-	mtx_exit(&vm_object_list_mtx, MTX_DEF);
+	mtx_unlock(&vm_object_list_mtx);
 
 	wakeup(object);
Index: sys/vm/vm_pageout.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_pageout.c,v
retrieving revision 1.170
diff -u -r1.170 vm_pageout.c
--- sys/vm/vm_pageout.c	2001/02/04 13:13:24	1.170
+++ sys/vm/vm_pageout.c	2001/02/07 03:01:14
@@ -1140,12 +1140,12 @@
 			 * if the process is in a non-running type state,
 			 * don't touch it.
 			 */
-			mtx_enter(&sched_lock, MTX_SPIN);
+			mtx_lock_spin(&sched_lock);
 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 				continue;
 			}
-			mtx_exit(&sched_lock, MTX_SPIN);
+			mtx_unlock_spin(&sched_lock);
 			/*
 			 * get the process size
 			 */
@@ -1162,11 +1162,11 @@
 		ALLPROC_LOCK(AP_RELEASE);
 		if (bigproc != NULL) {
 			killproc(bigproc, "out of swap space");
-			mtx_enter(&sched_lock, MTX_SPIN);
+			mtx_lock_spin(&sched_lock);
 			bigproc->p_estcpu = 0;
 			bigproc->p_nice = PRIO_MIN;
 			resetpriority(bigproc);
-			mtx_exit(&sched_lock, MTX_SPIN);
+			mtx_unlock_spin(&sched_lock);
 			wakeup(&cnt.v_free_count);
 		}
 	}
@@ -1305,7 +1305,7 @@
 {
 	int pass;
 
-	mtx_enter(&Giant, MTX_DEF);
+	mtx_lock(&Giant);
 
 	/*
 	 * Initialize some paging parameters.
@@ -1449,7 +1449,7 @@
 {
 	struct proc *p;
 
-	mtx_enter(&Giant, MTX_DEF);
+	mtx_lock(&Giant);
 
 	while (TRUE) {
 		tsleep(&vm_daemon_needed, PPAUSE, "psleep", 0);
@@ -1477,9 +1477,9 @@
 			 * if the process is in a non-running type state,
 			 * don't touch it.
 			 */
-			mtx_enter(&sched_lock, MTX_SPIN);
+			mtx_lock_spin(&sched_lock);
 			if (p->p_stat != SRUN && p->p_stat != SSLEEP) {
-				mtx_exit(&sched_lock, MTX_SPIN);
+				mtx_unlock_spin(&sched_lock);
 				continue;
 			}
 			/*
@@ -1496,7 +1496,7 @@
 			 */
 			if ((p->p_sflag & PS_INMEM) == 0)
 				limit = 0;	/* XXX */
-			mtx_exit(&sched_lock, MTX_SPIN);
+			mtx_unlock_spin(&sched_lock);
 
 			size = vmspace_resident_count(p->p_vmspace);
 			if (limit >= 0 && size >= limit) {
Index: sys/vm/vm_zone.c
===================================================================
RCS file: /home/ncvs/src/sys/vm/vm_zone.c,v
retrieving revision 1.40
diff -u -r1.40 vm_zone.c
--- sys/vm/vm_zone.c	2001/01/23 03:40:27	1.40
+++ sys/vm/vm_zone.c	2001/02/07 03:01:14
@@ -173,9 +173,9 @@
 	/* our zone is good and ready, add it to the list */
 	if ((z->zflags & ZONE_BOOT) == 0) {
 		mtx_init(&(z)->zmtx, "zone", MTX_DEF);
-		mtx_enter(&zone_mtx, MTX_DEF);
+		mtx_lock(&zone_mtx);
 		SLIST_INSERT_HEAD(&zlist, z, zent);
-		mtx_exit(&zone_mtx, MTX_DEF);
+		mtx_unlock(&zone_mtx);
 	}
 
 	return 1;
@@ -245,9 +245,9 @@
 	z->zmax = nitems;
 	z->ztotal = nitems;
 
-	mtx_enter(&zone_mtx, MTX_DEF);
+	mtx_lock(&zone_mtx);
 	SLIST_INSERT_HEAD(&zlist, z, zent);
-	mtx_exit(&zone_mtx, MTX_DEF);
+	mtx_unlock(&zone_mtx);
 }
 
 /*
@@ -300,15 +300,15 @@
 	 * map.
 	 */
 	if (lockstatus(&kernel_map->lock, NULL)) {
-		mtx_exit(&z->zmtx, MTX_DEF);
+		mtx_unlock(&z->zmtx);
 		item = (void *) kmem_malloc(kmem_map, nbytes, M_WAITOK);
-		mtx_enter(&z->zmtx, MTX_DEF);
+		mtx_lock(&z->zmtx);
 		if (item != NULL)
 			atomic_add_int(&zone_kmem_pages, z->zalloc);
 	} else {
-		mtx_exit(&z->zmtx, MTX_DEF);
+		mtx_unlock(&z->zmtx);
 		item = (void *) kmem_alloc(kernel_map, nbytes);
-		mtx_enter(&z->zmtx, MTX_DEF);
+		mtx_lock(&z->zmtx);
 		if (item != NULL)
 			atomic_add_int(&zone_kern_pages, z->zalloc);
 	}
@@ -363,11 +363,11 @@
 	void *item;
 
 	KASSERT(z != NULL, ("invalid zone"));
-	mtx_enter(&z->zmtx, MTX_DEF);
+	mtx_lock(&z->zmtx);
 
 	if (z->zfreecnt <= z->zfreemin) {
 		item = _zget(z);
-		mtx_exit(&z->zmtx, MTX_DEF);
+		mtx_unlock(&z->zmtx);
 		return item;
 	}
@@ -382,7 +382,7 @@
 	z->zfreecnt--;
 	z->znalloc++;
 
-	mtx_exit(&z->zmtx, MTX_DEF);
+	mtx_unlock(&z->zmtx);
 	return item;
 }
@@ -394,7 +394,7 @@
 {
 	KASSERT(z != NULL, ("invalid zone"));
 	KASSERT(item != NULL, ("invalid item"));
-	mtx_enter(&z->zmtx, MTX_DEF);
+	mtx_lock(&z->zmtx);
 
 	((void **) item)[0] = z->zitems;
 #ifdef INVARIANTS
@@ -405,7 +405,7 @@
 	z->zitems = item;
 	z->zfreecnt++;
 
-	mtx_exit(&z->zmtx, MTX_DEF);
+	mtx_unlock(&z->zmtx);
 }
 
 /*
@@ -418,22 +418,22 @@
 	char tmpbuf[128];
 	vm_zone_t z;
 
-	mtx_enter(&zone_mtx, MTX_DEF);
+	mtx_lock(&zone_mtx);
 	len = snprintf(tmpbuf, sizeof(tmpbuf),
 	    "\nITEM SIZE LIMIT USED FREE REQUESTS\n\n");
 	error = SYSCTL_OUT(req, tmpbuf, SLIST_EMPTY(&zlist) ? len-1 : len);
 	SLIST_FOREACH(z, &zlist, zent) {
-		mtx_enter(&z->zmtx, MTX_DEF);
+		mtx_lock(&z->zmtx);
 		len = snprintf(tmpbuf, sizeof(tmpbuf),
 		    "%-14.14s %6.6u, %8.8u, %6.6u, %6.6u, %8.8u\n",
 		    z->zname, z->zsize, z->zmax, (z->ztotal - z->zfreecnt),
 		    z->zfreecnt, z->znalloc);
-		mtx_exit(&z->zmtx, MTX_DEF);
+		mtx_unlock(&z->zmtx);
 		if (SLIST_NEXT(z, zent) == NULL)
 			tmpbuf[len - 1] = 0;
 		error = SYSCTL_OUT(req, tmpbuf, len);
 	}
-	mtx_exit(&zone_mtx, MTX_DEF);
+	mtx_unlock(&zone_mtx);
 	return (error);
 }
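
Every hunk above applies the same mechanical rewrite: the mutex class, formerly
selected by a flag argument on each call, is now encoded in the function name
(mtx_enter/mtx_exit with MTX_DEF become mtx_lock/mtx_unlock; with MTX_SPIN they
become mtx_lock_spin/mtx_unlock_spin). A minimal sketch of the pattern follows;
the mutex names and the wrapper function are illustrative only and do not
appear in the patch:

	/* Hypothetical mutexes, one of each class (names are not from the patch). */
	static struct mtx example_mtx;		/* initialized as MTX_DEF */
	static struct mtx example_spin_mtx;	/* initialized as MTX_SPIN */

	static void
	example_locking(void)
	{
		/* Old API: mtx_enter(&example_mtx, MTX_DEF);		*/
		/*          mtx_exit(&example_mtx, MTX_DEF);		*/
		mtx_lock(&example_mtx);
		mtx_unlock(&example_mtx);

		/* Old API: mtx_enter(&example_spin_mtx, MTX_SPIN);	*/
		/*          mtx_exit(&example_spin_mtx, MTX_SPIN);	*/
		mtx_lock_spin(&example_spin_mtx);
		mtx_unlock_spin(&example_spin_mtx);
	}

Because the class is fixed at each call site, the rename lets the compiler
catch a call that names the wrong class, instead of deferring the mismatch to
a runtime check of the flag argument.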