diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 42fc200b03cb..1d6ec0fda5c7 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -491,17 +491,15 @@ pv_list_lock_object(vm_paddr_t pa)
 #endif
 __read_mostly static struct vm_page pv_fake_page;
 
-static void
-pmap_pv_list_lock(vm_page_t m)
-{
-        obm_lock(&m->md.pv_lock, pv_list_lock_object(VM_PAGE_TO_PHYS(m)));
-}
+#define pmap_pv_list_lock(m) do {                                       \
+        vm_page_t _pvl_m = (m);                                         \
+        obm_lock(&_pvl_m->md.pv_lock, pv_list_lock_object(VM_PAGE_TO_PHYS(_pvl_m)));\
+} while (0)
 
-static void
-pmap_pv_list_unlock(vm_page_t m)
-{
-        obm_unlock(&m->md.pv_lock, pv_list_lock_object(VM_PAGE_TO_PHYS(m)));
-}
+#define pmap_pv_list_unlock(m) do {                                     \
+        vm_page_t _pvl_m = (m);                                         \
+        obm_unlock(&_pvl_m->md.pv_lock, pv_list_lock_object(VM_PAGE_TO_PHYS(_pvl_m)));\
+} while (0)
 
 /*
  * Helper for pmap_pv_list_lock_pde().  The pte_locked argument
@@ -527,7 +525,7 @@ pmap_pv_list_lock_pde1(vm_page_t m, bool pte_locked)
          * other owners must release their locks without
          * waiting for us.
          */
-        if (m == sm || obm_trylock(&sm->md.pv_lock)) {
+        if (m == sm || obm_trylock(&sm->md.pv_lock, lo)) {
                 for (i = 1, mt = sm + 1; i < NPTEPG; i++, mt++) {
                         if (m != mt)
                                 obm_lock(&mt->md.pv_lock, lo);
diff --git a/sys/kern/kern_obm.c b/sys/kern/kern_obm.c
index b7cb43129b94..47c4088a27cd 100644
--- a/sys/kern/kern_obm.c
+++ b/sys/kern/kern_obm.c
@@ -42,19 +42,6 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 
-#ifdef OBM_DEBUG
-static SYSCTL_NODE(_debug, OID_AUTO, obm, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
-    "");
-static u_long obm_slow_lock;
-SYSCTL_LONG(_debug_obm, OID_AUTO, slow_lock, CTLFLAG_RD,
-    &obm_slow_lock, 0,
-    "");
-static u_long obm_slow_unlock;
-SYSCTL_LONG(_debug_obm, OID_AUTO, slow_unlock, CTLFLAG_RD,
-    &obm_slow_unlock, 0,
-    "");
-#endif
-
 void
 obm_init_lo(struct lock_object *lo, const char *name)
 {
@@ -68,16 +55,19 @@ obm_init(obm_lock_t *obm)
         obm->lk = OBM_UNLOCKED;
 }
 
-void
-obm_lock_slow(obm_lock_t *obm, struct lock_object *lo, uint8_t v)
+static void __noinline
+obm_lock_hard(obm_lock_t *obm, struct lock_object *lo, uint8_t v LOCK_FILE_LINE_ARG_DEF)
 {
         struct turnstile *ts;
-        struct lock_delay_arg lda;
+#ifdef LOCK_PROFILING
+        int contested = 0;
+        uint64_t waittime = 0;
+#endif
 
-#ifdef OBM_DEBUG
-        atomic_add_long(&obm_slow_lock, 1);
+#ifdef LOCK_PROFILING
+        lock_profile_obtain_lock_failed(lo, &contested, &waittime);
 #endif
-        lock_delay_arg_init(&lda, &locks_delay);
+
         for (;;) {
                 if (v == OBM_UNLOCKED) {
                         if (atomic_fcmpset_acq_char(&obm->lk, &v,
@@ -100,16 +90,19 @@ obm_lock_slow(obm_lock_t *obm, struct lock_object *lo, uint8_t v)
                 turnstile_wait(ts, NULL, TS_SHARED_QUEUE);
                 v = atomic_load_8(&obm->lk);
         }
+
+#ifdef LOCK_PROFILING
+        lock_profile_obtain_lock_success(lo, contested, waittime, file, line);
+#endif
         TD_LOCKS_INC(curthread);
 }
-void
-obm_unlock_slow(obm_lock_t *obm, struct lock_object *lo)
+
+static void __noinline
+obm_unlock_hard(obm_lock_t *obm, struct lock_object *lo)
 {
         struct turnstile *ts;
 
-#ifdef OBM_DEBUG
-        atomic_add_long(&obm_slow_unlock, 1);
-#endif
+        lock_profile_release_lock(lo);
         turnstile_chain_lock(lo);
         atomic_store_rel_8(&obm->lk, OBM_UNLOCKED);
         ts = turnstile_lookup(lo);
@@ -120,3 +113,49 @@ obm_unlock_slow(obm_lock_t *obm, struct lock_object *lo)
         turnstile_chain_unlock(lo);
         TD_LOCKS_DEC(curthread);
 }
+
+bool
+_obm_trylock(obm_lock_t *obm, struct lock_object *lo LOCK_FILE_LINE_ARG_DEF)
+{
+        if (atomic_cmpset_acq_8(&obm->lk, OBM_UNLOCKED, OBM_LOCKED) != 0) {
+#ifdef LOCK_PROFILING
+                lock_profile_obtain_lock_success(lo, 0, 0, file, line);
+#endif
+                TD_LOCKS_INC(curthread);
+                return (true);
+        }
+        return (false);
+}
+
+void
+_obm_lock(obm_lock_t *obm, struct lock_object *lo LOCK_FILE_LINE_ARG_DEF)
+{
+        uint8_t v;
+
+        v = OBM_UNLOCKED;
+        if (__predict_true(atomic_fcmpset_acq_8(&obm->lk, &v, OBM_LOCKED))) {
+#ifdef LOCK_PROFILING
+                lock_profile_obtain_lock_success(lo, 0, 0, file, line);
+#endif
+                TD_LOCKS_INC(curthread);
+        } else {
+                MPASS(v == OBM_LOCKED || v == (OBM_LOCKED | OBM_CONTESTED) ||
+                    v == OBM_UNLOCKED);
+                obm_lock_hard(obm, lo, v LOCK_FILE_LINE_ARG);
+        }
+}
+
+void
+_obm_unlock(obm_lock_t *obm, struct lock_object *lo LOCK_FILE_LINE_ARG_DEF)
+{
+        uint8_t v;
+
+        v = OBM_LOCKED;
+        if (atomic_fcmpset_rel_8(&obm->lk, &v, OBM_UNLOCKED)) {
+                lock_profile_release_lock(lo);
+                TD_LOCKS_DEC(curthread);
+        } else {
+                MPASS(v == OBM_LOCKED || v == (OBM_LOCKED | OBM_CONTESTED));
+                obm_unlock_hard(obm, lo);
+        }
+}
diff --git a/sys/sys/obm.h b/sys/sys/obm.h
index 2ccfe34945d3..6a872e2de8d2 100644
--- a/sys/sys/obm.h
+++ b/sys/sys/obm.h
@@ -44,10 +44,15 @@
 #include
 #include
 
+#ifndef LOCK_DEBUG
+#error "LOCK_DEBUG not defined, include <sys/lock.h> before <sys/obm.h>"
+#endif
+
 void obm_init_lo(struct lock_object *lo, const char *name);
 void obm_init(obm_lock_t *obm);
-void obm_lock_slow(obm_lock_t *obm, struct lock_object *lo, uint8_t v);
-void obm_unlock_slow(obm_lock_t *obm, struct lock_object *lo);
+bool _obm_trylock(obm_lock_t *obm, struct lock_object *lo LOCK_FILE_LINE_ARG_DEF);
+void _obm_lock(obm_lock_t *obm, struct lock_object *lo LOCK_FILE_LINE_ARG_DEF);
+void _obm_unlock(obm_lock_t *obm, struct lock_object *lo LOCK_FILE_LINE_ARG_DEF);
 
 __used static void
 obm_assert_locked(obm_lock_t *obm)
@@ -60,44 +65,21 @@ obm_assert_locked(obm_lock_t *obm)
 #endif
 }
 
-static inline bool
-obm_trylock(obm_lock_t *obm)
-{
-        if (atomic_cmpset_acq_8(&obm->lk, OBM_UNLOCKED, OBM_LOCKED) != 0) {
-                TD_LOCKS_INC(curthread);
-                return (true);
-        }
-        return (false);
-}
-
-static inline void
-obm_lock(obm_lock_t *obm, struct lock_object *lo)
-{
-        uint8_t v;
-
-        v = OBM_UNLOCKED;
-        if (__predict_true(atomic_fcmpset_acq_8(&obm->lk, &v, OBM_LOCKED))) {
-                TD_LOCKS_INC(curthread);
-        } else {
-                MPASS(v == OBM_LOCKED || v == (OBM_LOCKED | OBM_CONTESTED) ||
-                    v == OBM_UNLOCKED);
-                obm_lock_slow(obm, lo, v);
-        }
-}
-
-static inline void
-obm_unlock(obm_lock_t *obm, struct lock_object *lo)
-{
-        uint8_t v;
-
-        v = OBM_LOCKED;
-        if (atomic_fcmpset_rel_8(&obm->lk, &v, OBM_UNLOCKED)) {
-                TD_LOCKS_DEC(curthread);
-        } else {
-                MPASS(v == OBM_LOCKED || v == (OBM_LOCKED | OBM_CONTESTED));
-                obm_unlock_slow(obm, lo);
-        }
-}
+#if (LOCK_DEBUG > 0)
+#define obm_trylock(obm, lo)                                            \
+        _obm_trylock(obm, lo, __FILE__, __LINE__)
+#define obm_lock(obm, lo)                                               \
+        _obm_lock(obm, lo, __FILE__, __LINE__)
+#define obm_unlock(obm, lo)                                             \
+        _obm_unlock(obm, lo, __FILE__, __LINE__)
+#else
+#define obm_trylock(obm, lo)                                            \
+        _obm_trylock(obm, lo)
+#define obm_lock(obm, lo)                                               \
+        _obm_lock(obm, lo)
+#define obm_unlock(obm, lo)                                             \
+        _obm_unlock(obm, lo)
+#endif
 
 #endif
 #endif
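Not part of the patch: a minimal usage sketch of the resulting obm interface, showing how a consumer pairs a per-object one-byte lock with a shared struct lock_object now that obm_lock()/obm_unlock()/obm_trylock() take the lock_object and, with LOCK_DEBUG > 0, pass __FILE__/__LINE__ for lock profiling. The struct foo, foo_lock_obj, and foo_* function names below are hypothetical; only the obm_* calls come from the patch.

    /* Hypothetical consumer of the one-byte mutex (obm) API. */
    #include <sys/param.h>
    #include <sys/lock.h>
    #include <sys/obm.h>

    struct foo {
            obm_lock_t      f_obm;          /* one byte of lock state per object */
            int             f_refs;
    };

    /* One lock_object shared by every foo lock; backs turnstiles and profiling. */
    static struct lock_object foo_lock_obj;

    static void
    foo_lock_sysinit(void)
    {
            obm_init_lo(&foo_lock_obj, "foo obm");
    }

    static void
    foo_init(struct foo *fp)
    {
            obm_init(&fp->f_obm);           /* lock byte starts out OBM_UNLOCKED */
            fp->f_refs = 0;
    }

    static void
    foo_ref(struct foo *fp)
    {
            obm_lock(&fp->f_obm, &foo_lock_obj);
            fp->f_refs++;
            obm_unlock(&fp->f_obm, &foo_lock_obj);
    }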