Index: sys/sparc64/sparc64/pmap.c
===================================================================
--- sys/sparc64/sparc64/pmap.c	(revision 225467)
+++ sys/sparc64/sparc64/pmap.c	(working copy)
@@ -890,7 +890,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_bgl_assert_locked();
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_cache_enter: fake page"));
 	PMAP_STATS_INC(pmap_ncache_enter);
@@ -965,7 +965,7 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_bgl_assert_locked();
 	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
 	    m->md.colors[DCACHE_COLOR(va)]);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1040,7 +1040,7 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
 	vm_page_t om;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_bgl_assert_locked();
 	PMAP_STATS_INC(pmap_nkenter);
 	tp = tsb_kvtotte(va);
 	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -1102,7 +1102,7 @@ pmap_kremove(vm_offset_t va)
 	struct tte *tp;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_bgl_assert_locked();
 	PMAP_STATS_INC(pmap_nkremove);
 	tp = tsb_kvtotte(va);
 	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
@@ -1153,19 +1153,16 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqenter);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	pmap_bgl_lock();
 	while (count-- > 0) {
 		pmap_kenter(va, *m);
 		va += PAGE_SIZE;
 		m++;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
@@ -1177,18 +1174,15 @@ void
 pmap_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqremove);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	pmap_bgl_lock();
 	while (count-- > 0) {
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
@@ -1329,7 +1323,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2,
 	vm_page_t m;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_bgl_assert_locked();
 	data = atomic_readandclear_long(&tp->tte_data);
 	if ((data & TD_FAKE) == 0) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1366,7 +1360,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offse
 	    pm->pm_context[curcpu], start, end);
 	if (PMAP_REMOVE_DONE(pm))
 		return;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	PMAP_LOCK(pm);
 	if (end - start > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
@@ -1379,7 +1373,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offse
 		tlb_range_demap(pm, start, end - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 }
 
 void
@@ -1392,7 +1386,7 @@ pmap_remove_all(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 		tpn = TAILQ_NEXT(tp, tte_link);
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1415,7 +1409,7 @@ pmap_remove_all(vm_page_t m)
 		PMAP_UNLOCK(pm);
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 }
 
 static int
@@ -1453,7 +1447,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset
 	if (prot & VM_PROT_WRITE)
 		return;
 
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	PMAP_LOCK(pm);
 	if (eva - sva > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, sva, eva, pmap_protect_tte);
@@ -1465,7 +1459,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset
 		tlb_range_demap(pm, sva, eva - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 }
 
 /*
@@ -1478,11 +1472,11 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t ac
     vm_prot_t prot, boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot, wired);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	pmap_bgl_unlock();
 }
 
 /*
@@ -1501,7 +1495,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_pa
 	vm_page_t real;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	pmap_bgl_assert_locked();
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1644,27 +1638,27 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm
 
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	pmap_bgl_unlock();
 }
 
 void
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	pmap_bgl_unlock();
 }
 
 void
@@ -1729,7 +1723,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 	if (dst_addr != src_addr)
 		return;
 
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -1747,9 +1741,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 				pmap_copy_tte(src_pmap, dst_pmap, tp, va);
 		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
+	pmap_bgl_unlock();
 }
 
 void
@@ -1946,7 +1940,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1957,7 +1951,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	return (rv);
 }
 
@@ -1974,11 +1968,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & (TD_PV | TD_WIRED)) ==
 		    (TD_PV | TD_WIRED))
 			count++;
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	return (count);
 }
 
@@ -2005,13 +1999,13 @@ pmap_page_is_mapped(vm_page_t m)
 	rv = FALSE;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (rv);
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & TD_PV) != 0) {
 			rv = TRUE;
 			break;
 		}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	return (rv);
 }
 
@@ -2037,7 +2031,7 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -2051,7 +2045,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	return (count);
 }
 
@@ -2074,7 +2068,7 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2083,7 +2077,7 @@ pmap_is_modified(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	return (rv);
 }
 
@@ -2117,7 +2111,7 @@ pmap_is_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2126,7 +2120,7 @@ pmap_is_referenced(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 	return (rv);
 }
 
@@ -2149,7 +2143,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2157,7 +2151,7 @@ pmap_clear_modify(vm_page_t m)
 		if ((data & TD_W) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 }
 
 void
@@ -2168,7 +2162,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2176,7 +2170,7 @@ pmap_clear_reference(vm_page_t m)
 		if ((data & TD_REF) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 }
 
 void
@@ -2197,7 +2191,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	pmap_bgl_lock();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2208,7 +2202,7 @@ pmap_remove_write(vm_page_t m)
 		}
 	}
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
+	pmap_bgl_unlock();
 }
 
 int
Index: sys/sparc64/sparc64/tsb.c
===================================================================
--- sys/sparc64/sparc64/tsb.c	(revision 225467)
+++ sys/sparc64/sparc64/tsb.c	(working copy)
@@ -119,6 +119,9 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t 
 	int b0;
 	int i;
 
+	pmap_bgl_assert_locked();
+	PMAP_LOCK_ASSERT(pm, MA_OWNED);
+
 	if (DCACHE_COLOR(VM_PAGE_TO_PHYS(m)) != DCACHE_COLOR(va)) {
 		CTR5(KTR_SPARE2,
 	"tsb_tte_enter: off colour va=%#lx pa=%#lx o=%p ot=%d pi=%#lx",
@@ -131,8 +134,6 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t 
 		PMAP_STATS_INC(tsb_nenter_u_oc);
 	}
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	if (pm == kernel_pmap) {
 		PMAP_STATS_INC(tsb_nenter_k);
 		tp = tsb_kvtotte(va);
Index: sys/vm/pmap.h
===================================================================
--- sys/vm/pmap.h	(revision 225467)
+++ sys/vm/pmap.h	(working copy)
@@ -102,6 +102,9 @@ void		 pmap_align_superpage(vm_object_t, vm_ooffse
 #if defined(__mips__)
 void		 pmap_align_tlb(vm_offset_t *);
 #endif
+void		 pmap_bgl_assert_locked(void);
+void		 pmap_bgl_lock(void);
+void		 pmap_bgl_unlock(void);
 void		 pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
 void		 pmap_clear_modify(vm_page_t m);
 void		 pmap_clear_reference(vm_page_t m);
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 225467)
+++ sys/vm/vm_page.c	(working copy)
@@ -137,14 +137,25 @@ SYSCTL_INT(_vm, OID_AUTO, tryrelock_restart, CTLFL
 
 static uma_zone_t fakepg_zone;
 
+static struct mtx pmap_bgl_mtx;
+
+static void pmap_bgl_initlock(void *dummy);
 static void vm_page_clear_dirty_mask(vm_page_t m, int pagebits);
 static void vm_page_queue_remove(int queue, vm_page_t m);
 static void vm_page_enqueue(int queue, vm_page_t m);
 static void vm_page_init_fakepg(void *dummy);
 
+SYSINIT(pmap_bgl_lock, SI_SUB_VM, SI_ORDER_FIRST, pmap_bgl_initlock, NULL);
 SYSINIT(vm_page, SI_SUB_VM, SI_ORDER_SECOND, vm_page_init_fakepg, NULL);
 
 static void
+pmap_bgl_initlock(void *dummy __unused)
+{
+
+	mtx_init(&pmap_bgl_mtx, "pmap big giant lock", NULL, MTX_DEF);
+}
+
+static void
 vm_page_init_fakepg(void *dummy)
 {
 
@@ -152,6 +163,27 @@ vm_page_init_fakepg(void *dummy)
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_NOFREE | UMA_ZONE_VM);
 }
 
+void
+pmap_bgl_lock(void)
+{
+
+	mtx_lock(&pmap_bgl_mtx);
+}
+
+void
+pmap_bgl_unlock(void)
+{
+
+	mtx_unlock(&pmap_bgl_mtx);
+}
+
+void
+pmap_bgl_assert_locked(void)
+{
+
+	mtx_assert(&pmap_bgl_mtx, MA_OWNED);
+}
+
 /* Make sure that u_long is at least 64 bits when PAGE_SIZE is 32K. */
 #if PAGE_SIZE == 32768
 #ifdef CTASSERT