Index: sys/sparc64/sparc64/pmap.c
===================================================================
--- sys/sparc64/sparc64/pmap.c	(revision 225638)
+++ sys/sparc64/sparc64/pmap.c	(working copy)
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -188,6 +189,11 @@ extern int tl1_immu_miss_patch_tsb_mask_1[];
 extern int tl1_immu_miss_patch_tsb_mask_2[];
 
 /*
+ * MD page read/write lock.
+ */
+struct rwlock md_page_rwlock;
+
+/*
  * If user pmap is processed with pmap_remove and the
  * resident count drops to 0, there are no more pages to remove, so we
  * need not continue.
@@ -671,16 +677,19 @@ pmap_bootstrap(u_int cpu_impl)
 
 	/*
 	 * Initialize the kernel pmap (which is statically allocated).
-	 * NOTE: PMAP_LOCK_INIT() is needed as part of the initialization
-	 * but sparc64 start up is not ready to initialize mutexes yet.
-	 * It is called in machdep.c.
 	 */
 	pm = kernel_pmap;
 	for (i = 0; i < MAXCPU; i++)
 		pm->pm_context[i] = TLB_CTX_KERNEL;
 	CPU_FILL(&pm->pm_active);
+	PMAP_LOCK_INIT(pm);
 
 	/*
+	 * Initialize the MD page lock.
+	 */
+	rw_init(&md_page_rwlock, "MD page");
+
+	/*
 	 * Flush all non-locked TLB entries possibly left over by the
 	 * firmware.
 	 */
@@ -890,7 +899,7 @@ pmap_cache_enter(vm_page_t m, vm_offset_t va)
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
 	    ("pmap_cache_enter: fake page"));
 	PMAP_STATS_INC(pmap_ncache_enter);
@@ -965,7 +974,7 @@ pmap_cache_remove(vm_page_t m, vm_offset_t va)
 	struct tte *tp;
 	int color;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	CTR3(KTR_PMAP, "pmap_cache_remove: m=%p va=%#lx c=%d", m, va,
 	    m->md.colors[DCACHE_COLOR(va)]);
 	KASSERT((m->flags & PG_FICTITIOUS) == 0,
@@ -1040,7 +1049,7 @@ pmap_kenter(vm_offset_t va, vm_page_t m)
 	vm_page_t om;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_STATS_INC(pmap_nkenter);
 	tp = tsb_kvtotte(va);
 	CTR4(KTR_PMAP, "pmap_kenter: va=%#lx pa=%#lx tp=%p data=%#lx",
@@ -1102,7 +1111,7 @@ pmap_kremove(vm_offset_t va)
 	struct tte *tp;
 	vm_page_t m;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_STATS_INC(pmap_nkremove);
 	tp = tsb_kvtotte(va);
 	CTR3(KTR_PMAP, "pmap_kremove: va=%#lx tp=%p data=%#lx", va, tp,
@@ -1153,19 +1162,16 @@ void
 pmap_qenter(vm_offset_t sva, vm_page_t *m, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqenter);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	while (count-- > 0) {
 		pmap_kenter(va, *m);
 		va += PAGE_SIZE;
 		m++;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
@@ -1177,18 +1183,15 @@ void
 pmap_qremove(vm_offset_t sva, int count)
 {
 	vm_offset_t va;
-	int locked;
 
 	PMAP_STATS_INC(pmap_nqremove);
 	va = sva;
-	if (!(locked = mtx_owned(&vm_page_queue_mtx)))
-		vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	while (count-- > 0) {
 		pmap_kremove(va);
 		va += PAGE_SIZE;
 	}
-	if (!locked)
-		vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 	tlb_range_demap(kernel_pmap, sva, va);
 }
 
@@ -1329,7 +1332,7 @@ pmap_remove_tte(struct pmap *pm, struct pmap *pm2,
 	vm_page_t m;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	data = atomic_readandclear_long(&tp->tte_data);
 	if ((data & TD_FAKE) == 0) {
 		m = PHYS_TO_VM_PAGE(TD_PA(data));
@@ -1366,7 +1369,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offse
 	    pm->pm_context[curcpu], start, end);
 	if (PMAP_REMOVE_DONE(pm))
 		return;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	if (end - start > PMAP_TSB_THRESH) {
 		tsb_foreach(pm, NULL, start, end, pmap_remove_tte);
@@ -1379,7 +1382,7 @@ pmap_remove(pmap_t pm, vm_offset_t start, vm_offse
 		tlb_range_demap(pm, start, end - 1);
 	}
 	PMAP_UNLOCK(pm);
-	vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 }
 
 void
@@ -1392,7 +1395,7 @@ pmap_remove_all(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_remove_all: page %p is not managed", m));
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	for (tp = TAILQ_FIRST(&m->md.tte_list); tp != NULL; tp = tpn) {
 		tpn = TAILQ_NEXT(tp, tte_link);
 		if ((tp->tte_data & TD_PV) == 0)
@@ -1414,8 +1417,8 @@
 		TTE_ZERO(tp);
 		PMAP_UNLOCK(pm);
 	}
+	MDPAGE_WUNLOCK();
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
 }
 
 static int
@@ -1477,11 +1480,11 @@ pmap_enter(pmap_t pm, vm_offset_t va, vm_prot_t ac
     vm_prot_t prot, boolean_t wired)
 {
 
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot, wired);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	MDPAGE_WUNLOCK();
 }
 
 /*
@@ -1500,7 +1503,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_pa
 	vm_page_t real;
 	u_long data;
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	KASSERT((m->oflags & (VPO_UNMANAGED | VPO_BUSY)) != 0 ||
 	    VM_OBJECT_LOCKED(m->object),
@@ -1511,6 +1514,7 @@ pmap_enter_locked(pmap_t pm, vm_offset_t va, vm_pa
 	/*
 	 * If this is a fake page from the device_pager, but it covers actual
 	 * physical memory, convert to the real backing page.
+	 */
 	if ((m->flags & PG_FICTITIOUS) != 0) {
 		real = vm_phys_paddr_to_vm_page(pa);
@@ -1643,27 +1647,27 @@ pmap_enter_object(pmap_t pm, vm_offset_t start, vm
 	psize = atop(end - start);
 	m = m_start;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) {
 		pmap_enter_locked(pm, start + ptoa(diff), m, prot &
 		    (VM_PROT_READ | VM_PROT_EXECUTE), FALSE);
 		m = TAILQ_NEXT(m, listq);
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	MDPAGE_WUNLOCK();
 }
 
 void
 pmap_enter_quick(pmap_t pm, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 {
 
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	PMAP_LOCK(pm);
 	pmap_enter_locked(pm, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE),
 	    FALSE);
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(pm);
+	MDPAGE_WUNLOCK();
 }
 
 void
@@ -1708,6 +1712,8 @@ pmap_copy_tte(pmap_t src_pmap, pmap_t dst_pmap, st
 	vm_page_t m;
 	u_long data;
 
+	MDPAGE_ASSERT_WLOCKED();
+
 	if ((tp->tte_data & TD_FAKE) != 0)
 		return (1);
 	if (tsb_tte_lookup(dst_pmap, va) == NULL) {
@@ -1728,7 +1734,7 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 
 	if (dst_addr != src_addr)
 		return;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	if (dst_pmap < src_pmap) {
 		PMAP_LOCK(dst_pmap);
 		PMAP_LOCK(src_pmap);
@@ -1746,9 +1752,9 @@ pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_off
 			pmap_copy_tte(src_pmap, dst_pmap, tp, va);
 		tlb_range_demap(dst_pmap, src_addr, src_addr + len - 1);
 	}
-	vm_page_unlock_queues();
 	PMAP_UNLOCK(src_pmap);
 	PMAP_UNLOCK(dst_pmap);
+	MDPAGE_WUNLOCK();
 }
 
 void
@@ -1945,7 +1951,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	loops = 0;
 	rv = FALSE;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -1956,7 +1962,7 @@ pmap_page_exists_quick(pmap_t pm, vm_page_t m)
 		if (++loops >= 16)
 			break;
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -1973,11 +1979,11 @@ pmap_page_wired_mappings(vm_page_t m)
 	count = 0;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (count);
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & (TD_PV | TD_WIRED)) ==
 		    (TD_PV | TD_WIRED))
 			count++;
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (count);
 }
 
@@ -2004,13 +2010,13 @@ pmap_page_is_mapped(vm_page_t m)
 	rv = FALSE;
 	if ((m->oflags & VPO_UNMANAGED) != 0)
 		return (rv);
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
 		if ((tp->tte_data & TD_PV) != 0) {
 			rv = TRUE;
 			break;
 		}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -2036,7 +2042,7 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	count = 0;
-	vm_page_lock_queues();
+	MDPAGE_WLOCK();
 	if ((tp = TAILQ_FIRST(&m->md.tte_list)) != NULL) {
 		tpf = tp;
 		do {
@@ -2050,7 +2056,7 @@ pmap_ts_referenced(vm_page_t m)
 				break;
 		} while ((tp = tpn) != NULL && tp != tpf);
 	}
-	vm_page_unlock_queues();
+	MDPAGE_WUNLOCK();
 	return (count);
 }
 
@@ -2073,7 +2079,7 @@ pmap_is_modified(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return (rv);
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2082,7 +2088,7 @@ pmap_is_modified(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -2116,7 +2122,7 @@ pmap_is_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	rv = FALSE;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2125,7 +2131,7 @@ pmap_is_referenced(vm_page_t m)
 			break;
 		}
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 	return (rv);
 }
 
@@ -2148,7 +2154,7 @@ pmap_clear_modify(vm_page_t m)
 	 */
 	if ((m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2156,7 +2162,7 @@ pmap_clear_modify(vm_page_t m)
 		if ((data & TD_W) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 }
 
 void
@@ -2167,7 +2173,7 @@ pmap_clear_reference(vm_page_t m)
 
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2175,7 +2181,7 @@ pmap_clear_reference(vm_page_t m)
 		if ((data & TD_REF) != 0)
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 	}
-	vm_page_unlock_queues();
+	MDPAGE_RUNLOCK();
 }
 
 void
@@ -2196,7 +2202,7 @@ pmap_remove_write(vm_page_t m)
 	if ((m->oflags & VPO_BUSY) == 0 &&
 	    (m->aflags & PGA_WRITEABLE) == 0)
 		return;
-	vm_page_lock_queues();
+	MDPAGE_RLOCK();
 	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link) {
 		if ((tp->tte_data & TD_PV) == 0)
 			continue;
@@ -2206,8 +2212,8 @@
 			tlb_page_demap(TTE_GET_PMAP(tp), TTE_GET_VA(tp));
 		}
 	}
+	MDPAGE_RUNLOCK();
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	vm_page_unlock_queues();
 }
 
 int
Index: sys/sparc64/sparc64/machdep.c
===================================================================
--- sys/sparc64/sparc64/machdep.c	(revision 225639)
+++ sys/sparc64/sparc64/machdep.c	(working copy)
@@ -597,11 +597,6 @@ sparc64_init(caddr_t mdp, u_long o1, u_long o2, u_
 	wrpr(pil, 0, 0);
 	wrpr(pstate, 0, PSTATE_KERNEL);
 
-	/*
-	 * Finish pmap initialization now that we're ready for mutexes.
-	 */
-	PMAP_LOCK_INIT(kernel_pmap);
-
 	OF_getprop(root, "name", sparc64_model, sizeof(sparc64_model) - 1);
 
 	kdb_init();
Index: sys/sparc64/sparc64/tsb.c
===================================================================
--- sys/sparc64/sparc64/tsb.c	(revision 225638)
+++ sys/sparc64/sparc64/tsb.c	(working copy)
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/rwlock.h>
 #include
 #include
 #include
@@ -131,7 +132,7 @@ tsb_tte_enter(pmap_t pm, vm_page_t m, vm_offset_t
 		PMAP_STATS_INC(tsb_nenter_u_oc);
 	}
 
-	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	MDPAGE_ASSERT_WLOCKED();
 	PMAP_LOCK_ASSERT(pm, MA_OWNED);
 	if (pm == kernel_pmap) {
 		PMAP_STATS_INC(tsb_nenter_k);
Index: sys/sparc64/include/pmap.h
===================================================================
--- sys/sparc64/include/pmap.h	(revision 225638)
+++ sys/sparc64/include/pmap.h	(working copy)
@@ -43,6 +43,7 @@
 #include
 #include
 #include
+#include <sys/_rwlock.h>
 #include
 #include
 
@@ -78,6 +79,12 @@ struct pmap {
 #define	PMAP_TRYLOCK(pmap)	mtx_trylock(&(pmap)->pm_mtx)
 #define	PMAP_UNLOCK(pmap)	mtx_unlock(&(pmap)->pm_mtx)
 
+#define	MDPAGE_RLOCK()		rw_rlock(&md_page_rwlock)
+#define	MDPAGE_WLOCK()		rw_wlock(&md_page_rwlock)
+#define	MDPAGE_RUNLOCK()	rw_runlock(&md_page_rwlock)
+#define	MDPAGE_WUNLOCK()	rw_wunlock(&md_page_rwlock)
+#define	MDPAGE_ASSERT_WLOCKED()	rw_assert(&md_page_rwlock, RA_WLOCKED)
+
 #define	pmap_page_get_memattr(m)	VM_MEMATTR_DEFAULT
 #define	pmap_page_set_memattr(m, ma)	(void)0
 
@@ -101,6 +108,7 @@ void	pmap_set_kctx(void);
 
 extern	struct pmap kernel_pmap_store;
 #define	kernel_pmap	(&kernel_pmap_store)
+extern	struct rwlock md_page_rwlock;
 extern	vm_paddr_t phys_avail[];
 extern	vm_offset_t virtual_avail;
 extern	vm_offset_t virtual_end;
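
For reference, the locking discipline the patch establishes can be sketched as follows. This is an illustrative sketch only, not part of the patch: the example_* functions are hypothetical and exist purely to demonstrate the intended use of the MDPAGE_*() macros added to sys/sparc64/include/pmap.h.

/*
 * Illustrative sketch only; not part of the patch above.  The
 * example_* functions are hypothetical.
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

#include <machine/pmap.h>
#include <machine/tte.h>

/*
 * Write side: any path that may modify a page's tte list or the
 * cache-color bookkeeping takes the MD page lock exclusively and
 * before the per-pmap mutex, releasing in the opposite order, as in
 * the reworked pmap_enter() above.
 */
static void
example_writer(pmap_t pm, vm_page_t m __unused)
{

	MDPAGE_WLOCK();
	PMAP_LOCK(pm);
	/*
	 * Modify m's tte list here; in pmap.c this is where
	 * pmap_enter_locked() runs, with MDPAGE_ASSERT_WLOCKED()
	 * satisfied.
	 */
	PMAP_UNLOCK(pm);
	MDPAGE_WUNLOCK();
}

/*
 * Read side: walks that only inspect a page's tte list, such as
 * pmap_page_is_mapped() above, take the lock shared and may run
 * concurrently with one another.
 */
static boolean_t
example_reader(vm_page_t m)
{
	struct tte *tp;
	boolean_t rv;

	rv = FALSE;
	MDPAGE_RLOCK();
	TAILQ_FOREACH(tp, &m->md.tte_list, tte_link)
		if ((tp->tte_data & TD_PV) != 0) {
			rv = TRUE;
			break;
		}
	MDPAGE_RUNLOCK();
	return (rv);
}

Because md_page_rwlock is a single global rwlock rather than a per-page lock, write-side contention should be comparable to the old global page queues mutex, while the read-mostly queries (pmap_page_wired_mappings(), pmap_is_modified() and friends) no longer serialize against each other.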