Index: arm/include/pmap.h
===================================================================
RCS file: /home/ncvs/src/sys/arm/include/pmap.h,v
retrieving revision 1.8
diff -u -p -r1.8 pmap.h
--- arm/include/pmap.h	26 Feb 2005 18:59:01 -0000	1.8
+++ arm/include/pmap.h	17 Mar 2005 21:03:34 -0000
@@ -143,6 +143,7 @@ struct pmap {
 	int			pm_count;	/* reference count */
 	int			pm_active;	/* active on cpus */
 	struct pmap_statistics	pm_stats;	/* pmap statictics */
+	TAILQ_HEAD(,pv_entry)	pm_pvlist;	/* list of mappings in pmap */
 	LIST_ENTRY(pmap)	pm_list;	/* List of all pmaps */
 };
 
@@ -163,6 +164,7 @@ typedef struct pv_entry {
 	pmap_t		pv_pmap;	/* pmap where mapping lies */
 	vm_offset_t	pv_va;		/* virtual address for mapping */
 	TAILQ_ENTRY(pv_entry)	pv_list;
+	TAILQ_ENTRY(pv_entry)	pv_plist;
 	int		pv_flags;	/* flags (wired, etc...) */
 } *pv_entry_t;
 
Index: arm/arm/pmap.c
===================================================================
RCS file: /home/ncvs/src/sys/arm/arm/pmap.c,v
retrieving revision 1.26
diff -u -p -r1.26 pmap.c
--- arm/arm/pmap.c	16 Mar 2005 23:56:29 -0000	1.26
+++ arm/arm/pmap.c	17 Mar 2005 23:45:29 -0000
@@ -402,7 +402,7 @@ int pmap_needs_pte_sync;
 /*
  * Data for the pv entry allocation mechanism
  */
-#define MINPV 1024
+#define MINPV 2048
 
 #ifndef PMAP_SHPGPERPROC
 #define PMAP_SHPGPERPROC 200
@@ -1538,7 +1538,6 @@ pmap_clearbit(struct vm_page *pg, u_int
 		pm = pv->pv_pmap;
 		oflags = pv->pv_flags;
 		pv->pv_flags &= ~maskbits;
-		pmap_update(pv->pv_pmap);
 
 #if 0
 		pmap_acquire_pmap_lock(pm);
@@ -1706,6 +1705,7 @@ pmap_enter_pv(struct vm_page *pg, struct
 	mtx_lock(&pg->md.pvh_mtx);
 #endif
 	TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list);
+	TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist);
 	pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD);
 	if (pm == pmap_kernel()) {
 		if (flags & PVF_WRITE)
@@ -1779,6 +1779,7 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t
 {
 
 	TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list);
+	TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist);
 	if (pve->pv_flags & PVF_WIRED)
 		--pm->pm_stats.wired_count;
 	pg->md.pv_list_count--;
@@ -1792,8 +1793,13 @@ pmap_nuke_pv(struct vm_page *pg, pmap_t
 			pg->md.urw_mappings--;
 		else
 			pg->md.uro_mappings--;
-	if (TAILQ_FIRST(&pg->md.pv_list) == NULL)
+	if (TAILQ_FIRST(&pg->md.pv_list) == NULL ||
+	    (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0)) {
+		pg->md.pvh_attrs &= ~PVF_MOD;
 		vm_page_flag_clear(pg, PG_WRITEABLE);
+	}
+	if (pve->pv_flags & PVF_WRITE)
+		pmap_vac_me_harder(pg, pm, pve->pv_va);
 }
 
 static struct pv_entry *
@@ -1869,6 +1875,11 @@ pmap_modify_pv(struct vm_page *pg, pmap_
 			pg->md.uro_mappings++;
 			pg->md.urw_mappings--;
 		}
+		if (pg->md.krw_mappings == 0 && pg->md.urw_mappings == 0) {
+			pg->md.pvh_attrs &= ~PVF_MOD;
+			vm_page_flag_clear(pg, PG_WRITEABLE);
+		}
+		pmap_vac_me_harder(pg, pm, 0);
 	}
 
 	return (oflags);
@@ -2772,10 +2783,36 @@ pmap_page_protect(vm_page_t m, vm_prot_t
 void
 pmap_remove_pages(pmap_t pmap, vm_offset_t sva, vm_offset_t eva)
 {
-
-	cpu_idcache_wbinv_all();
-	cpu_tlb_flushID();
-	cpu_cpwait();
+	struct pv_entry *pv, *npv;
+	struct l2_bucket *l2b = NULL;
+	vm_page_t m;
+	pt_entry_t *pt;
+
+	vm_page_lock_queues();
+	for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) {
+		if (pv->pv_va >= eva || pv->pv_va < sva) {
+			npv = TAILQ_NEXT(pv, pv_plist);
+			continue;
+		}
+		if (pv->pv_flags & PVF_WIRED) {
+			/* The page is wired, cannot remove it now. */
+			npv = TAILQ_NEXT(pv, pv_plist);
+			continue;
+		}
+		pmap->pm_stats.resident_count--;
+		l2b = pmap_get_l2_bucket(pmap, pv->pv_va);
+		KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages"));
+		pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
+		m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK);
+		*pt = 0;
+		PTE_SYNC(pt);
+		npv = TAILQ_NEXT(pv, pv_plist);
+		if (pv->pv_flags & PVF_MOD)
+			vm_page_dirty(m);
+		pmap_nuke_pv(m, pmap, pv);
+		pmap_free_pv_entry(pv);
+	}
+	vm_page_unlock_queues();
 }
 
 
@@ -3060,12 +3097,10 @@ pmap_remove_all(vm_page_t m)
 	if (TAILQ_EMPTY(&m->md.pv_list))
 		return;
 	curpm = vmspace_pmap(curproc->p_vmspace);
-	pmap_update(curpm);
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		if (flush == FALSE && (pv->pv_pmap == curpm ||
 		    pv->pv_pmap == pmap_kernel()))
 			flush = TRUE;
-		pmap_update(pv->pv_pmap);
 		l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va);
 		KASSERT(l2b != NULL, ("No l2 bucket"));
 		ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)];
@@ -3121,7 +3156,6 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
 
 	mtx_lock(&Giant);
 
-	pmap_update(pm);
 	/*
 	 * OK, at this point, we know we're doing write-protect operation.
 	 * If the pmap is active, write-back the range.
@@ -3131,6 +3165,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
 	flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1;
 	flags = 0;
 
+	vm_page_lock_queues();
 	while (sva < eva) {
 		next_bucket = L2_NEXT_BUCKET(sva);
 		if (next_bucket > eva)
@@ -3185,6 +3220,7 @@ pmap_protect(pmap_t pm, vm_offset_t sva,
 		if (PV_BEEN_REFD(flags))
 			pmap_tlb_flushD(pm);
 	}
+	vm_page_unlock_queues();
 
 	mtx_unlock(&Giant);
 }
@@ -3215,7 +3251,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 	u_int oflags;
 	vm_paddr_t pa;
 
-	pmap_update(pmap);
 	vm_page_lock_queues();
 	if (va == vector_page) {
 		pa = systempage.pv_pa;
@@ -3271,9 +3306,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 			nflags |= PVF_REF;
 
-		if (((prot & VM_PROT_WRITE) != 0 &&
-		    m && ((m->flags & PG_WRITEABLE) ||
-		    (m->md.pvh_attrs & PVF_MOD) != 0))) {
+		if (m && ((prot & VM_PROT_WRITE) != 0 ||
+		    (m->md.pvh_attrs & PVF_MOD))) {
 			/*
 			 * This is a writable mapping, and the
 			 * page's mod state indicates it has
@@ -3281,6 +3315,8 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 			 * writable from the outset.
 			 */
 			nflags |= PVF_MOD;
+			if (!(m->md.pvh_attrs & PVF_MOD))
+				vm_page_dirty(m);
 		}
 	} else {
 		/*
@@ -3332,7 +3368,6 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 			pve = pmap_remove_pv(opg, pmap, va);
 			if (!m)
 				pmap_free_pv_entry(pve);
-			pmap_vac_me_harder(opg, pmap, 0);
 			KASSERT(pve != NULL, ("No pv"));
 #if 0
 			simple_unlock(&opg->mdpage.pvh_slock);
@@ -3361,7 +3396,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va,
 			if ((pve = pmap_get_pv_entry()) == NULL) {
 				panic("pmap_enter: no pv entries");
 			}
-			if (m)
+			if (m && ((m->flags & (PG_FICTITIOUS|PG_UNMANAGED)) == 0))
 				pmap_enter_pv(m, pve, pmap, va, nflags);
 		}
 	/*
@@ -3444,7 +3479,7 @@ pmap_enter_quick(pmap_t pmap, vm_offset_
 	VM_OBJECT_UNLOCK(m->object);
 	mtx_lock(&Giant);
 	pmap_enter(pmap, va, m, VM_PROT_READ|VM_PROT_EXECUTE, FALSE);
-	pmap_dcache_wbinv_all(pmap); /* XXX: shouldn't be needed */
+	pmap_dcache_wbinv_all(pmap); /* XXX: Shouldn't be needed */
 	mtx_unlock(&Giant);
 	VM_OBJECT_LOCK(m->object);
 	vm_page_lock_queues();
@@ -3616,22 +3651,6 @@ pmap_extract_and_hold(pmap_t pmap, vm_of
 	return (m);
 }
 
-void
-pmap_update(pmap_t pm)
-{
-
-	if (pmap_is_current(pm)) {
-		/*
-		 * If we're dealing with a current userland pmap, move its L1
-		 * to the end of the LRU.
-		 */
-		if (pm != pmap_kernel())
-			pmap_use_l1(pm);
-	}
-
-}
-
-
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
@@ -3649,12 +3668,12 @@ pmap_pinit(pmap_t pmap)
 	pmap->pm_count = 1;
 	pmap->pm_active = 0;
 
+	TAILQ_INIT(&pmap->pm_pvlist);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
 	pmap->pm_stats.resident_count = 1;
 	if (vector_page < KERNBASE) {
 		pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa),
 		    VM_PROT_READ, 1);
-		pmap_update(pmap);
 	}
 }
 
@@ -3726,7 +3745,6 @@ pmap_remove(pmap_t pm, vm_offset_t sva,
 #endif
 
 	vm_page_lock_queues();
-	pmap_update(pm);
 	if (!pmap_is_current(pm)) {
 		cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
 	} else
@@ -3864,15 +3882,13 @@ pmap_remove(pmap_t pm, vm_offset_t sva,
 				PTE_SYNC_CURRENT(pm, cleanlist[cnt].pte);
 			}
 
-			/*
-			 * If it looks like we're removing a whole bunch
-			 * of mappings, it's faster to just write-back
-			 * the whole cache now and defer TLB flushes until
-			 * pmap_update() is called.
-			 */
 			if (total <= PMAP_REMOVE_CLEAN_LIST_SIZE)
 				cleanlist_idx = 0;
 			else {
+				/*
+				 * We are removing so many entries that it's
+				 * just easier to flush the whole cache.
+				 */
 				cleanlist_idx = PMAP_REMOVE_CLEAN_LIST_SIZE + 1;
 				pmap_idcache_wbinv_all(pm);
 				flushall = 1;
@@ -4080,7 +4096,6 @@ pmap_clean_page(struct pv_entry *pv, boo
 	for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) {
 		if (npv->pv_pmap == pmap_kernel() || npv->pv_pmap == pm) {
 			flags |= npv->pv_flags;
-			pmap_update(npv->pv_pmap);
 			/*
 			 * The page is mapped non-cacheable in
 			 * this map. No need to flush the cache.
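
The data-structure side of this change is the second TAILQ linkage on each pv_entry: besides the existing per-page pv_list, every mapping is now also threaded onto its pmap's new pm_pvlist, which is what lets pmap_remove_pages() walk only the mappings that belong to one pmap instead of flushing the whole cache and TLB. Below is a minimal userland sketch of that dual-list pattern using the sys/queue.h TAILQ macros; the types, list heads, and the remove loop are simplified stand-ins for the kernel code above, not the kernel structures themselves.

/*
 * Minimal userland sketch (not kernel code) of the dual-list pv_entry
 * bookkeeping introduced by this patch: every mapping is linked on a
 * per-page list (pv_list) and, new with this change, on a per-pmap
 * list (pv_plist).  Names and types are simplified stand-ins.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

struct pv_entry {
	unsigned long pv_va;			/* virtual address of the mapping */
	TAILQ_ENTRY(pv_entry) pv_list;		/* linkage on the page's list */
	TAILQ_ENTRY(pv_entry) pv_plist;		/* linkage on the pmap's list */
};

TAILQ_HEAD(pv_head, pv_entry);

int
main(void)
{
	struct pv_head page_pvs, pmap_pvs;	/* stand-ins for md.pv_list / pm_pvlist */
	struct pv_entry *pv, *npv;
	int i;

	TAILQ_INIT(&page_pvs);
	TAILQ_INIT(&pmap_pvs);

	/* Like pmap_enter_pv(): a new mapping goes on both lists. */
	for (i = 0; i < 4; i++) {
		pv = malloc(sizeof(*pv));
		pv->pv_va = 0x1000UL * (i + 1);
		TAILQ_INSERT_HEAD(&page_pvs, pv, pv_list);
		TAILQ_INSERT_HEAD(&pmap_pvs, pv, pv_plist);
	}

	/*
	 * Like the new pmap_remove_pages() loop: walk only the per-pmap
	 * list, fetching the next element before unlinking the current one.
	 */
	for (pv = TAILQ_FIRST(&pmap_pvs); pv != NULL; pv = npv) {
		npv = TAILQ_NEXT(pv, pv_plist);
		printf("removing mapping at va 0x%lx\n", pv->pv_va);
		TAILQ_REMOVE(&page_pvs, pv, pv_list);	/* the pmap_nuke_pv() half */
		TAILQ_REMOVE(&pmap_pvs, pv, pv_plist);
		free(pv);
	}
	return (0);
}

The sketch mirrors how pmap_enter_pv() inserts on both lists and how pmap_remove_pages()/pmap_nuke_pv() unlink from both before the entry is freed.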