diff -ru4 --exclude=compile ../sys/alpha/alpha/pmap.c ./alpha/alpha/pmap.c --- ../sys/alpha/alpha/pmap.c Sat May 20 03:08:22 2000 +++ ./alpha/alpha/pmap.c Sun May 21 04:49:37 2000 @@ -237,11 +237,8 @@ */ #define pte_prot(m, p) (protection_codes[m == pmap_kernel() ? 0 : 1][p]) int protection_codes[2][8]; -#define pa_index(pa) atop((pa) - vm_first_phys) -#define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) - /* * Return non-zero if this pmap is currently active */ #define pmap_isactive(pmap) (pmap->pm_active) @@ -319,10 +316,8 @@ vm_offset_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? */ -static vm_offset_t vm_first_phys; -static int pv_npg; static vm_object_t kptobj; static int nklev3, nklev2; @@ -350,27 +345,25 @@ * All those kernel PT submaps that BSD is so fond of */ pt_entry_t *CMAP1 = 0; static pt_entry_t *CMAP2; -static pv_table_t *pv_table; caddr_t CADDR1; static caddr_t CADDR2; static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv)); static pv_entry_t get_pv_entry __P((void)); static void alpha_protection_init __P((void)); -static void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem)); +static void pmap_changebit __P((vm_page_t m, int bit, boolean_t setem)); -static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa)); -static void pmap_remove_all __P((vm_offset_t pa)); +static void pmap_remove_all __P((vm_page_t m)); static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, - vm_offset_t pa, vm_page_t mpte)); + vm_page_t m, vm_page_t mpte)); static int pmap_remove_pte __P((pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva)); static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va)); -static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv, +static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m, vm_offset_t va)); static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va, - vm_page_t mpte, vm_offset_t pa)); + vm_page_t mpte, vm_page_t m)); static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va)); static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p)); @@ -627,47 +620,36 @@ void pmap_init(phys_start, phys_end) vm_offset_t phys_start, phys_end; { - vm_offset_t addr; - vm_size_t s; int i; int initial_pvs; /* - * calculate the number of pv_entries needed - */ - vm_first_phys = phys_avail[0]; - for (i = 0; phys_avail[i + 1]; i += 2); - pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE; - - /* * Allocate memory for random pmap data structures. Includes the * pv_head_table. 
*/ - s = (vm_size_t) (sizeof(pv_table_t) * pv_npg); - s = round_page(s); - addr = (vm_offset_t) kmem_alloc(kernel_map, s); - pv_table = (pv_table_t *) addr; - for(i = 0; i < pv_npg; i++) { - vm_offset_t pa; - TAILQ_INIT(&pv_table[i].pv_list); - pv_table[i].pv_list_count = 0; - pa = vm_first_phys + i * PAGE_SIZE; - pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa); - } + for(i = 0; i < vm_page_array_size; i++) { + vm_page_t m; + + m = &vm_page_array[i]; + TAILQ_INIT(&m->md.pv_list); + m->md.pv_list_count = 0; + m->md.pv_flags = 0; + } /* * init the pv free list */ - initial_pvs = pv_npg; + initial_pvs = vm_page_array_size; if (initial_pvs < MINPV) initial_pvs = MINPV; pvzone = &pvzone_store; pvinit = (struct pv_entry *) kmem_alloc(kernel_map, initial_pvs * sizeof (struct pv_entry)); - zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg); + zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, + vm_page_array_size); /* * object for kernel page table pages */ kptobj = vm_object_allocate(OBJT_DEFAULT, NKLEV3MAPS + NKLEV2MAPS); @@ -685,31 +667,13 @@ */ void pmap_init2() { - pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg; + pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size; pv_entry_high_water = 9 * (pv_entry_max / 10); zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1); } -/* - * Used to map a range of physical addresses into kernel - * virtual address space. - * - * For now, VM is already on, we only need to map the - * specified memory. - */ -vm_offset_t -pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot) -{ - while (start < end) { - pmap_enter(kernel_pmap, virt, start, prot, FALSE); - virt += PAGE_SIZE; - start += PAGE_SIZE; - } - return (virt); -} - /*************************************************** * Manipulate TLBs for a pmap ***************************************************/ @@ -824,27 +788,8 @@ else return 0; } -/* - * determine if a page is managed (memory vs. device) - */ -static PMAP_INLINE int -pmap_is_managed(pa) - vm_offset_t pa; -{ - int i; - - if (!pmap_initialized) - return 0; - - for (i = 0; phys_avail[i + 1]; i += 2) { - if (pa < phys_avail[i + 1] && pa >= phys_avail[i]) - return 1; - } - return 0; -} - /*************************************************** * Low level mapping routines..... ***************************************************/ @@ -931,8 +876,27 @@ *pte = 0; pmap_invalidate_page(kernel_pmap, va); } +/* + * Used to map a range of physical addresses into kernel + * virtual address space. + * + * For now, VM is already on, we only need to map the + * specified memory. 
+ */ +vm_offset_t +pmap_map(vm_offset_t virt, vm_offset_t start, vm_offset_t end, int prot) +{ + while (start < end) { + pmap_kenter(virt, start); + virt += PAGE_SIZE; + start += PAGE_SIZE; + } + return (virt); +} + + static vm_page_t pmap_page_lookup(vm_object_t object, vm_pindex_t pindex) { vm_page_t m; @@ -1699,11 +1663,9 @@ */ void pmap_collect() { - pv_table_t *ppv; int i; - vm_offset_t pa; vm_page_t m; static int warningdone=0; if (pmap_pagedaemon_waken == 0) @@ -1713,18 +1675,14 @@ printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n"); warningdone++; } - for(i = 0; i < pv_npg; i++) { - if ((ppv = &pv_table[i]) == 0) - continue; - m = ppv->pv_vm_page; - if ((pa = VM_PAGE_TO_PHYS(m)) == 0) - continue; + for(i = 0; i < vm_page_array_size; i++) { + m = &vm_page_array[i]; if (m->wire_count || m->hold_count || m->busy || - (m->flags & PG_BUSY)) + (m->flags & PG_BUSY)) continue; - pmap_remove_all(pa); + pmap_remove_all(m); } pmap_pagedaemon_waken = 0; } @@ -1736,17 +1694,17 @@ * the entry. In either case we free the now unused entry. */ static int -pmap_remove_entry(pmap_t pmap, pv_table_t* ppv, vm_offset_t va) +pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va) { pv_entry_t pv; int rtval; int s; s = splvm(); - if (ppv->pv_list_count < pmap->pm_stats.resident_count) { - for (pv = TAILQ_FIRST(&ppv->pv_list); + if (m->md.pv_list_count < pmap->pm_stats.resident_count) { + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; @@ -1762,12 +1720,12 @@ rtval = 0; if (pv) { rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem); - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); - ppv->pv_list_count--; - if (TAILQ_FIRST(&ppv->pv_list) == NULL) - vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE); + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); + m->md.pv_list_count--; + if (TAILQ_FIRST(&m->md.pv_list) == NULL) + vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); } @@ -1780,26 +1738,23 @@ * Create a pv entry for page at pa for * (pmap, va). */ static void -pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_offset_t pa) +pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m) { int s; pv_entry_t pv; - pv_table_t *ppv; s = splvm(); pv = get_pv_entry(); pv->pv_va = va; pv->pv_pmap = pmap; pv->pv_ptem = mpte; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); - - ppv = pa_to_pvh(pa); - TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list); - ppv->pv_list_count++; + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); + m->md.pv_list_count++; splx(s); } @@ -1809,9 +1764,9 @@ static int pmap_remove_pte(pmap_t pmap, pt_entry_t* ptq, vm_offset_t va) { pt_entry_t oldpte; - pv_table_t *ppv; + vm_page_t m; oldpte = *ptq; PMAP_DEBUG_VA(va); *ptq = 0; @@ -1819,10 +1774,10 @@ pmap->pm_stats.wired_count -= 1; pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { - ppv = pa_to_pvh(pmap_pte_pa(&oldpte)); - return pmap_remove_entry(pmap, ppv, va); + m = PHYS_TO_VM_PAGE(pmap_pte_pa(&oldpte)); + return pmap_remove_entry(pmap, m, va); } else { return pmap_unuse_pt(pmap, va, NULL); } @@ -1910,12 +1865,11 @@ * pmap_remove (slow...) */ static void -pmap_remove_all(vm_offset_t pa) +pmap_remove_all(vm_page_t m) { register pv_entry_t pv; - pv_table_t *ppv; pt_entry_t *pte, tpte; int nmodify; int s; @@ -1924,22 +1878,21 @@ /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! 
*/ - if (!pmap_is_managed(pa)) { + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", pa); } #endif s = splvm(); - ppv = pa_to_pvh(pa); - while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) { + while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va); pv->pv_pmap->pm_stats.resident_count--; - if (pmap_pte_pa(pte) != pa) - panic("pmap_remove_all: pv_table for %lx is inconsistent", pa); + if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m)) + panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m)); tpte = *pte; PMAP_DEBUG_VA(pv->pv_va); @@ -1949,15 +1902,15 @@ pmap_invalidate_page(pv->pv_pmap, pv->pv_va); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); - ppv->pv_list_count--; + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); + m->md.pv_list_count--; pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); } - vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE); + vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); splx(s); return; } @@ -2038,11 +1991,12 @@ * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void -pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, +pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { + vm_offset_t pa; pt_entry_t *pte; vm_offset_t opa; pt_entry_t origpte, newpte; vm_page_t mpte; @@ -2075,9 +2029,9 @@ panic("pmap_enter: invalid kernel page tables pmap=%p, va=0x%lx\n", pmap, va); } origpte = *pte; - pa &= ~PAGE_MASK; + pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK; managed = 0; opa = pmap_pte_pa(pte); /* @@ -2115,14 +2069,14 @@ panic("pmap_enter: pte vanished, va: 0x%lx", va); } /* - * Enter on the PV list if part of our managed memory Note that we + * Enter on the PV list if part of our managed memory. Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. */ - if (pmap_is_managed(pa)) { - pmap_insert_entry(pmap, va, mpte, pa); + if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) { + pmap_insert_entry(pmap, va, mpte, m); managed |= PG_MANAGED; } /* @@ -2138,17 +2092,17 @@ */ newpte = pmap_phys_to_pte(pa) | pte_prot(pmap, prot) | PG_V | managed; if (managed) { - pv_table_t* ppv; + vm_page_t om; /* * Set up referenced/modified emulation for the new mapping */ - ppv = pa_to_pvh(pa); - if ((ppv->pv_flags & PV_TABLE_REF) == 0) + om = PHYS_TO_VM_PAGE(pa); + if ((om->md.pv_flags & PV_TABLE_REF) == 0) newpte |= PG_FOR | PG_FOW | PG_FOE; - else if ((ppv->pv_flags & PV_TABLE_MOD) == 0) + else if ((om->md.pv_flags & PV_TABLE_MOD) == 0) newpte |= PG_FOW; } if (wired) @@ -2179,9 +2133,9 @@ * but is *MUCH* faster than pmap_enter... */ static vm_page_t -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_page_t mpte) +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte) { register pt_entry_t *pte; @@ -2243,14 +2197,14 @@ return 0; } /* - * Enter on the PV list if part of our managed memory Note that we + * Enter on the PV list if part of our managed memory. Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. 
*/ PMAP_DEBUG_VA(va); - pmap_insert_entry(pmap, va, mpte, pa); + pmap_insert_entry(pmap, va, mpte, m); /* * Increment counters */ @@ -2258,9 +2212,9 @@ /* * Now validate mapping with RO protection */ - *pte = pmap_phys_to_pte(pa) | PG_V | PG_KRE | PG_URE | PG_MANAGED; + *pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | PG_MANAGED; alpha_pal_imb(); /* XXX overkill? */ return mpte; } @@ -2320,10 +2274,9 @@ if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); mpte = pmap_enter_quick(pmap, - addr + alpha_ptob(tmpidx), - VM_PAGE_TO_PHYS(p), mpte); + addr + alpha_ptob(tmpidx), p, mpte); vm_page_flag_set(p, PG_MAPPED); vm_page_wakeup(p); } objpgs -= 1; @@ -2340,10 +2293,9 @@ if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); mpte = pmap_enter_quick(pmap, - addr + alpha_ptob(tmpidx), - VM_PAGE_TO_PHYS(p), mpte); + addr + alpha_ptob(tmpidx), p, mpte); vm_page_flag_set(p, PG_MAPPED); vm_page_wakeup(p); } } @@ -2436,10 +2388,9 @@ if ((m->queue - m->pc) == PQ_CACHE) { vm_page_deactivate(m); } vm_page_busy(m); - mpte = pmap_enter_quick(pmap, addr, - VM_PAGE_TO_PHYS(m), mpte); + mpte = pmap_enter_quick(pmap, addr, m, mpte); vm_page_flag_set(m, PG_MAPPED); vm_page_wakeup(m); } } @@ -2574,26 +2525,24 @@ * this routine returns true if a physical page resides * in the given pmap. */ boolean_t -pmap_page_exists(pmap, pa) +pmap_page_exists(pmap, m) pmap_t pmap; - vm_offset_t pa; + vm_page_t m; { register pv_entry_t pv; - pv_table_t *ppv; int s; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; s = splvm(); - ppv = pa_to_pvh(pa); /* * Not found, check current mappings returning immediately if found. */ - for (pv = TAILQ_FIRST(&ppv->pv_list); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { if (pv->pv_pmap == pmap) { splx(s); @@ -2618,9 +2567,9 @@ pmap_t pmap; vm_offset_t sva, eva; { pt_entry_t *pte, tpte; - pv_table_t *ppv; + vm_page_t m; pv_entry_t pv, npv; int s; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY @@ -2659,19 +2608,19 @@ } PMAP_DEBUG_VA(pv->pv_va); *pte = 0; - ppv = pa_to_pvh(pmap_pte_pa(&tpte)); + m = PHYS_TO_VM_PAGE(pmap_pte_pa(&tpte)); pv->pv_pmap->pm_stats.resident_count--; npv = TAILQ_NEXT(pv, pv_plist); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); - ppv->pv_list_count--; - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); - if (TAILQ_FIRST(&ppv->pv_list) == NULL) { - vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE); + m->md.pv_list_count--; + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); + if (TAILQ_FIRST(&m->md.pv_list) == NULL) { + vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); } pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); @@ -2683,28 +2632,26 @@ /* * this routine is used to modify bits in ptes */ static void -pmap_changebit(vm_offset_t pa, int bit, boolean_t setem) +pmap_changebit(vm_page_t m, int bit, boolean_t setem) { pv_entry_t pv; - pv_table_t *ppv; pt_entry_t *pte; int changed; int s; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; s = splvm(); changed = 0; - ppv = pa_to_pvh(pa); /* * Loop over all current mappings setting/clearing as appropos If * setting RO do we need to clear the VAC? */ - for (pv = TAILQ_FIRST(&ppv->pv_list); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { /* @@ -2746,15 +2693,15 @@ * * Lower the permission for all mappings to a given page. 
*/ void -pmap_page_protect(vm_offset_t phys, vm_prot_t prot) +pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { - pmap_changebit(phys, PG_KWE|PG_UWE, FALSE); + pmap_changebit(m, PG_KWE|PG_UWE, FALSE); } else { - pmap_remove_all(phys); + pmap_remove_all(m); } } } @@ -2771,20 +2718,16 @@ * Return the count of reference bits for a page, clearing all of them. * */ int -pmap_ts_referenced(vm_offset_t pa) +pmap_ts_referenced(vm_page_t m) { - pv_table_t *ppv; - - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return 0; - ppv = pa_to_pvh(pa); - - if (ppv->pv_flags & PV_TABLE_REF) { - pmap_changebit(pa, PG_FOR|PG_FOE|PG_FOW, TRUE); - ppv->pv_flags &= ~PV_TABLE_REF; + if (m->md.pv_flags & PV_TABLE_REF) { + pmap_changebit(m, PG_FOR|PG_FOE|PG_FOW, TRUE); + m->md.pv_flags &= ~PV_TABLE_REF; return 1; } return 0; @@ -2796,36 +2739,29 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t -pmap_is_modified(vm_offset_t pa) +pmap_is_modified(vm_page_t m) { - pv_table_t *ppv; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; - ppv = pa_to_pvh(pa); - - return (ppv->pv_flags & PV_TABLE_MOD) != 0; + return (m->md.pv_flags & PV_TABLE_MOD) != 0; } /* * Clear the modify bits on the specified physical page. */ void -pmap_clear_modify(vm_offset_t pa) +pmap_clear_modify(vm_page_t m) { - pv_table_t *ppv; - - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; - ppv = pa_to_pvh(pa); - - if (ppv->pv_flags & PV_TABLE_MOD) { - pmap_changebit(pa, PG_FOW, TRUE); - ppv->pv_flags &= ~PV_TABLE_MOD; + if (m->md.pv_flags & PV_TABLE_MOD) { + pmap_changebit(m, PG_FOW, TRUE); + m->md.pv_flags &= ~PV_TABLE_MOD; } } /* @@ -2837,32 +2773,25 @@ */ void pmap_page_is_free(vm_page_t m) { - pv_table_t *ppv; - - ppv = pa_to_pvh(VM_PAGE_TO_PHYS(m)); - ppv->pv_flags = 0; + m->md.pv_flags = 0; } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void -pmap_clear_reference(vm_offset_t pa) +pmap_clear_reference(vm_page_t m) { - pv_table_t *ppv; - - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; - ppv = pa_to_pvh(pa); - - if (ppv->pv_flags & PV_TABLE_REF) { - pmap_changebit(pa, PG_FOR|PG_FOE|PG_FOW, TRUE); - ppv->pv_flags &= ~PV_TABLE_REF; + if (m->md.pv_flags & PV_TABLE_REF) { + pmap_changebit(m, PG_FOR|PG_FOE|PG_FOW, TRUE); + m->md.pv_flags &= ~PV_TABLE_REF; } } /* @@ -2875,9 +2804,9 @@ pmap_emulate_reference(struct proc *p, vm_offset_t v, int user, int write) { pt_entry_t faultoff, *pte; vm_offset_t pa; - pv_table_t *ppv; + vm_page_t m; /* * Convert process and virtual address to physical address. */ @@ -2931,18 +2860,18 @@ * The rules: * (1) always mark page as used, and * (2) if it was a write fault, mark page as modified. 
*/ - ppv = pa_to_pvh(pa); - ppv->pv_flags |= PV_TABLE_REF; + m = PHYS_TO_VM_PAGE(pa); + m->md.pv_flags |= PV_TABLE_REF; faultoff = PG_FOR | PG_FOE; - vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED); + vm_page_flag_set(m, PG_REFERENCED); if (write) { - ppv->pv_flags |= PV_TABLE_MOD; - vm_page_dirty(ppv->pv_vm_page); + m->md.pv_flags |= PV_TABLE_MOD; + vm_page_dirty(m); faultoff |= PG_FOW; } - pmap_changebit(pa, faultoff, FALSE); + pmap_changebit(m, faultoff, FALSE); if ((*pte & faultoff) != 0) { #if 1 /* * XXX dfr - don't think its possible in our pmap @@ -3019,49 +2948,47 @@ vm_offset_t addr; { pt_entry_t *pte; - vm_page_t m; int val = 0; pte = pmap_lev3pte(pmap, addr); if (pte == 0) { return 0; } if (pmap_pte_v(pte)) { - pv_table_t *ppv; + vm_page_t m; vm_offset_t pa; val = MINCORE_INCORE; if ((*pte & PG_MANAGED) == 0) return val; pa = pmap_pte_pa(pte); - ppv = pa_to_pvh(pa); - m = ppv->pv_vm_page; + m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ - if (ppv->pv_flags & PV_TABLE_MOD) + if (m->md.pv_flags & PV_TABLE_MOD) val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; /* * Modified by someone */ - else if (m->dirty || pmap_is_modified(pa)) + else if (m->dirty || pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; /* * Referenced by us */ - if (ppv->pv_flags & PV_TABLE_REF) + if (m->md.pv_flags & PV_TABLE_REF) val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER; /* * Referenced by someone */ - else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) { + else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } } @@ -3105,16 +3032,18 @@ pmap_active = 0; } vm_offset_t -pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { +pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) +{ return addr; } #if 0 #if defined(PMAP_DEBUG) -pmap_pid_dump(int pid) { +pmap_pid_dump(int pid) +{ pmap_t pmap; struct proc *p; int npte = 0; int index; @@ -3146,9 +3075,9 @@ if (pte && pmap_pte_v(pte)) { vm_offset_t pa; vm_page_t m; pa = *(int *)pte; - m = PHYS_TO_VM_PAGE((pa & PG_FRAME)); + m = PHYS_TO_VM_PAGE(pa); printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags); npte++; index++; @@ -3170,9 +3099,9 @@ #if defined(DEBUG) static void pads __P((pmap_t pm)); -static void pmap_pvdump __P((vm_offset_t pa)); +static void pmap_pvdump __P((vm_page_t m)); /* print address space of pmap*/ static void pads(pm) @@ -3202,14 +3131,13 @@ static void pmap_pvdump(pa) vm_offset_t pa; { - pv_table_t *ppv; - register pv_entry_t pv; + pv_entry_t pv; printf("pa %x", pa); - ppv = pa_to_pvh(pa); - for (pv = TAILQ_FIRST(&ppv->pv_list); + m = PHYS_TO_VM_PAGE(pa); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { #ifdef used_to_be printf(" -> pmap %x, va %x, flags %x", diff -ru4 --exclude=compile ../sys/alpha/include/pmap.h ./alpha/include/pmap.h --- ../sys/alpha/include/pmap.h Sat May 20 03:08:22 2000 +++ ./alpha/include/pmap.h Sat May 20 00:19:57 2000 @@ -157,14 +157,14 @@ /* * Pmap stuff */ struct pv_entry; -typedef struct { + +struct md_page { int pv_list_count; - struct vm_page *pv_vm_page; int pv_flags; TAILQ_HEAD(,pv_entry) pv_list; -} pv_table_t; +}; #define PV_TABLE_MOD 0x01 /* modified */ #define PV_TABLE_REF 0x02 /* referenced */ diff -ru4 --exclude=compile ../sys/cam/scsi/scsi_da.c ./cam/scsi/scsi_da.c --- ../sys/cam/scsi/scsi_da.c Sat May 20 03:08:24 2000 +++ ./cam/scsi/scsi_da.c Sat May 20 00:19:59 2000 @@ -616,13 +616,11 @@ while (num > 0) { if 
(is_physical_memory(addr)) { - pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, - trunc_page(addr), VM_PROT_READ, TRUE); + pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr)); } else { - pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, - trunc_page(0), VM_PROT_READ, TRUE); + pmap_kenter((vm_offset_t)CADDR1, trunc_page(0)); } xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1); csio.ccb_h.ccb_state = DA_CCB_DUMP; diff -ru4 --exclude=compile ../sys/dev/ata/ata-disk.c ./dev/ata/ata-disk.c --- ../sys/dev/ata/ata-disk.c Sat May 20 03:08:26 2000 +++ ./dev/ata/ata-disk.c Sat May 20 00:20:01 2000 @@ -273,13 +273,11 @@ while (count > 0) { DELAY(1000); if (is_physical_memory(addr)) - pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, - trunc_page(addr), VM_PROT_READ, TRUE); + pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr)); else - pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, - trunc_page(0), VM_PROT_READ, TRUE); + pmap_kenter((vm_offset_t)CADDR1, trunc_page(0)); bzero(&request, sizeof(struct ad_request)); request.device = adp; request.blockaddr = blkno; diff -ru4 --exclude=compile ../sys/i386/i386/mem.c ./i386/i386/mem.c --- ../sys/i386/i386/mem.c Sat May 20 03:08:31 2000 +++ ./i386/i386/mem.c Sat May 20 00:20:07 2000 @@ -175,18 +175,15 @@ /* minor device 0 is physical memory */ case 0: v = uio->uio_offset; - pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v, - uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE, - TRUE); + pmap_kenter((vm_offset_t)ptvmmap, v); o = (int)uio->uio_offset & PAGE_MASK; c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK)); c = min(c, (u_int)(PAGE_SIZE - o)); c = min(c, (u_int)iov->iov_len); error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio); - pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap, - (vm_offset_t)&ptvmmap[PAGE_SIZE]); + pmap_kremove((vm_offset_t)ptvmmap); continue; /* minor device 1 is kernel memory */ case 1: { diff -ru4 --exclude=compile ../sys/i386/i386/pmap.c ./i386/i386/pmap.c --- ../sys/i386/i386/pmap.c Sat May 20 03:08:31 2000 +++ ./i386/i386/pmap.c Sun May 21 04:51:02 2000 @@ -143,23 +143,18 @@ */ #define pte_prot(m, p) (protection_codes[p]) static int protection_codes[8]; -#define pa_index(pa) atop((pa) - vm_first_phys) -#define pa_to_pvh(pa) (&pv_table[pa_index(pa)]) - static struct pmap kernel_pmap_store; pmap_t kernel_pmap; vm_offset_t avail_start; /* PA of first available physical page */ vm_offset_t avail_end; /* PA of last available physical page */ vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ static boolean_t pmap_initialized = FALSE; /* Has pmap_init completed? 
*/ -static vm_offset_t vm_first_phys; static int pgeflag; /* PG_G or-in */ static int pseflag; /* PG_PS or-in */ -static int pv_npg; static vm_object_t kptobj; static int nkpt; @@ -179,9 +174,8 @@ * All those kernel PT submaps that BSD is so fond of */ pt_entry_t *CMAP1 = 0; static pt_entry_t *CMAP2, *ptmmap; -static pv_table_t *pv_table; caddr_t CADDR1 = 0, ptvmmap = 0; static caddr_t CADDR2; static pt_entry_t *msgbufmap; struct msgbuf *msgbufp=0; @@ -196,23 +190,21 @@ static PMAP_INLINE void free_pv_entry __P((pv_entry_t pv)); static unsigned * get_ptbase __P((pmap_t pmap)); static pv_entry_t get_pv_entry __P((void)); static void i386_protection_init __P((void)); -static __inline void pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem)); -static void pmap_clearbit __P((vm_offset_t pa, int bit)); +static __inline void pmap_changebit __P((vm_page_t m, int bit, boolean_t setem)); -static PMAP_INLINE int pmap_is_managed __P((vm_offset_t pa)); -static void pmap_remove_all __P((vm_offset_t pa)); +static void pmap_remove_all __P((vm_page_t m)); static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va, - vm_offset_t pa, vm_page_t mpte)); + vm_page_t m, vm_page_t mpte)); static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq, vm_offset_t sva)); static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va)); -static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv, +static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m, vm_offset_t va)); -static boolean_t pmap_testbit __P((vm_offset_t pa, int bit)); +static boolean_t pmap_testbit __P((vm_page_t m, int bit)); static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va, - vm_page_t mpte, vm_offset_t pa)); + vm_page_t mpte, vm_page_t m)); static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va)); static int pmap_release_free_page __P((pmap_t pmap, vm_page_t p)); @@ -255,9 +247,10 @@ * by using a large (4MB) page for much of the kernel * (.text, .data, .bss) */ static vm_offset_t -pmap_kmem_choose(vm_offset_t addr) { +pmap_kmem_choose(vm_offset_t addr) +{ vm_offset_t newaddr = addr; #ifndef DISABLE_PSE if (cpu_feature & CPUID_PSE) { newaddr = (addr + (NBPDR - 1)) & ~(NBPDR - 1); @@ -487,10 +480,8 @@ void pmap_init(phys_start, phys_end) vm_offset_t phys_start, phys_end; { - vm_offset_t addr; - vm_size_t s; int i; int initial_pvs; /* @@ -498,41 +489,31 @@ */ kptobj = vm_object_allocate(OBJT_DEFAULT, NKPDE); /* - * calculate the number of pv_entries needed - */ - vm_first_phys = phys_avail[0]; - for (i = 0; phys_avail[i + 1]; i += 2); - pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE; - - /* * Allocate memory for random pmap data structures. Includes the * pv_head_table. 
*/ - s = (vm_size_t) (sizeof(pv_table_t) * pv_npg); - s = round_page(s); - addr = (vm_offset_t) kmem_alloc(kernel_map, s); - pv_table = (pv_table_t *) addr; - for(i = 0; i < pv_npg; i++) { - vm_offset_t pa; - TAILQ_INIT(&pv_table[i].pv_list); - pv_table[i].pv_list_count = 0; - pa = vm_first_phys + i * PAGE_SIZE; - pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa); + for(i = 0; i < vm_page_array_size; i++) { + vm_page_t m; + + m = &vm_page_array[i]; + TAILQ_INIT(&m->md.pv_list); + m->md.pv_list_count = 0; } /* * init the pv free list */ - initial_pvs = pv_npg; + initial_pvs = vm_page_array_size; if (initial_pvs < MINPV) initial_pvs = MINPV; pvzone = &pvzone_store; pvinit = (struct pv_entry *) kmem_alloc(kernel_map, initial_pvs * sizeof (struct pv_entry)); - zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg); + zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, + vm_page_array_size); /* * Now it is safe to enable pv_table recording. */ @@ -544,36 +525,15 @@ * high water mark so that the system can recover from excessive * numbers of pv entries. */ void -pmap_init2() { - pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg; +pmap_init2() +{ + pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size; pv_entry_high_water = 9 * (pv_entry_max / 10); zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1); } -/* - * Used to map a range of physical addresses into kernel - * virtual address space. - * - * For now, VM is already on, we only need to map the - * specified memory. - */ -vm_offset_t -pmap_map(virt, start, end, prot) - vm_offset_t virt; - vm_offset_t start; - vm_offset_t end; - int prot; -{ - while (start < end) { - pmap_enter(kernel_pmap, virt, start, prot, FALSE); - virt += PAGE_SIZE; - start += PAGE_SIZE; - } - return (virt); -} - /*************************************************** * Low level helper routines..... ***************************************************/ @@ -584,9 +544,10 @@ * This code checks for non-writeable/modified pages. * This should be an invalid condition. */ static int -pmap_nw_modified(pt_entry_t ptea) { +pmap_nw_modified(pt_entry_t ptea) +{ int pte; pte = (int) ptea; @@ -602,17 +563,19 @@ * this routine defines the region(s) of memory that should * not be tested for the modified bit. */ static PMAP_INLINE int -pmap_track_modified( vm_offset_t va) { +pmap_track_modified(vm_offset_t va) +{ if ((va < clean_sva) || (va >= clean_eva)) return 1; else return 0; } static PMAP_INLINE void -invltlb_1pg( vm_offset_t va) { +invltlb_1pg(vm_offset_t va) +{ #if defined(I386_CPU) if (cpu_class == CPUCLASS_386) { invltlb(); } else @@ -741,28 +704,8 @@ return 0; } -/* - * determine if a page is managed (memory vs. device) - */ -static PMAP_INLINE int -pmap_is_managed(pa) - vm_offset_t pa; -{ - int i; - - if (!pmap_initialized) - return 0; - - for (i = 0; phys_avail[i + 1]; i += 2) { - if (pa < phys_avail[i + 1] && pa >= phys_avail[i]) - return 1; - } - return 0; -} - - /*************************************************** * Low level mapping routines..... ***************************************************/ @@ -801,8 +744,31 @@ invltlb_1pg(va); /* XXX what about SMP? */ } /* + * Used to map a range of physical addresses into kernel + * virtual address space. + * + * For now, VM is already on, we only need to map the + * specified memory. 
+ */ +vm_offset_t +pmap_map(virt, start, end, prot) + vm_offset_t virt; + vm_offset_t start; + vm_offset_t end; + int prot; +{ + while (start < end) { + pmap_kenter(virt, start); + virt += PAGE_SIZE; + start += PAGE_SIZE; + } + return (virt); +} + + +/* * Add a list of wired pages to the kva * this routine is only used for temporary * kernel mappings that do not need to have * page modification or references recorded. @@ -1077,9 +1043,10 @@ return 0; } static PMAP_INLINE int -pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) { +pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) +{ vm_page_unhold(m); if (m->hold_count == 0) return _pmap_unwire_pte_hold(pmap, m); else @@ -1538,12 +1505,11 @@ * This routine is very drastic, but can save the system * in a pinch. */ void -pmap_collect() { - pv_table_t *ppv; +pmap_collect() +{ int i; - vm_offset_t pa; vm_page_t m; static int warningdone=0; if (pmap_pagedaemon_waken == 0) @@ -1553,18 +1519,14 @@ printf("pmap_collect: collecting pv entries -- suggest increasing PMAP_SHPGPERPROC\n"); warningdone++; } - for(i = 0; i < pv_npg; i++) { - if ((ppv = &pv_table[i]) == 0) - continue; - m = ppv->pv_vm_page; - if ((pa = VM_PAGE_TO_PHYS(m)) == 0) - continue; + for(i = 0; i < vm_page_array_size; i++) { + m = &vm_page_array[i]; if (m->wire_count || m->hold_count || m->busy || - (m->flags & PG_BUSY)) + (m->flags & PG_BUSY)) continue; - pmap_remove_all(pa); + pmap_remove_all(m); } pmap_pagedaemon_waken = 0; } @@ -1576,20 +1538,20 @@ * the entry. In either case we free the now unused entry. */ static int -pmap_remove_entry(pmap, ppv, va) +pmap_remove_entry(pmap, m, va) struct pmap *pmap; - pv_table_t *ppv; + vm_page_t m; vm_offset_t va; { pv_entry_t pv; int rtval; int s; s = splvm(); - if (ppv->pv_list_count < pmap->pm_stats.resident_count) { - for (pv = TAILQ_FIRST(&ppv->pv_list); + if (m->md.pv_list_count < pmap->pm_stats.resident_count) { + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { if (pmap == pv->pv_pmap && va == pv->pv_va) break; @@ -1606,12 +1568,12 @@ rtval = 0; if (pv) { rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem); - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); - ppv->pv_list_count--; - if (TAILQ_FIRST(&ppv->pv_list) == NULL) - vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE); + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); + m->md.pv_list_count--; + if (TAILQ_FIRST(&m->md.pv_list) == NULL) + vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist); free_pv_entry(pv); } @@ -1624,30 +1586,27 @@ * Create a pv entry for page at pa for * (pmap, va). 
*/ static void -pmap_insert_entry(pmap, va, mpte, pa) +pmap_insert_entry(pmap, va, mpte, m) pmap_t pmap; vm_offset_t va; vm_page_t mpte; - vm_offset_t pa; + vm_page_t m; { int s; pv_entry_t pv; - pv_table_t *ppv; s = splvm(); pv = get_pv_entry(); pv->pv_va = va; pv->pv_pmap = pmap; pv->pv_ptem = mpte; TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist); - - ppv = pa_to_pvh(pa); - TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list); - ppv->pv_list_count++; + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); + m->md.pv_list_count++; splx(s); } @@ -1660,9 +1619,9 @@ unsigned *ptq; vm_offset_t va; { unsigned oldpte; - pv_table_t *ppv; + vm_page_t m; oldpte = loadandclear(ptq); if (oldpte & PG_W) pmap->pm_stats.wired_count -= 1; @@ -1673,9 +1632,9 @@ if (oldpte & PG_G) invlpg(va); pmap->pm_stats.resident_count -= 1; if (oldpte & PG_MANAGED) { - ppv = pa_to_pvh(oldpte); + m = PHYS_TO_VM_PAGE(oldpte); if (oldpte & PG_M) { #if defined(PMAP_DIAGNOSTIC) if (pmap_nw_modified((pt_entry_t) oldpte)) { printf( @@ -1683,13 +1642,13 @@ va, oldpte); } #endif if (pmap_track_modified(va)) - vm_page_dirty(ppv->pv_vm_page); + vm_page_dirty(m); } if (oldpte & PG_A) - vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED); - return pmap_remove_entry(pmap, ppv, va); + vm_page_flag_set(m, PG_REFERENCED); + return pmap_remove_entry(pmap, m, va); } else { return pmap_unuse_pt(pmap, va, NULL); } @@ -1835,29 +1794,27 @@ * pmap_remove (slow...) */ static void -pmap_remove_all(pa) - vm_offset_t pa; +pmap_remove_all(m) + vm_page_t m; { register pv_entry_t pv; - pv_table_t *ppv; register unsigned *pte, tpte; int s; #if defined(PMAP_DIAGNOSTIC) /* * XXX this makes pmap_page_protect(NONE) illegal for non-managed * pages! */ - if (!pmap_is_managed(pa)) { + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) { panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", pa); } #endif s = splvm(); - ppv = pa_to_pvh(pa); - while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) { + while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pv->pv_pmap->pm_stats.resident_count--; pte = pmap_pte_quick(pv->pv_pmap, pv->pv_va); @@ -1865,9 +1822,9 @@ if (tpte & PG_W) pv->pv_pmap->pm_stats.wired_count--; if (tpte & PG_A) - vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED); + vm_page_flag_set(m, PG_REFERENCED); /* * Update the vm_page_t clean and reference bits. 
*/ @@ -1879,20 +1836,20 @@ pv->pv_va, tpte); } #endif if (pmap_track_modified(pv->pv_va)) - vm_page_dirty(ppv->pv_vm_page); + vm_page_dirty(m); } pmap_TLB_invalidate(pv->pv_pmap, pv->pv_va); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); - ppv->pv_list_count--; + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); + m->md.pv_list_count--; pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); } - vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE); + vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); splx(s); } @@ -1907,9 +1864,8 @@ vm_offset_t pdnxt, ptpaddr; vm_pindex_t sindex, eindex; int anychanged; - if (pmap == NULL) return; if ((prot & VM_PROT_READ) == VM_PROT_NONE) { @@ -1954,24 +1910,24 @@ for (; sindex != pdnxt; sindex++) { unsigned pbits; - pv_table_t *ppv; + vm_page_t m; pbits = ptbase[sindex]; if (pbits & PG_MANAGED) { - ppv = NULL; + m = NULL; if (pbits & PG_A) { - ppv = pa_to_pvh(pbits); - vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED); + m = PHYS_TO_VM_PAGE(pbits); + vm_page_flag_set(m, PG_REFERENCED); pbits &= ~PG_A; } if (pbits & PG_M) { if (pmap_track_modified(i386_ptob(sindex))) { - if (ppv == NULL) - ppv = pa_to_pvh(pbits); - vm_page_dirty(ppv->pv_vm_page); + if (m == NULL) + m = PHYS_TO_VM_PAGE(pbits); + vm_page_dirty(m); pbits &= ~PG_M; } } } @@ -2000,11 +1956,12 @@ * or lose information. That is, this routine must actually * insert this page into the given map NOW. */ void -pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot, +pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, boolean_t wired) { + vm_offset_t pa; register unsigned *pte; vm_offset_t opa; vm_offset_t origpte, newpte; vm_page_t mpte; @@ -2057,10 +2014,10 @@ panic("pmap_enter: invalid page directory, pdir=%p, va=0x%x\n", (void *)pmap->pm_pdir[PTDPTDI], va); } + pa = VM_PAGE_TO_PHYS(m) & PG_FRAME; origpte = *(vm_offset_t *)pte; - pa &= PG_FRAME; opa = origpte & PG_FRAME; if (origpte & PG_PS) panic("pmap_enter: attempted pmap_enter on 4MB page"); @@ -2113,11 +2070,11 @@ * so we go ahead and sense modify status. */ if (origpte & PG_MANAGED) { if ((origpte & PG_M) && pmap_track_modified(va)) { - pv_table_t *ppv; - ppv = pa_to_pvh(opa); - vm_page_dirty(ppv->pv_vm_page); + vm_page_t om; + om = PHYS_TO_VM_PAGE(opa); + vm_page_dirty(om); } pa |= PG_MANAGED; } goto validate; @@ -2133,14 +2090,14 @@ panic("pmap_enter: pte vanished, va: 0x%x", va); } /* - * Enter on the PV list if part of our managed memory Note that we + * Enter on the PV list if part of our managed memory. Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. */ - if (pmap_is_managed(pa)) { - pmap_insert_entry(pmap, va, mpte, pa); + if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) { + pmap_insert_entry(pmap, va, mpte, m); pa |= PG_MANAGED; } /* @@ -2192,15 +2149,16 @@ * but is *MUCH* faster than pmap_enter... */ static vm_page_t -pmap_enter_quick(pmap, va, pa, mpte) +pmap_enter_quick(pmap, va, m, mpte) register pmap_t pmap; vm_offset_t va; - register vm_offset_t pa; + vm_page_t m; vm_page_t mpte; { - register unsigned *pte; + unsigned *pte; + vm_offset_t pa; /* * In the case that a page table page is not * resident, we are creating it here. @@ -2260,19 +2218,21 @@ return 0; } /* - * Enter on the PV list if part of our managed memory Note that we + * Enter on the PV list if part of our managed memory. 
Note that we * raise IPL while manipulating pv_table since pmap_enter can be * called at interrupt time. */ - pmap_insert_entry(pmap, va, mpte, pa); + pmap_insert_entry(pmap, va, mpte, m); /* * Increment counters */ pmap->pm_stats.resident_count++; + pa = VM_PAGE_TO_PHYS(m); + /* * Now validate mapping with RO protection */ *pte = pa | PG_V | PG_U | PG_MANAGED; @@ -2398,10 +2358,9 @@ if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); mpte = pmap_enter_quick(pmap, - addr + i386_ptob(tmpidx), - VM_PAGE_TO_PHYS(p), mpte); + addr + i386_ptob(tmpidx), p, mpte); vm_page_flag_set(p, PG_MAPPED); vm_page_wakeup(p); } objpgs -= 1; @@ -2419,10 +2378,9 @@ if ((p->queue - p->pc) == PQ_CACHE) vm_page_deactivate(p); vm_page_busy(p); mpte = pmap_enter_quick(pmap, - addr + i386_ptob(tmpidx), - VM_PAGE_TO_PHYS(p), mpte); + addr + i386_ptob(tmpidx), p, mpte); vm_page_flag_set(p, PG_MAPPED); vm_page_wakeup(p); } } @@ -2515,10 +2473,9 @@ if ((m->queue - m->pc) == PQ_CACHE) { vm_page_deactivate(m); } vm_page_busy(m); - mpte = pmap_enter_quick(pmap, addr, - VM_PAGE_TO_PHYS(m), mpte); + mpte = pmap_enter_quick(pmap, addr, m, mpte); vm_page_flag_set(m, PG_MAPPED); vm_page_wakeup(m); } } @@ -2576,8 +2533,9 @@ vm_offset_t addr; vm_offset_t end_addr = src_addr + len; vm_offset_t pdnxt; unsigned src_frame, dst_frame; + vm_page_t m; if (dst_addr != src_addr) return; @@ -2658,13 +2616,13 @@ * Clear the modified and * accessed (referenced) bits * during the copy. */ + m = PHYS_TO_VM_PAGE(ptetemp); *dst_pte = ptetemp & ~(PG_M | PG_A); dst_pmap->pm_stats.resident_count++; pmap_insert_entry(dst_pmap, addr, - dstmpte, - (ptetemp & PG_FRAME)); + dstmpte, m); } else { pmap_unwire_pte_hold(dst_pmap, dstmpte); } if (dstmpte->hold_count >= srcmpte->hold_count) @@ -2849,26 +2807,24 @@ * this routine returns true if a physical page resides * in the given pmap. */ boolean_t -pmap_page_exists(pmap, pa) +pmap_page_exists(pmap, m) pmap_t pmap; - vm_offset_t pa; + vm_page_t m; { register pv_entry_t pv; - pv_table_t *ppv; int s; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; s = splvm(); - ppv = pa_to_pvh(pa); /* * Not found, check current mappings returning immediately if found. */ - for (pv = TAILQ_FIRST(&ppv->pv_list); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { if (pv->pv_pmap == pmap) { splx(s); @@ -2893,11 +2849,11 @@ pmap_t pmap; vm_offset_t sva, eva; { unsigned *pte, tpte; - pv_table_t *ppv; pv_entry_t pv, npv; int s; + vm_page_t m; #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY if (!curproc || (pmap != vmspace_pmap(curproc->p_vmspace))) { printf("warning: pmap_remove_pages called with non-current pmap\n"); @@ -2930,30 +2886,30 @@ continue; } *pte = 0; - ppv = pa_to_pvh(tpte); + m = PHYS_TO_VM_PAGE(tpte); - KASSERT(ppv < &pv_table[pv_npg], + KASSERT(m < &vm_page_array[vm_page_array_size], ("pmap_remove_pages: bad tpte %x", tpte)); pv->pv_pmap->pm_stats.resident_count--; /* * Update the vm_page_t clean and reference bits. 
*/ if (tpte & PG_M) { - vm_page_dirty(ppv->pv_vm_page); + vm_page_dirty(m); } npv = TAILQ_NEXT(pv, pv_plist); TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist); - ppv->pv_list_count--; - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); - if (TAILQ_FIRST(&ppv->pv_list) == NULL) { - vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE); + m->md.pv_list_count--; + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); + if (TAILQ_FIRST(&m->md.pv_list) == NULL) { + vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE); } pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem); free_pv_entry(pv); @@ -2967,27 +2923,25 @@ * note that the testbit/changebit routines are inline, * and a lot of things compile-time evaluate. */ static boolean_t -pmap_testbit(pa, bit) - register vm_offset_t pa; +pmap_testbit(m, bit) + vm_page_t m; int bit; { - register pv_entry_t pv; - pv_table_t *ppv; + pv_entry_t pv; unsigned *pte; int s; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return FALSE; - ppv = pa_to_pvh(pa); - if (TAILQ_FIRST(&ppv->pv_list) == NULL) + if (TAILQ_FIRST(&m->md.pv_list) == NULL) return FALSE; s = splvm(); - for (pv = TAILQ_FIRST(&ppv->pv_list); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { /* @@ -3019,29 +2973,27 @@ /* * this routine is used to modify bits in ptes */ static __inline void -pmap_changebit(pa, bit, setem) - vm_offset_t pa; +pmap_changebit(m, bit, setem) + vm_page_t m; int bit; boolean_t setem; { register pv_entry_t pv; - pv_table_t *ppv; register unsigned *pte; int s; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return; s = splvm(); - ppv = pa_to_pvh(pa); /* * Loop over all current mappings setting/clearing as appropos If * setting RO do we need to clear the VAC? */ - for (pv = TAILQ_FIRST(&ppv->pv_list); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { /* @@ -3068,9 +3020,9 @@ vm_offset_t pbits = *(vm_offset_t *)pte; if (pbits & bit) { if (bit == PG_RW) { if (pbits & PG_M) { - vm_page_dirty(ppv->pv_vm_page); + vm_page_dirty(m); } *(int *)pte = pbits & ~(PG_M|PG_RW); } else { *(int *)pte = pbits & ~bit; @@ -3082,34 +3034,20 @@ splx(s); } /* - * pmap_clearbit: - * - * Clear a bit/bits in every pte mapping a given physical page. Making - * this inline allows the pmap_changebit inline to be well optimized. - */ -static __inline void -pmap_clearbit( - vm_offset_t pa, - int bit) -{ - pmap_changebit(pa, bit, FALSE); -} - -/* * pmap_page_protect: * * Lower the permission for all mappings to a given page. */ void -pmap_page_protect(vm_offset_t phys, vm_prot_t prot) +pmap_page_protect(vm_page_t m, vm_prot_t prot) { if ((prot & VM_PROT_WRITE) == 0) { if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) { - pmap_clearbit(phys, PG_RW); + pmap_changebit(m, PG_RW, FALSE); } else { - pmap_remove_all(phys); + pmap_remove_all(m); } } } @@ -3125,33 +3063,30 @@ * * Return the count of reference bits for a page, clearing all of them. 
*/ int -pmap_ts_referenced(vm_offset_t pa) +pmap_ts_referenced(vm_page_t m) { register pv_entry_t pv, pvf, pvn; - pv_table_t *ppv; unsigned *pte; int s; int rtval = 0; - if (!pmap_is_managed(pa)) + if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) return (rtval); s = splvm(); - ppv = pa_to_pvh(pa); - - if ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) { + if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { pvf = pv; do { pvn = TAILQ_NEXT(pv, pv_list); - TAILQ_REMOVE(&ppv->pv_list, pv, pv_list); + TAILQ_REMOVE(&m->md.pv_list, pv, pv_list); - TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list); + TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list); if (!pmap_track_modified(pv->pv_va)) continue; @@ -3180,31 +3115,31 @@ * Return whether or not the specified physical page was modified * in any physical maps. */ boolean_t -pmap_is_modified(vm_offset_t pa) +pmap_is_modified(vm_page_t m) { - return pmap_testbit((pa), PG_M); + return pmap_testbit(m, PG_M); } /* * Clear the modify bits on the specified physical page. */ void -pmap_clear_modify(vm_offset_t pa) +pmap_clear_modify(vm_page_t m) { - pmap_clearbit(pa, PG_M); + pmap_changebit(m, PG_M, FALSE); } /* * pmap_clear_reference: * * Clear the reference bit on the specified physical page. */ void -pmap_clear_reference(vm_offset_t pa) +pmap_clear_reference(vm_page_t m) { - pmap_clearbit(pa, PG_A); + pmap_changebit(m, PG_A, FALSE); } /* * Miscellaneous support routines follow @@ -3303,19 +3238,17 @@ return 0; } if ((pte = *ptep) != 0) { - pv_table_t *ppv; vm_offset_t pa; val = MINCORE_INCORE; if ((pte & PG_MANAGED) == 0) return val; pa = pte & PG_FRAME; - ppv = pa_to_pvh((pa & PG_FRAME)); - m = ppv->pv_vm_page; + m = PHYS_TO_VM_PAGE(pa); /* * Modified by us */ @@ -3323,9 +3256,9 @@ val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER; /* * Modified by someone */ - else if (m->dirty || pmap_is_modified(pa)) + else if (m->dirty || pmap_is_modified(m)) val |= MINCORE_MODIFIED_OTHER; /* * Referenced by us */ @@ -3334,9 +3267,9 @@ /* * Referenced by someone */ - else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) { + else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) { val |= MINCORE_REFERENCED_OTHER; vm_page_flag_set(m, PG_REFERENCED); } } @@ -3360,9 +3293,10 @@ load_cr3(p->p_addr->u_pcb.pcb_cr3 = vtophys(pmap->pm_pdir)); } vm_offset_t -pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) { +pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) +{ if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) { return addr; } @@ -3372,9 +3306,10 @@ } #if defined(PMAP_DEBUG) -pmap_pid_dump(int pid) { +pmap_pid_dump(int pid) +{ pmap_t pmap; struct proc *p; int npte = 0; int index; @@ -3406,9 +3341,9 @@ if (pte && pmap_pte_v(pte)) { vm_offset_t pa; vm_page_t m; pa = *(int *)pte; - m = PHYS_TO_VM_PAGE((pa & PG_FRAME)); + m = PHYS_TO_VM_PAGE(pa); printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags); npte++; index++; @@ -3461,14 +3396,14 @@ void pmap_pvdump(pa) vm_offset_t pa; { - pv_table_t *ppv; register pv_entry_t pv; + vm_page_t m; printf("pa %x", pa); - ppv = pa_to_pvh(pa); - for (pv = TAILQ_FIRST(&ppv->pv_list); + m = PHYS_TO_VM_PAGE(pa); + for (pv = TAILQ_FIRST(&m->md.pv_list); pv; pv = TAILQ_NEXT(pv, pv_list)) { #ifdef used_to_be printf(" -> pmap %p, va %x, flags %x", diff -ru4 --exclude=compile ../sys/i386/include/pmap.h ./i386/include/pmap.h --- ../sys/i386/include/pmap.h Sat May 20 03:08:32 2000 +++ ./i386/include/pmap.h Sat May 20 00:20:08 2000 @@ -187,13 +187,13 @@ /* * 
Pmap stuff */ struct pv_entry; -typedef struct { + +struct md_page { int pv_list_count; - struct vm_page *pv_vm_page; TAILQ_HEAD(,pv_entry) pv_list; -} pv_table_t; +}; struct pmap { pd_entry_t *pm_pdir; /* KVA of page directory */ vm_object_t pm_pteobj; /* Container for pte's */ diff -ru4 --exclude=compile ../sys/pc98/pc98/wd.c ./pc98/pc98/wd.c --- ../sys/pc98/pc98/wd.c Sat May 20 03:08:47 2000 +++ ./pc98/pc98/wd.c Sat May 20 00:20:22 2000 @@ -2166,13 +2166,12 @@ return (EIO); } while (blkcnt != 0) { if (is_physical_memory((vm_offset_t)addr)) - pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, - trunc_page((vm_offset_t)addr), VM_PROT_READ, TRUE); + pmap_kenter((vm_offset_t)CADDR1, + trunc_page((vm_offset_t)addr)); else - pmap_enter(kernel_pmap, (vm_offset_t)CADDR1, - trunc_page(0), VM_PROT_READ, TRUE); + pmap_kenter((vm_offset_t)CADDR1, trunc_page(0)); /* Ready to send data? */ DELAY(5); /* ATA spec */ if (wdwait(du, WDCS_READY | WDCS_SEEKCMPLT | WDCS_DRQ, TIMEOUT) Only in ./ufs/ffs: ffs_softdep.c Only in ./ufs/ffs: softdep.h diff -ru4 --exclude=compile ../sys/vm/pmap.h ./vm/pmap.h --- ../sys/vm/pmap.h Sat May 20 03:08:50 2000 +++ ./vm/pmap.h Sun May 21 04:46:52 2000 @@ -93,30 +93,30 @@ #ifdef __alpha__ void pmap_page_is_free __P((vm_page_t m)); #endif void pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t)); -void pmap_clear_modify __P((vm_offset_t pa)); -void pmap_clear_reference __P((vm_offset_t pa)); +void pmap_clear_modify __P((vm_page_t m)); +void pmap_clear_reference __P((vm_page_t m)); void pmap_collect __P((void)); void pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t, vm_offset_t)); void pmap_copy_page __P((vm_offset_t, vm_offset_t)); void pmap_destroy __P((pmap_t)); -void pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t, +void pmap_enter __P((pmap_t, vm_offset_t, vm_page_t, vm_prot_t, boolean_t)); -vm_offset_t pmap_extract __P((pmap_t, vm_offset_t)); +vm_offset_t pmap_extract __P((pmap_t pmap, vm_offset_t va)); void pmap_growkernel __P((vm_offset_t)); void pmap_init __P((vm_offset_t, vm_offset_t)); -boolean_t pmap_is_modified __P((vm_offset_t pa)); -boolean_t pmap_ts_referenced __P((vm_offset_t pa)); -void pmap_kenter __P((vm_offset_t, vm_offset_t)); +boolean_t pmap_is_modified __P((vm_page_t m)); +boolean_t pmap_ts_referenced __P((vm_page_t m)); +void pmap_kenter __P((vm_offset_t va, vm_offset_t pa)); void pmap_kremove __P((vm_offset_t)); vm_offset_t pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int)); void pmap_object_init_pt __P((pmap_t pmap, vm_offset_t addr, vm_object_t object, vm_pindex_t pindex, vm_offset_t size, int pagelimit)); -boolean_t pmap_page_exists __P((pmap_t, vm_offset_t)); -void pmap_page_protect __P((vm_offset_t, vm_prot_t)); +boolean_t pmap_page_exists __P((pmap_t pmap, vm_page_t m)); +void pmap_page_protect __P((vm_page_t m, vm_prot_t prot)); void pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t, boolean_t)); vm_offset_t pmap_phys_address __P((int)); void pmap_pinit __P((pmap_t)); @@ -139,9 +139,9 @@ void pmap_swapout_proc __P((struct proc *p)); void pmap_swapin_proc __P((struct proc *p)); void pmap_activate __P((struct proc *p)); vm_offset_t pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size)); -void pmap_init2 __P((void)); +void pmap_init2 __P((void)); #endif /* _KERNEL */ #endif /* _PMAP_VM_ */ diff -ru4 --exclude=compile ../sys/vm/swap_pager.c ./vm/swap_pager.c --- ../sys/vm/swap_pager.c Sat May 20 03:08:50 2000 +++ ./vm/swap_pager.c Sat May 20 00:20:25 2000 @@ -1591,9 +1591,9 @@ * vm_page_wakeup(). 
We do not set reqpage's * valid bits here, it is up to the caller. */ - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); m->valid = VM_PAGE_BITS_ALL; vm_page_undirty(m); vm_page_flag_clear(m, PG_ZERO); @@ -1617,9 +1617,9 @@ * status, then finish the I/O ( which decrements the * busy count and possibly wakes waiter's up ). */ vm_page_protect(m, VM_PROT_READ); - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_undirty(m); vm_page_io_finish(m); } } diff -ru4 --exclude=compile ../sys/vm/vm_fault.c ./vm/vm_fault.c --- ../sys/vm/vm_fault.c Sat May 20 03:08:50 2000 +++ ./vm/vm_fault.c Sat May 20 00:20:25 2000 @@ -825,9 +825,9 @@ vm_page_zero_invalid(fs.m, TRUE); printf("Warning: page %p partially invalid on fault\n", fs.m); } - pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired); + pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired); if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) { pmap_prefault(fs.map->pmap, vaddr, fs.entry); } @@ -1074,10 +1074,9 @@ * Enter it in the pmap... */ vm_page_flag_clear(dst_m, PG_ZERO); - pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m), - prot, FALSE); + pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE); vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED); /* * Mark it no longer busy, and put it on the active list. diff -ru4 --exclude=compile ../sys/vm/vm_kern.c ./vm/vm_kern.c --- ../sys/vm/vm_kern.c Sat May 20 03:08:50 2000 +++ ./vm/vm_kern.c Sat May 20 00:20:25 2000 @@ -398,10 +398,9 @@ vm_page_wakeup(m); /* * Because this is kernel_pmap, this call will not block. */ - pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m), - VM_PROT_ALL, 1); + pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1); vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED); } vm_map_unlock(map); diff -ru4 --exclude=compile ../sys/vm/vm_mmap.c ./vm/vm_mmap.c --- ../sys/vm/vm_mmap.c Sat May 20 03:08:50 2000 +++ ./vm/vm_mmap.c Sat May 20 19:28:43 2000 @@ -808,12 +808,12 @@ */ if (m) { mincoreinfo = MINCORE_INCORE; if (m->dirty || - pmap_is_modified(VM_PAGE_TO_PHYS(m))) + pmap_is_modified(m)) mincoreinfo |= MINCORE_MODIFIED_OTHER; if ((m->flags & PG_REFERENCED) || - pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) { + pmap_ts_referenced(m)) { vm_page_flag_set(m, PG_REFERENCED); mincoreinfo |= MINCORE_REFERENCED_OTHER; } } diff -ru4 --exclude=compile ../sys/vm/vm_object.c ./vm/vm_object.c --- ../sys/vm/vm_object.c Sat May 20 03:08:50 2000 +++ ./vm/vm_object.c Sat May 20 00:20:25 2000 @@ -866,9 +866,9 @@ * But we do make the page is freeable as we * can without actually taking the step of unmapping * it. */ - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); m->dirty = 0; m->act_count = 0; vm_page_dontneed(m); if (tobject->type == OBJT_SWAP) diff -ru4 --exclude=compile ../sys/vm/vm_page.c ./vm/vm_page.c --- ../sys/vm/vm_page.c Sat May 20 03:08:50 2000 +++ ./vm/vm_page.c Sat May 20 00:20:25 2000 @@ -118,9 +118,9 @@ } } vm_page_t vm_page_array = 0; -static int vm_page_array_size = 0; +int vm_page_array_size = 0; long first_page = 0; int vm_page_zero_count = 0; static __inline int vm_page_hash __P((vm_object_t object, vm_pindex_t pindex)); @@ -142,8 +142,32 @@ panic("vm_set_page_size: page size not a power of two"); } /* + * vm_add_new_page: + * + * Add a new page to the freelist for use by the system. + * Must be called at splhigh(). 
+ */ +vm_page_t +vm_add_new_page(pa) + vm_offset_t pa; +{ + vm_page_t m; + + ++cnt.v_page_count; + ++cnt.v_free_count; + m = PHYS_TO_VM_PAGE(pa); + m->phys_addr = pa; + m->flags = 0; + m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK; + m->queue = m->pc + PQ_FREE; + TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq); + vm_page_queues[m->queue].lcnt++; + return (m); +} + +/* * vm_page_startup: * * Initializes the resident memory module. * @@ -158,9 +182,8 @@ vm_offset_t enda; register vm_offset_t vaddr; { register vm_offset_t mapped; - register vm_page_t m; register struct vm_page **bucket; vm_size_t npages, page_range; register vm_offset_t new_start; int i; @@ -295,17 +318,9 @@ pa = ptoa(first_managed_page); else pa = phys_avail[i]; while (pa < phys_avail[i + 1] && npages-- > 0) { - ++cnt.v_page_count; - ++cnt.v_free_count; - m = PHYS_TO_VM_PAGE(pa); - m->phys_addr = pa; - m->flags = 0; - m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK; - m->queue = m->pc + PQ_FREE; - TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq); - vm_page_queues[m->queue].lcnt++; + vm_add_new_page(pa); pa += PAGE_SIZE; } } return (mapped); @@ -1517,9 +1532,9 @@ pagebits = vm_page_bits(base, size); m->valid |= pagebits; m->dirty &= ~pagebits; if (base == 0 && size == PAGE_SIZE) { - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_flag_clear(m, PG_NOSYNC); } } @@ -1648,10 +1663,9 @@ void vm_page_test_dirty(m) vm_page_t m; { - if ((m->dirty != VM_PAGE_BITS_ALL) && - pmap_is_modified(VM_PAGE_TO_PHYS(m))) { + if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) { vm_page_dirty(m); } } diff -ru4 --exclude=compile ../sys/vm/vm_page.h ./vm/vm_page.h --- ../sys/vm/vm_page.h Sat May 20 03:08:50 2000 +++ ./vm/vm_page.h Sun May 21 04:54:45 2000 @@ -116,8 +116,9 @@ vm_object_t object; /* which object am I in (O,P)*/ vm_pindex_t pindex; /* offset into object (O,P) */ vm_offset_t phys_addr; /* physical address of page */ + struct md_page md; /* machine dependant stuff */ u_short queue; /* page queue index */ u_short flags, /* see below */ pc; /* page color */ u_short wire_count; /* wired down maps refs (P) */ @@ -277,8 +278,9 @@ extern int vm_page_zero_count; extern vm_page_t vm_page_array; /* First resident page in table */ +extern int vm_page_array_size; /* number of vm_page_t's */ extern long first_page; /* first physical page number */ #define VM_PAGE_TO_PHYS(entry) ((entry)->phys_addr) @@ -395,8 +397,9 @@ vm_page_t vm_page_lookup __P((vm_object_t, vm_pindex_t)); void vm_page_remove __P((vm_page_t)); void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t)); vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t)); +vm_page_t vm_add_new_page __P((vm_offset_t pa)); void vm_page_unwire __P((vm_page_t, int)); void vm_page_wire __P((vm_page_t)); void vm_page_unqueue __P((vm_page_t)); void vm_page_unqueue_nowakeup __P((vm_page_t)); @@ -447,13 +450,13 @@ vm_page_protect(vm_page_t mem, int prot) { if (prot == VM_PROT_NONE) { if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) { - pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE); + pmap_page_protect(mem, VM_PROT_NONE); vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED); } } else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) { - pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ); + pmap_page_protect(mem, VM_PROT_READ); vm_page_flag_clear(mem, PG_WRITEABLE); } } diff -ru4 --exclude=compile ../sys/vm/vm_pageout.c ./vm/vm_pageout.c --- ../sys/vm/vm_pageout.c Sat May 20 03:08:50 2000 +++ ./vm/vm_pageout.c Sun May 21 04:55:26 2000 @@ 
-394,9 +394,9 @@ * Page outside of range of object. Right now we * essentially lose the changes by pretending it * worked. */ - pmap_clear_modify(VM_PAGE_TO_PHYS(mt)); + pmap_clear_modify(mt); vm_page_undirty(mt); break; case VM_PAGER_ERROR: case VM_PAGER_FAIL: @@ -474,14 +474,14 @@ if (p->wire_count != 0 || p->hold_count != 0 || p->busy != 0 || (p->flags & PG_BUSY) || - !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) { + !pmap_page_exists(vm_map_pmap(map), p)) { p = next; continue; } - actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p)); + actcount = pmap_ts_referenced(p); if (actcount) { vm_page_flag_set(p, PG_REFERENCED); } else if (p->flags & PG_REFERENCED) { actcount = 1; @@ -708,9 +708,9 @@ * references. */ if (m->object->ref_count == 0) { vm_page_flag_clear(m, PG_REFERENCED); - pmap_clear_reference(VM_PAGE_TO_PHYS(m)); + pmap_clear_reference(m); /* * Otherwise, if the page has been referenced while in the * inactive queue, we bump the "activation count" upwards, @@ -720,9 +720,9 @@ * level VM system not knowing anything about existing * references. */ } else if (((m->flags & PG_REFERENCED) == 0) && - (actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) { + (actcount = pmap_ts_referenced(m))) { vm_page_activate(m); m->act_count += (actcount + ACT_ADVANCE); continue; } @@ -734,9 +734,9 @@ * likely place pages back onto the inactive queue again. */ if ((m->flags & PG_REFERENCED) != 0) { vm_page_flag_clear(m, PG_REFERENCED); - actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)); + actcount = pmap_ts_referenced(m); vm_page_activate(m); m->act_count += (actcount + ACT_ADVANCE + 1); continue; } @@ -986,9 +986,9 @@ if (m->object->ref_count != 0) { if (m->flags & PG_REFERENCED) { actcount += 1; } - actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m)); + actcount += pmap_ts_referenced(m); if (actcount) { m->act_count += ACT_ADVANCE + actcount; if (m->act_count > ACT_MAX) m->act_count = ACT_MAX; @@ -1198,9 +1198,9 @@ vm_page_flag_clear(m, PG_REFERENCED); actcount += 1; } - actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m)); + actcount += pmap_ts_referenced(m); if (actcount) { m->act_count += ACT_ADVANCE + actcount; if (m->act_count > ACT_MAX) m->act_count = ACT_MAX; diff -ru4 --exclude=compile ../sys/vm/vnode_pager.c ./vm/vnode_pager.c --- ../sys/vm/vnode_pager.c Sat May 20 03:08:50 2000 +++ ./vm/vnode_pager.c Sat May 20 00:20:25 2000 @@ -451,9 +451,9 @@ bzero((caddr_t) kva + i * bsize, bsize); } } vm_pager_unmap_page(kva); - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_flag_clear(m, PG_ZERO); if (error) { return VM_PAGER_ERROR; } @@ -514,9 +514,9 @@ bzero((caddr_t) kva + count, PAGE_SIZE - count); } vm_pager_unmap_page(kva); } - pmap_clear_modify(VM_PAGE_TO_PHYS(m)); + pmap_clear_modify(m); vm_page_undirty(m); vm_page_flag_clear(m, PG_ZERO); if (!error) m->valid = VM_PAGE_BITS_ALL; @@ -781,9 +781,9 @@ * Read filled up entire page. */ mt->valid = VM_PAGE_BITS_ALL; vm_page_undirty(mt); /* should be an assert? XXX */ - pmap_clear_modify(VM_PAGE_TO_PHYS(mt)); + pmap_clear_modify(mt); } else { /* * Read did not fill up entire page. Since this * is getpages, the page may be mapped, so we have
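
A few illustrative sketches follow; they are not part of the patch. The first shows the caller-visible effect of the interface change: machine-independent VM code now hands the pmap layer a vm_page_t instead of a physical address, and the pmap derives the physical address itself with VM_PAGE_TO_PHYS(). The identifiers m, map, va, prot, wired and actcount stand for whatever the caller already has in scope; the call sites correspond to the vm_fault.c, vm_pageout.c and swap_pager.c hunks above.

        /* Before this change: MI code passed a physical address. */
        pmap_clear_modify(VM_PAGE_TO_PHYS(m));
        actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
        pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(m), prot, wired);

        /* After: the vm_page_t itself is the handle. */
        pmap_clear_modify(m);
        actcount = pmap_ts_referenced(m);
        pmap_enter(map->pmap, va, m, prot, wired);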
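
The structural change behind that interface is the removal of the global pv_table[] (indexed via pa_to_pvh()) in favor of a struct md_page embedded in every vm_page. The standalone program below is a minimal userland model of that layout, assuming only <sys/queue.h>; the field names (md, pv_list, pv_list_count) follow the pmap.h hunks above, while main() and the constants are purely illustrative.

#include <sys/queue.h>
#include <stdio.h>

struct pv_entry {
	void			*pv_pmap;	/* owning pmap (opaque in this model) */
	unsigned long		 pv_va;		/* virtual address of the mapping */
	TAILQ_ENTRY(pv_entry)	 pv_list;	/* link on the page's pv list */
};

struct md_page {				/* machine-dependent per-page data */
	int			 pv_list_count;
	TAILQ_HEAD(, pv_entry)	 pv_list;
};

struct vm_page {				/* heavily trimmed for the model */
	unsigned long		 phys_addr;
	struct md_page		 md;		/* embedded; no separate pv_table[] */
};

int
main(void)
{
	struct vm_page page;
	struct pv_entry pv;

	/* What pmap_init() now does for each entry of vm_page_array[]. */
	page.phys_addr = 0x2000;
	TAILQ_INIT(&page.md.pv_list);
	page.md.pv_list_count = 0;

	/* What pmap_insert_entry() now does, with no pa_to_pvh() lookup. */
	pv.pv_pmap = NULL;
	pv.pv_va = 0x00120000UL;
	TAILQ_INSERT_TAIL(&page.md.pv_list, &pv, pv_list);
	page.md.pv_list_count++;

	printf("page %#lx has %d mapping(s)\n", page.phys_addr,
	    page.md.pv_list_count);
	return (0);
}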
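
One helper is added rather than changed: vm_add_new_page(), which factors the per-page free-list initialization out of vm_page_startup() and is exported through vm_page.h (see the vm/vm_page.c and vm/vm_page.h hunks). Usage is exactly the startup loop in the patch; a caller walking a phys_avail[] range looks like this, with i, pa and npages as in vm_page_startup():

	while (pa < phys_avail[i + 1] && npages-- > 0) {
		vm_add_new_page(pa);
		pa += PAGE_SIZE;
	}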
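
Finally, note that the raw-memory paths (pmap_map() itself, /dev/mem, and the dump routines in scsi_da.c, ata-disk.c and wd.c) switch from pmap_enter() to pmap_kenter(). That reads as a consequence of the new signature: pmap_enter() now wants a vm_page_t, while these callers map arbitrary physical addresses that may have no vm_page behind them and need no pv tracking, so the kernel-only primitive is the better fit. The recurring pattern, taken verbatim from the dump hunks:

	if (is_physical_memory(addr))
		pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr));
	else
		pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));

(The /dev/mem hunk pairs this with pmap_kremove() instead of a full pmap_remove() once the uiomove() completes.)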