diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/alpha/alpha/pmap.c ./alpha/alpha/pmap.c
--- ../sys/alpha/alpha/pmap.c	Sun May 7 00:38:18 2000
+++ ./alpha/alpha/pmap.c	Fri May 12 20:43:05 2000
@@ -239,7 +239,4 @@
 int protection_codes[2][8];
 
-#define	pa_index(pa)	atop((pa) - vm_first_phys)
-#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
-
 /*
  * Return non-zero if this pmap is currently active
@@ -321,6 +318,4 @@
 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
-static vm_offset_t vm_first_phys;
-static int pv_npg;
 
 static vm_object_t kptobj;
@@ -352,5 +347,4 @@
 pt_entry_t *CMAP1 = 0;
 static pt_entry_t *CMAP2;
-static pv_table_t *pv_table;
 caddr_t CADDR1;
 static caddr_t CADDR2;
@@ -359,16 +353,15 @@
 static pv_entry_t get_pv_entry __P((void));
 static void	alpha_protection_init __P((void));
-static void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
+static void	pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
 
-static PMAP_INLINE int	pmap_is_managed __P((vm_offset_t pa));
-static void	pmap_remove_all __P((vm_offset_t pa));
+static void	pmap_remove_all __P((vm_page_t m));
 static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
-		vm_offset_t pa, vm_page_t mpte));
+		vm_page_t m, vm_page_t mpte));
 static int pmap_remove_pte __P((pmap_t pmap, pt_entry_t* ptq, vm_offset_t sva));
 static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
-static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
+static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m,
		vm_offset_t va));
 static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
-		vm_page_t mpte, vm_offset_t pa));
+		vm_page_t mpte, vm_page_t m));
 
 static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
@@ -629,37 +622,25 @@
 	vm_offset_t phys_start, phys_end;
 {
-	vm_offset_t addr;
-	vm_size_t s;
 	int i;
 	int initial_pvs;
 
 	/*
-	 * calculate the number of pv_entries needed
-	 */
-	vm_first_phys = phys_avail[0];
-	for (i = 0; phys_avail[i + 1]; i += 2);
-	pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
-
-	/*
	 * Allocate memory for random pmap data structures. Includes the
	 * pv_head_table.
	 */
-	s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
-	s = round_page(s);
-	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
-	pv_table = (pv_table_t *) addr;
-	for(i = 0; i < pv_npg; i++) {
-		vm_offset_t pa;
-		TAILQ_INIT(&pv_table[i].pv_list);
-		pv_table[i].pv_list_count = 0;
-		pa = vm_first_phys + i * PAGE_SIZE;
-		pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
-	}
+	for(i = 0; i < vm_page_array_size; i++) {
+		vm_page_t m;
+
+		m = &vm_page_array[i];
+		TAILQ_INIT(&m->md.pv_list);
+		m->md.pv_list_count = 0;
+		m->md.pv_flags = 0;
+	}
 
 	/*
	 * init the pv free list
	 */
-	initial_pvs = pv_npg;
+	initial_pvs = vm_page_array_size;
 	if (initial_pvs < MINPV)
 		initial_pvs = MINPV;
@@ -667,5 +648,6 @@
 	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
		initial_pvs * sizeof (struct pv_entry));
-	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg);
+	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
+	    vm_page_array_size);
 
 	/*
	 * object for kernel page table pages
@@ -687,5 +669,5 @@
 pmap_init2()
 {
-	pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
+	pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
@@ -703,5 +685,5 @@
 {
 	while (start < end) {
-		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+		pmap_kenterp(virt, start);
 		virt += PAGE_SIZE;
 		start += PAGE_SIZE;
@@ -826,23 +808,4 @@
 }
 
-/*
- * determine if a page is managed (memory vs. device)
- */
-static PMAP_INLINE int
-pmap_is_managed(pa)
-	vm_offset_t pa;
-{
-	int i;
-
-	if (!pmap_initialized)
-		return 0;
-
-	for (i = 0; phys_avail[i + 1]; i += 2) {
-		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
-			return 1;
-	}
-	return 0;
-}
-
 /***************************************************
@@ -919,4 +882,11 @@
 }
 
+void
+pmap_kenterp(vm_offset_t va, vm_offset_t pa)
+{
+	/* XXX placeholder, dyson did other stuff here */
+	pmap_kenter(va, pa);
+}
+
 /*
  * remove a page from the kernel pagetables
@@ -1701,7 +1671,5 @@
 pmap_collect()
 {
-	pv_table_t *ppv;
 	int i;
-	vm_offset_t pa;
 	vm_page_t m;
 	static int warningdone=0;
@@ -1715,14 +1683,12 @@
 	}
 
-	for(i = 0; i < pv_npg; i++) {
-		if ((ppv = &pv_table[i]) == 0)
-			continue;
-		m = ppv->pv_vm_page;
-		if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
-			continue;
+	for(i = 0; i < vm_page_array_size; i++) {
+		m = &vm_page_array[i];
 		if (m->wire_count || m->hold_count || m->busy ||
-		    (m->flags & PG_BUSY))
+		    (m->flags & PG_BUSY))
 			continue;
-		pmap_remove_all(pa);
+/* XXX dyson		m->m_flags |= PG_BUSY */
+		pmap_remove_all(m);
+/* XXX dyson		vm_page_wakeup(m); */
 	}
 	pmap_pagedaemon_waken = 0;
@@ -1738,5 +1704,5 @@
 static int
-pmap_remove_entry(pmap_t pmap, pv_table_t* ppv, vm_offset_t va)
+pmap_remove_entry(pmap_t pmap, vm_page_t m, vm_offset_t va)
 {
 	pv_entry_t pv;
@@ -1745,6 +1711,6 @@
 	s = splvm();
-	if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
-		for (pv = TAILQ_FIRST(&ppv->pv_list);
+	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
+		for (pv = TAILQ_FIRST(&m->md.pv_list);
			pv;
			pv = TAILQ_NEXT(pv, pv_list)) {
@@ -1764,8 +1730,8 @@
 	if (pv) {
 		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
-		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
-		ppv->pv_list_count--;
-		if (TAILQ_FIRST(&ppv->pv_list) == NULL)
-			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count--;
+		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
+			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 
 		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
@@ -1782,10 +1748,9 @@
  */
 static void
-pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_offset_t pa)
+pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_page_t mpte, vm_page_t m)
 {
 	int s;
 	pv_entry_t pv;
-	pv_table_t *ppv;
 
 	s = splvm();
@@ -1796,8 +1761,6 @@
 	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
-
-	ppv = pa_to_pvh(pa);
-	TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
-	ppv->pv_list_count++;
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	m->md.pv_list_count++;
 
 	splx(s);
@@ -1811,5 +1774,5 @@
 {
 	pt_entry_t oldpte;
-	pv_table_t *ppv;
+	vm_page_t m;
 
 	oldpte = *ptq;
@@ -1821,6 +1784,6 @@
 	pmap->pm_stats.resident_count -= 1;
 	if (oldpte & PG_MANAGED) {
-		ppv = pa_to_pvh(pmap_pte_pa(&oldpte));
-		return pmap_remove_entry(pmap, ppv, va);
+		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&oldpte));
+		return pmap_remove_entry(pmap, m, va);
 	} else {
 		return pmap_unuse_pt(pmap, va, NULL);
@@ -1912,8 +1875,7 @@
 static void
-pmap_remove_all(vm_offset_t pa)
+pmap_remove_all(vm_page_t m)
 {
 	register pv_entry_t pv;
-	pv_table_t *ppv;
 	pt_entry_t *pte, tpte;
 	int nmodify;
@@ -1926,5 +1888,5 @@
	 * pages!
	 */
-	if (!pmap_is_managed(pa)) {
-		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", pa);
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%lx", VM_PAGE_TO_PHYS(m));
 	}
@@ -1932,12 +1894,11 @@
 	s = splvm();
-	ppv = pa_to_pvh(pa);
-	while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
+	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pte = pmap_lev3pte(pv->pv_pmap, pv->pv_va);
 		pv->pv_pmap->pm_stats.resident_count--;
-		if (pmap_pte_pa(pte) != pa)
-			panic("pmap_remove_all: pv_table for %lx is inconsistent", pa);
+		if (pmap_pte_pa(pte) != VM_PAGE_TO_PHYS(m))
+			panic("pmap_remove_all: pv_table for %lx is inconsistent", VM_PAGE_TO_PHYS(m));
 
 		tpte = *pte;
@@ -1951,11 +1912,11 @@
 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
-		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
-		ppv->pv_list_count--;
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count--;
 		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
 		free_pv_entry(pv);
 	}
 
-	vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
+	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 
 	splx(s);
@@ -2040,7 +2001,8 @@
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
 {
+	vm_offset_t pa;
 	pt_entry_t *pte;
 	vm_offset_t opa;
@@ -2077,5 +2039,5 @@
 	origpte = *pte;
-	pa &= ~PAGE_MASK;
+	pa = VM_PAGE_TO_PHYS(m) & ~PAGE_MASK;
 	managed = 0;
 	opa = pmap_pte_pa(pte);
@@ -2117,10 +2079,10 @@
 
 	/*
-	 * Enter on the PV list if part of our managed memory Note that we
+	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
-	if (pmap_is_managed(pa)) {
-		pmap_insert_entry(pmap, va, mpte, pa);
+	if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
+		pmap_insert_entry(pmap, va, mpte, m);
 		managed |= PG_MANAGED;
 	}
@@ -2140,13 +2102,13 @@
 	if (managed) {
-		pv_table_t* ppv;
+		vm_page_t om;
 
 		/*
		 * Set up referenced/modified emulation for the new mapping
		 */
-		ppv = pa_to_pvh(pa);
-		if ((ppv->pv_flags & PV_TABLE_REF) == 0)
+		om = PHYS_TO_VM_PAGE(pa);
+		if ((om->md.pv_flags & PV_TABLE_REF) == 0)
 			newpte |= PG_FOR | PG_FOW | PG_FOE;
-		else if ((ppv->pv_flags & PV_TABLE_MOD) == 0)
+		else if ((om->md.pv_flags & PV_TABLE_MOD) == 0)
 			newpte |= PG_FOW;
 	}
@@ -2181,5 +2143,5 @@
 static vm_page_t
-pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_page_t mpte)
+pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t mpte)
 {
 	register pt_entry_t *pte;
@@ -2245,10 +2207,10 @@
 
 	/*
-	 * Enter on the PV list if part of our managed memory Note that we
+	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
 	PMAP_DEBUG_VA(va);
-	pmap_insert_entry(pmap, va, mpte, pa);
+	pmap_insert_entry(pmap, va, mpte, m);
 
 	/*
@@ -2260,5 +2222,5 @@
	 * Now validate mapping with RO protection
	 */
-	*pte = pmap_phys_to_pte(pa) | PG_V | PG_KRE | PG_URE | PG_MANAGED;
+	*pte = pmap_phys_to_pte(VM_PAGE_TO_PHYS(m)) | PG_V | PG_KRE | PG_URE | PG_MANAGED;
 
 	alpha_pal_imb();	/* XXX overkill? */
@@ -2322,6 +2284,5 @@
 			vm_page_busy(p);
 			mpte = pmap_enter_quick(pmap,
-				addr + alpha_ptob(tmpidx),
-				VM_PAGE_TO_PHYS(p), mpte);
+				addr + alpha_ptob(tmpidx), p, mpte);
 			vm_page_flag_set(p, PG_MAPPED);
 			vm_page_wakeup(p);
@@ -2342,6 +2303,5 @@
 			vm_page_busy(p);
 			mpte = pmap_enter_quick(pmap,
-				addr + alpha_ptob(tmpidx),
-				VM_PAGE_TO_PHYS(p), mpte);
+				addr + alpha_ptob(tmpidx), p, mpte);
 			vm_page_flag_set(p, PG_MAPPED);
 			vm_page_wakeup(p);
@@ -2438,6 +2398,5 @@
 		}
 		vm_page_busy(m);
-		mpte = pmap_enter_quick(pmap, addr,
-			VM_PAGE_TO_PHYS(m), mpte);
+		mpte = pmap_enter_quick(pmap, addr, m, mpte);
 		vm_page_flag_set(m, PG_MAPPED);
 		vm_page_wakeup(m);
@@ -2576,22 +2535,20 @@
  */
 boolean_t
-pmap_page_exists(pmap, pa)
+pmap_page_exists(pmap, m)
 	pmap_t pmap;
-	vm_offset_t pa;
+	vm_page_t m;
 {
 	register pv_entry_t pv;
-	pv_table_t *ppv;
 	int s;
 
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return FALSE;
 
 	s = splvm();
-	ppv = pa_to_pvh(pa);
 
 	/*
	 * Not found, check current mappings returning immediately if found.
	 */
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
@@ -2620,5 +2577,5 @@
 {
 	pt_entry_t *pte, tpte;
-	pv_table_t *ppv;
+	vm_page_t m;
 	pv_entry_t pv, npv;
 	int s;
@@ -2661,5 +2618,5 @@
 		*pte = 0;
 
-		ppv = pa_to_pvh(pmap_pte_pa(&tpte));
+		m = PHYS_TO_VM_PAGE(pmap_pte_pa(&tpte));
 
 		pv->pv_pmap->pm_stats.resident_count--;
@@ -2668,8 +2625,8 @@
 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
 
-		ppv->pv_list_count--;
-		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
-		if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
-			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
+		m->md.pv_list_count--;
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 		}
@@ -2685,18 +2642,16 @@
  */
 static void
-pmap_changebit(vm_offset_t pa, int bit, boolean_t setem)
+pmap_changebit(vm_page_t m, int bit, boolean_t setem)
 {
 	pv_entry_t pv;
-	pv_table_t *ppv;
 	pt_entry_t *pte;
 	int changed;
 	int s;
 
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return;
 
 	s = splvm();
 	changed = 0;
-	ppv = pa_to_pvh(pa);
 
 	/*
@@ -2704,5 +2659,5 @@
	 * setting RO do we need to clear the VAC?
	 */
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
@@ -2748,11 +2703,11 @@
  */
 void
-pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
 {
 	if ((prot & VM_PROT_WRITE) == 0) {
 		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
-			pmap_changebit(phys, PG_KWE|PG_UWE, FALSE);
+			pmap_changebit(m, PG_KWE|PG_UWE, FALSE);
 		} else {
-			pmap_remove_all(phys);
+			pmap_remove_all(m);
 		}
 	}
@@ -2773,16 +2728,12 @@
  */
 int
-pmap_ts_referenced(vm_offset_t pa)
+pmap_ts_referenced(vm_page_t m)
 {
-	pv_table_t *ppv;
-
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return 0;
 
-	ppv = pa_to_pvh(pa);
-
-	if (ppv->pv_flags & PV_TABLE_REF) {
-		pmap_changebit(pa, PG_FOR|PG_FOE|PG_FOW, TRUE);
-		ppv->pv_flags &= ~PV_TABLE_REF;
+	if (m->md.pv_flags & PV_TABLE_REF) {
+		pmap_changebit(m, PG_FOR|PG_FOE|PG_FOW, TRUE);
+		m->md.pv_flags &= ~PV_TABLE_REF;
 		return 1;
 	}
@@ -2798,14 +2749,11 @@
  */
 boolean_t
-pmap_is_modified(vm_offset_t pa)
+pmap_is_modified(vm_page_t m)
 {
-	pv_table_t *ppv;
-
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return FALSE;
 
-	ppv = pa_to_pvh(pa);
-
-	return (ppv->pv_flags & PV_TABLE_MOD) != 0;
+	return (m->md.pv_flags & PV_TABLE_MOD) != 0;
 }
@@ -2814,16 +2762,12 @@
  */
 void
-pmap_clear_modify(vm_offset_t pa)
+pmap_clear_modify(vm_page_t m)
 {
-	pv_table_t *ppv;
-
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return;
 
-	ppv = pa_to_pvh(pa);
-
-	if (ppv->pv_flags & PV_TABLE_MOD) {
-		pmap_changebit(pa, PG_FOW, TRUE);
-		ppv->pv_flags &= ~PV_TABLE_MOD;
+	if (m->md.pv_flags & PV_TABLE_MOD) {
+		pmap_changebit(m, PG_FOW, TRUE);
+		m->md.pv_flags &= ~PV_TABLE_MOD;
 	}
 }
@@ -2839,8 +2783,5 @@
 pmap_page_is_free(vm_page_t m)
 {
-	pv_table_t *ppv;
-
-	ppv = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	ppv->pv_flags = 0;
+	m->md.pv_flags = 0;
 }
@@ -2851,16 +2792,12 @@
  */
 void
-pmap_clear_reference(vm_offset_t pa)
+pmap_clear_reference(vm_page_t m)
 {
-	pv_table_t *ppv;
-
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return;
 
-	ppv = pa_to_pvh(pa);
-
-	if (ppv->pv_flags & PV_TABLE_REF) {
-		pmap_changebit(pa, PG_FOR|PG_FOE|PG_FOW, TRUE);
-		ppv->pv_flags &= ~PV_TABLE_REF;
+	if (m->md.pv_flags & PV_TABLE_REF) {
+		pmap_changebit(m, PG_FOR|PG_FOE|PG_FOW, TRUE);
+		m->md.pv_flags &= ~PV_TABLE_REF;
 	}
 }
@@ -2877,5 +2814,5 @@
 	pt_entry_t faultoff, *pte;
 	vm_offset_t pa;
-	pv_table_t *ppv;
+	vm_page_t m;
 
 	/*
@@ -2933,14 +2870,14 @@
	 * (2) if it was a write fault, mark page as modified.
	 */
-	ppv = pa_to_pvh(pa);
-	ppv->pv_flags |= PV_TABLE_REF;
+	m = PHYS_TO_VM_PAGE(pa);
+	m->md.pv_flags |= PV_TABLE_REF;
 	faultoff = PG_FOR | PG_FOE;
-	vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
+	vm_page_flag_set(m, PG_REFERENCED);
 	if (write) {
-		ppv->pv_flags |= PV_TABLE_MOD;
-		vm_page_dirty(ppv->pv_vm_page);
+		m->md.pv_flags |= PV_TABLE_MOD;
+		vm_page_dirty(m);
 		faultoff |= PG_FOW;
 	}
-	pmap_changebit(pa, faultoff, FALSE);
+	pmap_changebit(m, faultoff, FALSE);
 	if ((*pte & faultoff) != 0) {
 #if 1
@@ -3021,5 +2958,4 @@
 	pt_entry_t *pte;
-	vm_page_t m;
 	int val = 0;
 
@@ -3030,5 +2966,5 @@
 	if (pmap_pte_v(pte)) {
-		pv_table_t *ppv;
+		vm_page_t m;
 		vm_offset_t pa;
 
@@ -3039,21 +2975,20 @@
 		pa = pmap_pte_pa(pte);
-		ppv = pa_to_pvh(pa);
-		m = ppv->pv_vm_page;
+		m = PHYS_TO_VM_PAGE(pa);
 
 		/*
		 * Modified by us
		 */
-		if (ppv->pv_flags & PV_TABLE_MOD)
+		if (m->md.pv_flags & PV_TABLE_MOD)
 			val |= MINCORE_MODIFIED|MINCORE_MODIFIED_OTHER;
 		/*
		 * Modified by someone
		 */
-		else if (m->dirty || pmap_is_modified(pa))
+		else if (m->dirty || pmap_is_modified(m))
 			val |= MINCORE_MODIFIED_OTHER;
 		/*
		 * Referenced by us
		 */
-		if (ppv->pv_flags & PV_TABLE_REF)
+		if (m->md.pv_flags & PV_TABLE_REF)
 			val |= MINCORE_REFERENCED|MINCORE_REFERENCED_OTHER;
 
@@ -3061,5 +2996,5 @@
		 * Referenced by someone
		 */
-		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
+		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
 			val |= MINCORE_REFERENCED_OTHER;
 			vm_page_flag_set(m, PG_REFERENCED);
@@ -3107,5 +3042,6 @@
 vm_offset_t
-pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
 	return addr;
@@ -3114,5 +3050,6 @@
 #if 0
 #if defined(PMAP_DEBUG)
-pmap_pid_dump(int pid) {
+pmap_pid_dump(int pid)
+{
 	pmap_t pmap;
 	struct proc *p;
@@ -3148,5 +3085,5 @@
					vm_page_t m;
					pa = *(int *)pte;
-					m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
+					m = PHYS_TO_VM_PAGE(pa);
					printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags);
@@ -3172,5 +3109,5 @@
 static void	pads __P((pmap_t pm));
-static void	pmap_pvdump __P((vm_offset_t pa));
+static void	pmap_pvdump __P((vm_page_t m));
 /* print address space of pmap*/
@@ -3204,10 +3141,10 @@
 	vm_offset_t pa;
 {
-	pv_table_t *ppv;
-	register pv_entry_t pv;
+	pv_entry_t pv;
+	vm_page_t m;
 
 	printf("pa %x", pa);
-	ppv = pa_to_pvh(pa);
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	m = PHYS_TO_VM_PAGE(pa);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/alpha/include/pmap.h ./alpha/include/pmap.h
--- ../sys/alpha/include/pmap.h	Wed Dec 29 02:57:44 1999
+++ ./alpha/include/pmap.h	Fri May 12 18:18:18 2000
@@ -159,10 +159,10 @@
  */
 struct pv_entry;
-typedef struct {
+
+struct md_page {
 	int pv_list_count;
-	struct vm_page	*pv_vm_page;
 	int pv_flags;
 	TAILQ_HEAD(,pv_entry)	pv_list;
-} pv_table_t;
+};
 
 #define	PV_TABLE_MOD	0x01	/* modified */
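
The md_page structure above is the heart of the patch: the old global pv_table array, reached through pa_index()/pa_to_pvh(), disappears, and its per-page fields are embedded directly in each vm_page via the new md member (added to struct vm_page in the vm_page.h hunk near the end of this patch). A minimal sketch of the resulting lookup pattern — not part of the patch, helper name made up — under those assumptions:

	/*
	 * Sketch only: how a pmap routine reaches pv state once md_page
	 * is embedded in struct vm_page.
	 */
	static int
	count_mappings_of(vm_offset_t pa)
	{
		vm_page_t m = PHYS_TO_VM_PAGE(pa);	/* was: pa_to_pvh(pa) */
		pv_entry_t pv;
		int n = 0;

		for (pv = TAILQ_FIRST(&m->md.pv_list); pv != NULL;
		    pv = TAILQ_NEXT(pv, pv_list))
			n++;
		return (n);	/* should equal m->md.pv_list_count */
	}
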
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/cam/scsi/scsi_da.c ./cam/scsi/scsi_da.c
--- ../sys/cam/scsi/scsi_da.c	Fri May 5 06:38:58 2000
+++ ./cam/scsi/scsi_da.c	Wed May 10 19:05:00 2000
@@ -618,9 +618,7 @@
 		if (is_physical_memory(addr)) {
-			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
-			    trunc_page(addr), VM_PROT_READ, TRUE);
+			pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr));
 		} else {
-			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
-			    trunc_page(0), VM_PROT_READ, TRUE);
+			pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
 		}
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/conf/files ./conf/files
--- ../sys/conf/files	Tue May 16 00:37:14 2000
+++ ./conf/files	Tue May 16 14:06:29 2000
@@ -881,4 +881,5 @@
 vm/default_pager.c	standard
 vm/device_pager.c	standard
+vm/phys_pager.c	standard
 vm/swap_pager.c	standard
 vm/vm_fault.c	standard
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/dev/ata/ata-disk.c ./dev/ata/ata-disk.c
--- ../sys/dev/ata/ata-disk.c	Fri May 5 06:38:58 2000
+++ ./dev/ata/ata-disk.c	Fri May 12 18:18:25 2000
@@ -275,9 +275,7 @@
 	DELAY(1000);
 	if (is_physical_memory(addr))
-		pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
-			   trunc_page(addr), VM_PROT_READ, TRUE);
+		pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr));
 	else
-		pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
-			   trunc_page(0), VM_PROT_READ, TRUE);
+		pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
 	bzero(&request, sizeof(struct ad_request));
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/i386/i386/mem.c ./i386/i386/mem.c
--- ../sys/i386/i386/mem.c	Mon Apr 24 12:41:40 2000
+++ ./i386/i386/mem.c	Wed May 10 18:46:28 2000
@@ -177,7 +177,5 @@
 		case 0:
 			v = uio->uio_offset;
-			pmap_enter(kernel_pmap, (vm_offset_t)ptvmmap, v,
-				uio->uio_rw == UIO_READ ? VM_PROT_READ : VM_PROT_WRITE,
-				TRUE);
+			pmap_kenter((vm_offset_t)ptvmmap, v);
 			o = (int)uio->uio_offset & PAGE_MASK;
 			c = (u_int)(PAGE_SIZE - ((int)iov->iov_base & PAGE_MASK));
@@ -185,6 +183,5 @@
 			c = min(c, (u_int)iov->iov_len);
 			error = uiomove((caddr_t)&ptvmmap[o], (int)c, uio);
-			pmap_remove(kernel_pmap, (vm_offset_t)ptvmmap,
-				(vm_offset_t)&ptvmmap[PAGE_SIZE]);
+			pmap_kremove((vm_offset_t)ptvmmap);
 			continue;
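
The driver hunks above (scsi_da, ata-disk, /dev/mem; pc98/wd.c follows later) share one pattern: they mapped a physical page at a fixed kernel VA only for a momentary copy, so rather than grow a vm_page_t argument they switch from pmap_enter(kernel_pmap, ...) to the cheaper, address-based pmap_kenter()/pmap_kremove() pair. A sketch of the idiom — not from the patch, scratch VA CADDR1 and helper name assumed:

	/*
	 * Sketch of the transient kernel-window idiom used by the drivers
	 * above.  CADDR1 is assumed to be a reserved page-aligned kernel VA,
	 * and len must not cross a page boundary.
	 */
	static void
	peek_physical(vm_offset_t pa, void *buf, size_t len)
	{
		pmap_kenter((vm_offset_t)CADDR1, trunc_page(pa));
		bcopy(CADDR1 + (pa & PAGE_MASK), buf, len);
		pmap_kremove((vm_offset_t)CADDR1);
	}
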
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/i386/i386/pmap.c ./i386/i386/pmap.c
--- ../sys/i386/i386/pmap.c	Thu Apr 20 12:41:43 2000
+++ ./i386/i386/pmap.c	Thu May 11 02:02:00 2000
@@ -145,7 +145,4 @@
 static int protection_codes[8];
 
-#define	pa_index(pa)	atop((pa) - vm_first_phys)
-#define	pa_to_pvh(pa)	(&pv_table[pa_index(pa)])
-
 static struct pmap kernel_pmap_store;
 pmap_t kernel_pmap;
@@ -156,8 +153,6 @@
 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
 static boolean_t pmap_initialized = FALSE;	/* Has pmap_init completed? */
-static vm_offset_t vm_first_phys;
 static int pgeflag;		/* PG_G or-in */
 static int pseflag;		/* PG_PS or-in */
-static int pv_npg;
 
 static vm_object_t kptobj;
@@ -181,5 +176,4 @@
 pt_entry_t *CMAP1 = 0;
 static pt_entry_t *CMAP2, *ptmmap;
-static pv_table_t *pv_table;
 caddr_t CADDR1 = 0, ptvmmap = 0;
 static caddr_t CADDR2;
@@ -198,19 +192,17 @@
 static pv_entry_t get_pv_entry __P((void));
 static void	i386_protection_init __P((void));
-static __inline void	pmap_changebit __P((vm_offset_t pa, int bit, boolean_t setem));
-static void	pmap_clearbit __P((vm_offset_t pa, int bit));
+static __inline void	pmap_changebit __P((vm_page_t m, int bit, boolean_t setem));
 
-static PMAP_INLINE int	pmap_is_managed __P((vm_offset_t pa));
-static void	pmap_remove_all __P((vm_offset_t pa));
+static void	pmap_remove_all __P((vm_page_t m));
 static vm_page_t pmap_enter_quick __P((pmap_t pmap, vm_offset_t va,
-				      vm_offset_t pa, vm_page_t mpte));
+				      vm_page_t m, vm_page_t mpte));
 static int pmap_remove_pte __P((struct pmap *pmap, unsigned *ptq,
					vm_offset_t sva));
 static void pmap_remove_page __P((struct pmap *pmap, vm_offset_t va));
-static int pmap_remove_entry __P((struct pmap *pmap, pv_table_t *pv,
+static int pmap_remove_entry __P((struct pmap *pmap, vm_page_t m,
					vm_offset_t va));
-static boolean_t pmap_testbit __P((vm_offset_t pa, int bit));
+static boolean_t pmap_testbit __P((vm_page_t m, int bit));
 static void pmap_insert_entry __P((pmap_t pmap, vm_offset_t va,
-		vm_page_t mpte, vm_offset_t pa));
+		vm_page_t mpte, vm_page_t m));
 
 static vm_page_t pmap_allocpte __P((pmap_t pmap, vm_offset_t va));
@@ -257,5 +249,6 @@
  */
 static vm_offset_t
-pmap_kmem_choose(vm_offset_t addr) {
+pmap_kmem_choose(vm_offset_t addr)
+{
 	vm_offset_t newaddr = addr;
 #ifndef DISABLE_PSE
@@ -489,6 +482,4 @@
 	vm_offset_t phys_start, phys_end;
 {
-	vm_offset_t addr;
-	vm_size_t s;
 	int i;
 	int initial_pvs;
@@ -500,25 +491,14 @@
 
 	/*
-	 * calculate the number of pv_entries needed
-	 */
-	vm_first_phys = phys_avail[0];
-	for (i = 0; phys_avail[i + 1]; i += 2);
-	pv_npg = (phys_avail[(i - 2) + 1] - vm_first_phys) / PAGE_SIZE;
-
-	/*
	 * Allocate memory for random pmap data structures. Includes the
	 * pv_head_table.
	 */
-	s = (vm_size_t) (sizeof(pv_table_t) * pv_npg);
-	s = round_page(s);
-	addr = (vm_offset_t) kmem_alloc(kernel_map, s);
-	pv_table = (pv_table_t *) addr;
-	for(i = 0; i < pv_npg; i++) {
-		vm_offset_t pa;
-		TAILQ_INIT(&pv_table[i].pv_list);
-		pv_table[i].pv_list_count = 0;
-		pa = vm_first_phys + i * PAGE_SIZE;
-		pv_table[i].pv_vm_page = PHYS_TO_VM_PAGE(pa);
+	for(i = 0; i < vm_page_array_size; i++) {
+		vm_page_t m;
+
+		m = &vm_page_array[i];
+		TAILQ_INIT(&m->md.pv_list);
+		m->md.pv_list_count = 0;
 	}
 
@@ -526,5 +506,5 @@
	 * init the pv free list
	 */
-	initial_pvs = pv_npg;
+	initial_pvs = vm_page_array_size;
 	if (initial_pvs < MINPV)
 		initial_pvs = MINPV;
@@ -532,5 +512,6 @@
 	pvinit = (struct pv_entry *) kmem_alloc(kernel_map,
		initial_pvs * sizeof (struct pv_entry));
-	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit, pv_npg);
+	zbootinit(pvzone, "PV ENTRY", sizeof (struct pv_entry), pvinit,
+	    vm_page_array_size);
 
 	/*
@@ -546,6 +527,7 @@
  */
 void
-pmap_init2() {
-	pv_entry_max = PMAP_SHPGPERPROC * maxproc + pv_npg;
+pmap_init2()
+{
+	pv_entry_max = PMAP_SHPGPERPROC * maxproc + vm_page_array_size;
 	pv_entry_high_water = 9 * (pv_entry_max / 10);
 	zinitna(pvzone, &pvzone_obj, NULL, 0, pv_entry_max, ZONE_INTERRUPT, 1);
@@ -567,5 +549,5 @@
 {
 	while (start < end) {
-		pmap_enter(kernel_pmap, virt, start, prot, FALSE);
+		pmap_kenterp(virt, start);
 		virt += PAGE_SIZE;
 		start += PAGE_SIZE;
@@ -586,5 +568,6 @@
  */
 static int
-pmap_nw_modified(pt_entry_t ptea) {
+pmap_nw_modified(pt_entry_t ptea)
+{
 	int pte;
 
@@ -604,5 +587,6 @@
  */
 static PMAP_INLINE int
-pmap_track_modified( vm_offset_t va) {
+pmap_track_modified( vm_offset_t va)
+{
 	if ((va < clean_sva) || (va >= clean_eva))
 		return 1;
@@ -612,5 +596,6 @@
 
 static PMAP_INLINE void
-invltlb_1pg( vm_offset_t va) {
+invltlb_1pg( vm_offset_t va)
+{
 #if defined(I386_CPU)
 	if (cpu_class == CPUCLASS_386) {
@@ -743,24 +728,4 @@
 }
 
-/*
- * determine if a page is managed (memory vs. device)
- */
-static PMAP_INLINE int
-pmap_is_managed(pa)
-	vm_offset_t pa;
-{
-	int i;
-
-	if (!pmap_initialized)
-		return 0;
-
-	for (i = 0; phys_avail[i + 1]; i += 2) {
-		if (pa < phys_avail[i + 1] && pa >= phys_avail[i])
-			return 1;
-	}
-	return 0;
-}
-
-
 /***************************************************
  * Low level mapping routines.....
@@ -788,4 +753,13 @@
 }
 
+void
+pmap_kenterp(va, pa)
+	vm_offset_t va;
+	vm_offset_t pa;
+{
+	/* XXX dyson does special stuff here */
+	pmap_kenter(va, pa);
+}
+
 /*
  * remove a page from the kernel pagetables
@@ -1079,5 +1053,6 @@
 static PMAP_INLINE int
-pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m) {
+pmap_unwire_pte_hold(pmap_t pmap, vm_page_t m)
+{
 	vm_page_unhold(m);
 	if (m->hold_count == 0)
@@ -1540,8 +1515,7 @@
  */
 void
-pmap_collect() {
-	pv_table_t *ppv;
+pmap_collect()
+{
 	int i;
-	vm_offset_t pa;
 	vm_page_t m;
 	static int warningdone=0;
@@ -1555,14 +1529,12 @@
 	}
 
-	for(i = 0; i < pv_npg; i++) {
-		if ((ppv = &pv_table[i]) == 0)
-			continue;
-		m = ppv->pv_vm_page;
-		if ((pa = VM_PAGE_TO_PHYS(m)) == 0)
-			continue;
+	for(i = 0; i < vm_page_array_size; i++) {
+		m = &vm_page_array[i];
 		if (m->wire_count || m->hold_count || m->busy ||
-		    (m->flags & PG_BUSY))
+		    (m->flags & PG_BUSY))
 			continue;
-		pmap_remove_all(pa);
+/* XXX dyson		m->m_flags |= PG_BUSY */
+		pmap_remove_all(m);
+/* XXX dyson		vm_page_wakeup(m); */
 	}
 	pmap_pagedaemon_waken = 0;
@@ -1578,7 +1550,7 @@
 static int
-pmap_remove_entry(pmap, ppv, va)
+pmap_remove_entry(pmap, m, va)
 	struct pmap *pmap;
-	pv_table_t *ppv;
+	vm_page_t m;
 	vm_offset_t va;
 {
@@ -1588,6 +1560,6 @@
 	s = splvm();
-	if (ppv->pv_list_count < pmap->pm_stats.resident_count) {
-		for (pv = TAILQ_FIRST(&ppv->pv_list);
+	if (m->md.pv_list_count < pmap->pm_stats.resident_count) {
+		for (pv = TAILQ_FIRST(&m->md.pv_list);
			pv;
			pv = TAILQ_NEXT(pv, pv_list)) {
@@ -1608,8 +1580,8 @@
 		rtval = pmap_unuse_pt(pmap, va, pv->pv_ptem);
-		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
-		ppv->pv_list_count--;
-		if (TAILQ_FIRST(&ppv->pv_list) == NULL)
-			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count--;
+		if (TAILQ_FIRST(&m->md.pv_list) == NULL)
+			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 
 		TAILQ_REMOVE(&pmap->pm_pvlist, pv, pv_plist);
@@ -1626,14 +1598,13 @@
  */
 static void
-pmap_insert_entry(pmap, va, mpte, pa)
+pmap_insert_entry(pmap, va, mpte, m)
 	pmap_t pmap;
 	vm_offset_t va;
 	vm_page_t mpte;
-	vm_offset_t pa;
+	vm_page_t m;
 {
 	int s;
 	pv_entry_t pv;
-	pv_table_t *ppv;
 
 	s = splvm();
@@ -1644,8 +1615,6 @@
 	TAILQ_INSERT_TAIL(&pmap->pm_pvlist, pv, pv_plist);
-
-	ppv = pa_to_pvh(pa);
-	TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
-	ppv->pv_list_count++;
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	m->md.pv_list_count++;
 
 	splx(s);
@@ -1662,5 +1631,5 @@
 {
 	unsigned oldpte;
-	pv_table_t *ppv;
+	vm_page_t m;
 
 	oldpte = loadandclear(ptq);
@@ -1675,5 +1644,5 @@
 	pmap->pm_stats.resident_count -= 1;
 	if (oldpte & PG_MANAGED) {
-		ppv = pa_to_pvh(oldpte);
+		m = PHYS_TO_VM_PAGE(oldpte);
 		if (oldpte & PG_M) {
 #if defined(PMAP_DIAGNOSTIC)
@@ -1685,9 +1654,9 @@
 #endif
 			if (pmap_track_modified(va))
-				vm_page_dirty(ppv->pv_vm_page);
+				vm_page_dirty(m);
 		}
 		if (oldpte & PG_A)
-			vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
-		return pmap_remove_entry(pmap, ppv, va);
+			vm_page_flag_set(m, PG_REFERENCED);
+		return pmap_remove_entry(pmap, m, va);
 	} else {
 		return pmap_unuse_pt(pmap, va, NULL);
@@ -1837,9 +1806,8 @@
 static void
-pmap_remove_all(pa)
-	vm_offset_t pa;
+pmap_remove_all(m)
	vm_page_t m;
 {
 	register pv_entry_t pv;
-	pv_table_t *ppv;
 	register unsigned *pte, tpte;
 	int s;
@@ -1850,5 +1818,5 @@
	 * pages!
	 */
-	if (!pmap_is_managed(pa)) {
-		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", pa);
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS)) {
+		panic("pmap_page_protect: illegal for unmanaged page, va: 0x%x", VM_PAGE_TO_PHYS(m));
 	}
@@ -1856,6 +1824,5 @@
 	s = splvm();
-	ppv = pa_to_pvh(pa);
-	while ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
+	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pv->pv_pmap->pm_stats.resident_count--;
@@ -1867,5 +1834,5 @@
 		if (tpte & PG_A)
-			vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
+			vm_page_flag_set(m, PG_REFERENCED);
 
 		/*
@@ -1881,16 +1848,16 @@
 #endif
 			if (pmap_track_modified(pv->pv_va))
-				vm_page_dirty(ppv->pv_vm_page);
+				vm_page_dirty(m);
 		}
 		pmap_TLB_invalidate(pv->pv_pmap, pv->pv_va);
 
 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
-		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
-		ppv->pv_list_count--;
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		m->md.pv_list_count--;
 		pmap_unuse_pt(pv->pv_pmap, pv->pv_va, pv->pv_ptem);
 		free_pv_entry(pv);
 	}
 
-	vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
+	vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 
 	splx(s);
@@ -1909,5 +1876,4 @@
 	int anychanged;
 
-
 	if (pmap == NULL)
 		return;
@@ -1956,20 +1922,20 @@
 			unsigned pbits;
-			pv_table_t *ppv;
+			vm_page_t m;
 
 			pbits = ptbase[sindex];
 
 			if (pbits & PG_MANAGED) {
-				ppv = NULL;
+				m = NULL;
 				if (pbits & PG_A) {
-					ppv = pa_to_pvh(pbits);
-					vm_page_flag_set(ppv->pv_vm_page, PG_REFERENCED);
+					m = PHYS_TO_VM_PAGE(pbits);
+					vm_page_flag_set(m, PG_REFERENCED);
 					pbits &= ~PG_A;
 				}
 				if (pbits & PG_M) {
 					if (pmap_track_modified(i386_ptob(sindex))) {
-						if (ppv == NULL)
-							ppv = pa_to_pvh(pbits);
-						vm_page_dirty(ppv->pv_vm_page);
+						if (m == NULL)
+							m = PHYS_TO_VM_PAGE(pbits);
+						vm_page_dirty(m);
 						pbits &= ~PG_M;
 					}
@@ -2002,7 +1968,8 @@
  */
 void
-pmap_enter(pmap_t pmap, vm_offset_t va, vm_offset_t pa, vm_prot_t prot,
+pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
	   boolean_t wired)
 {
+	vm_offset_t pa;
 	register unsigned *pte;
 	vm_offset_t opa;
@@ -2059,6 +2026,6 @@
 	}
 
+	pa = VM_PAGE_TO_PHYS(m) & PG_FRAME;
 	origpte = *(vm_offset_t *)pte;
-	pa &= PG_FRAME;
 	opa = origpte & PG_FRAME;
@@ -2115,7 +2082,7 @@
 	if (origpte & PG_MANAGED) {
 		if ((origpte & PG_M) && pmap_track_modified(va)) {
-			pv_table_t *ppv;
-			ppv = pa_to_pvh(opa);
-			vm_page_dirty(ppv->pv_vm_page);
+			vm_page_t om;
+			om = PHYS_TO_VM_PAGE(opa);
+			vm_page_dirty(om);
 		}
 		pa |= PG_MANAGED;
@@ -2135,10 +2102,10 @@
 
 	/*
-	 * Enter on the PV list if part of our managed memory Note that we
+	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
-	if (pmap_is_managed(pa)) {
-		pmap_insert_entry(pmap, va, mpte, pa);
+	if (pmap_initialized && (m->flags & PG_FICTITIOUS) == 0) {
+		pmap_insert_entry(pmap, va, mpte, m);
 		pa |= PG_MANAGED;
 	}
@@ -2194,11 +2161,12 @@
 static vm_page_t
-pmap_enter_quick(pmap, va, pa, mpte)
+pmap_enter_quick(pmap, va, m, mpte)
 	register pmap_t pmap;
 	vm_offset_t va;
-	register vm_offset_t pa;
+	vm_page_t m;
 	vm_page_t mpte;
 {
-	register unsigned *pte;
+	unsigned *pte;
+	vm_offset_t pa;
 
 	/*
@@ -2262,9 +2230,9 @@
 
 	/*
-	 * Enter on the PV list if part of our managed memory Note that we
+	 * Enter on the PV list if part of our managed memory. Note that we
	 * raise IPL while manipulating pv_table since pmap_enter can be
	 * called at interrupt time.
	 */
-	pmap_insert_entry(pmap, va, mpte, pa);
+	pmap_insert_entry(pmap, va, mpte, m);
 
 	/*
@@ -2273,8 +2241,12 @@
 	pmap->pm_stats.resident_count++;
 
+	pa = VM_PAGE_TO_PHYS(m);
+/* XXX dyson	m->flags |= PG_MAPPED; */
+
 	/*
	 * Now validate mapping with RO protection
	 */
 	*pte = pa | PG_V | PG_U | PG_MANAGED;
+/* XXX dyson */
 
 	return mpte;
@@ -2400,6 +2372,5 @@
 			vm_page_busy(p);
 			mpte = pmap_enter_quick(pmap,
-				addr + i386_ptob(tmpidx),
-				VM_PAGE_TO_PHYS(p), mpte);
+				addr + i386_ptob(tmpidx), p, mpte);
 			vm_page_flag_set(p, PG_MAPPED);
 			vm_page_wakeup(p);
@@ -2421,6 +2392,5 @@
 			vm_page_busy(p);
 			mpte = pmap_enter_quick(pmap,
-				addr + i386_ptob(tmpidx),
-				VM_PAGE_TO_PHYS(p), mpte);
+				addr + i386_ptob(tmpidx), p, mpte);
 			vm_page_flag_set(p, PG_MAPPED);
 			vm_page_wakeup(p);
@@ -2517,6 +2487,5 @@
 		}
 		vm_page_busy(m);
-		mpte = pmap_enter_quick(pmap, addr,
-			VM_PAGE_TO_PHYS(m), mpte);
+		mpte = pmap_enter_quick(pmap, addr, m, mpte);
 		vm_page_flag_set(m, PG_MAPPED);
 		vm_page_wakeup(m);
@@ -2578,4 +2547,5 @@
 	vm_offset_t pdnxt;
 	unsigned src_frame, dst_frame;
+	vm_page_t m;
 
 	if (dst_addr != src_addr)
@@ -2660,9 +2630,9 @@
				 * during the copy.
				 */
+				m = PHYS_TO_VM_PAGE(ptetemp);
 				*dst_pte = ptetemp & ~(PG_M | PG_A);
 				dst_pmap->pm_stats.resident_count++;
 				pmap_insert_entry(dst_pmap, addr,
-					dstmpte,
-					(ptetemp & PG_FRAME));
+					dstmpte, m);
 			} else {
 				pmap_unwire_pte_hold(dst_pmap, dstmpte);
@@ -2851,22 +2821,20 @@
  */
 boolean_t
-pmap_page_exists(pmap, pa)
+pmap_page_exists(pmap, m)
 	pmap_t pmap;
-	vm_offset_t pa;
+	vm_page_t m;
 {
 	register pv_entry_t pv;
-	pv_table_t *ppv;
 	int s;
 
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return FALSE;
 
 	s = splvm();
-	ppv = pa_to_pvh(pa);
 
 	/*
	 * Not found, check current mappings returning immediately if found.
	 */
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
@@ -2895,7 +2863,7 @@
 {
 	unsigned *pte, tpte;
-	pv_table_t *ppv;
 	pv_entry_t pv, npv;
 	int s;
+	vm_page_t m;
 
 #ifdef PMAP_REMOVE_PAGES_CURPROC_ONLY
@@ -2932,7 +2900,7 @@
 		*pte = 0;
 
-		ppv = pa_to_pvh(tpte);
+		m = PHYS_TO_VM_PAGE(tpte);
 
-		KASSERT(ppv < &pv_table[pv_npg],
+		KASSERT(m < &vm_page_array[vm_page_array_size],
			("pmap_remove_pages: bad tpte %x", tpte));
 
@@ -2943,5 +2911,5 @@
		 */
 		if (tpte & PG_M) {
-			vm_page_dirty(ppv->pv_vm_page);
+			vm_page_dirty(m);
 		}
 
@@ -2950,8 +2918,8 @@
 		TAILQ_REMOVE(&pv->pv_pmap->pm_pvlist, pv, pv_plist);
-		ppv->pv_list_count--;
-		TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
-		if (TAILQ_FIRST(&ppv->pv_list) == NULL) {
-			vm_page_flag_clear(ppv->pv_vm_page, PG_MAPPED | PG_WRITEABLE);
+		m->md.pv_list_count--;
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		if (TAILQ_FIRST(&m->md.pv_list) == NULL) {
+			vm_page_flag_clear(m, PG_MAPPED | PG_WRITEABLE);
 		}
@@ -2969,23 +2937,21 @@
  */
 static boolean_t
-pmap_testbit(pa, bit)
-	register vm_offset_t pa;
+pmap_testbit(m, bit)
	vm_page_t m;
 	int bit;
 {
-	register pv_entry_t pv;
-	pv_table_t *ppv;
+	pv_entry_t pv;
 	unsigned *pte;
 	int s;
 
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return FALSE;
 
-	ppv = pa_to_pvh(pa);
-	if (TAILQ_FIRST(&ppv->pv_list) == NULL)
+	if (TAILQ_FIRST(&m->md.pv_list) == NULL)
 		return FALSE;
 
 	s = splvm();
 
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
@@ -3021,19 +2987,17 @@
  */
 static __inline void
-pmap_changebit(pa, bit, setem)
-	vm_offset_t pa;
+pmap_changebit(m, bit, setem)
	vm_page_t m;
 	int bit;
 	boolean_t setem;
 {
 	register pv_entry_t pv;
-	pv_table_t *ppv;
 	register unsigned *pte;
 	int s;
 
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
 		return;
 
 	s = splvm();
-	ppv = pa_to_pvh(pa);
 
 	/*
@@ -3041,5 +3005,5 @@
	 * setting RO do we need to clear the VAC?
	 */
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
@@ -3070,5 +3034,5 @@
			if (bit == PG_RW) {
				if (pbits & PG_M) {
-					vm_page_dirty(ppv->pv_vm_page);
+					vm_page_dirty(m);
				}
				*(int *)pte = pbits & ~(PG_M|PG_RW);
@@ -3084,18 +3048,4 @@
 
 /*
- * pmap_clearbit:
- *
- *	Clear a bit/bits in every pte mapping a given physical page.  Making
- *	this inline allows the pmap_changebit inline to be well optimized.
- */
-static __inline void
-pmap_clearbit(
-	vm_offset_t pa,
-	int bit)
-{
-	pmap_changebit(pa, bit, FALSE);
-}
-
-/*
  *	pmap_page_protect:
  *
@@ -3103,11 +3053,11 @@
  */
 void
-pmap_page_protect(vm_offset_t phys, vm_prot_t prot)
+pmap_page_protect(vm_page_t m, vm_prot_t prot)
 {
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ | VM_PROT_EXECUTE)) {
-			pmap_clearbit(phys, PG_RW);
+			pmap_changebit(m, PG_RW, FALSE);
		} else {
-			pmap_remove_all(phys);
+			pmap_remove_all(m);
		}
	}
@@ -3127,20 +3077,17 @@
  */
 int
-pmap_ts_referenced(vm_offset_t pa)
+pmap_ts_referenced(vm_page_t m)
 {
	register pv_entry_t pv, pvf, pvn;
-	pv_table_t *ppv;
	unsigned *pte;
	int s;
	int rtval = 0;
 
-	if (!pmap_is_managed(pa))
+	if (!pmap_initialized || (m->flags & PG_FICTITIOUS))
		return (rtval);
 
	s = splvm();
 
-	ppv = pa_to_pvh(pa);
-
-	if ((pv = TAILQ_FIRST(&ppv->pv_list)) != NULL) {
+	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 
		pvf = pv;
@@ -3149,7 +3096,7 @@
			pvn = TAILQ_NEXT(pv, pv_list);
 
-			TAILQ_REMOVE(&ppv->pv_list, pv, pv_list);
+			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 
-			TAILQ_INSERT_TAIL(&ppv->pv_list, pv, pv_list);
+			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
 
			if (!pmap_track_modified(pv->pv_va))
@@ -3182,7 +3129,7 @@
  */
 boolean_t
-pmap_is_modified(vm_offset_t pa)
+pmap_is_modified(vm_page_t m)
 {
-	return pmap_testbit((pa), PG_M);
+	return pmap_testbit(m, PG_M);
 }
@@ -3191,7 +3138,7 @@
  */
 void
-pmap_clear_modify(vm_offset_t pa)
+pmap_clear_modify(vm_page_t m)
 {
-	pmap_clearbit(pa, PG_M);
+	pmap_changebit(m, PG_M, FALSE);
 }
@@ -3202,7 +3149,7 @@
  */
 void
-pmap_clear_reference(vm_offset_t pa)
+pmap_clear_reference(vm_page_t m)
 {
-	pmap_clearbit(pa, PG_A);
+	pmap_changebit(m, PG_A, FALSE);
 }
@@ -3305,5 +3252,4 @@
 	if ((pte = *ptep) != 0) {
-		pv_table_t *ppv;
 		vm_offset_t pa;
 
@@ -3314,6 +3260,5 @@
 		pa = pte & PG_FRAME;
 
-		ppv = pa_to_pvh((pa & PG_FRAME));
-		m = ppv->pv_vm_page;
+		m = PHYS_TO_VM_PAGE(pa);
 
 		/*
@@ -3325,5 +3270,5 @@
		 * Modified by someone
		 */
-		else if (m->dirty || pmap_is_modified(pa))
+		else if (m->dirty || pmap_is_modified(m))
			val |= MINCORE_MODIFIED_OTHER;
		/*
@@ -3336,5 +3281,5 @@
		 * Referenced by someone
		 */
-		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(pa)) {
+		else if ((m->flags & PG_REFERENCED) || pmap_ts_referenced(m)) {
			val |= MINCORE_REFERENCED_OTHER;
			vm_page_flag_set(m, PG_REFERENCED);
@@ -3362,5 +3307,6 @@
 vm_offset_t
-pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size) {
+pmap_addr_hint(vm_object_t obj, vm_offset_t addr, vm_size_t size)
+{
	if ((obj == NULL) || (size < NBPDR) || (obj->type != OBJT_DEVICE)) {
@@ -3374,5 +3320,6 @@
 #if defined(PMAP_DEBUG)
-pmap_pid_dump(int pid) {
+pmap_pid_dump(int pid)
+{
	pmap_t pmap;
	struct proc *p;
@@ -3408,5 +3355,5 @@
					vm_page_t m;
					pa = *(int *)pte;
-					m = PHYS_TO_VM_PAGE((pa & PG_FRAME));
+					m = PHYS_TO_VM_PAGE(pa);
					printf("va: 0x%x, pt: 0x%x, h: %d, w: %d, f: 0x%x", va, pa, m->hold_count, m->wire_count, m->flags);
@@ -3463,10 +3410,10 @@
	vm_offset_t pa;
 {
-	pv_table_t *ppv;
	register pv_entry_t pv;
+	vm_page_t m;
 
	printf("pa %x", pa);
-	ppv = pa_to_pvh(pa);
-	for (pv = TAILQ_FIRST(&ppv->pv_list);
+	m = PHYS_TO_VM_PAGE(pa);
+	for (pv = TAILQ_FIRST(&m->md.pv_list);
		pv;
		pv = TAILQ_NEXT(pv, pv_list)) {
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/i386/include/pmap.h ./i386/include/pmap.h
--- ../sys/i386/include/pmap.h	Wed Dec 29 02:57:44 1999
+++ ./i386/include/pmap.h	Thu May 11 01:29:37 2000
@@ -189,9 +189,9 @@
  */
 struct pv_entry;
-typedef struct {
+
+struct md_page {
 	int pv_list_count;
-	struct vm_page	*pv_vm_page;
 	TAILQ_HEAD(,pv_entry)	pv_list;
-} pv_table_t;
+};
 
 struct pmap {
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/kern/sysv_shm.c ./kern/sysv_shm.c
--- ../sys/kern/sysv_shm.c	Thu Mar 30 00:42:55 2000
+++ ./kern/sysv_shm.c	Sat May 13 19:05:34 2000
@@ -530,5 +530,5 @@
	 */
	shm_handle->shm_object =
-	   vm_pager_allocate(OBJT_SWAP, 0, size, VM_PROT_DEFAULT, 0);
+	   vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0);
	vm_object_clear_flag(shm_handle->shm_object, OBJ_ONEMAPPING);
	vm_object_set_flag(shm_handle->shm_object, OBJ_NOSPLIT);
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/pc98/pc98/wd.c ./pc98/pc98/wd.c
--- ../sys/pc98/pc98/wd.c	Fri May 5 06:38:58 2000
+++ ./pc98/pc98/wd.c	Fri May 12 18:20:12 2000
@@ -2168,9 +2168,8 @@
	while (blkcnt != 0) {
	    if (is_physical_memory((vm_offset_t)addr))
-		pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
-		    trunc_page((vm_offset_t)addr), VM_PROT_READ, TRUE);
+		pmap_kenter((vm_offset_t)CADDR1,
+		    trunc_page((vm_offset_t)addr));
	    else
-		pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
-		    trunc_page(0), VM_PROT_READ, TRUE);
+		pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
 
	    /* Ready to send data?	*/
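
Next comes the new pager itself. The sysv_shm.c hunk above is its first consumer: by allocating the backing object as OBJT_PHYS instead of OBJT_SWAP, SysV shared memory gets zero-filled, wired pages that carry no pv entries (PG_FICTITIOUS) and can never be paged out. A sketch of what such a consumer does, mirroring the shm_handle code (function name hypothetical):

	/*
	 * Sketch: create a physical-memory-backed object the way sysv_shm
	 * now does.  size is in bytes; pages materialize on first fault
	 * via phys_pager_getpages() below.
	 */
	vm_object_t
	alloc_phys_backed_object(vm_size_t size)
	{
		return (vm_pager_allocate(OBJT_PHYS, 0, size, VM_PROT_DEFAULT, 0));
	}
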
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/phys_pager.c ./vm/phys_pager.c
--- ../sys/vm/phys_pager.c	Wed Dec 31 16:00:00 1969
+++ ./vm/phys_pager.c	Tue May 16 16:01:11 2000
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2000 Peter Wemm
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/conf.h>
+#include <sys/mman.h>
+
+#include <vm/vm.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_pager.h>
+#include <vm/vm_zone.h>
+
+static void phys_pager_init __P((void));
+static vm_object_t phys_pager_alloc __P((void *, vm_ooffset_t, vm_prot_t,
+	vm_ooffset_t));
+static void phys_pager_dealloc __P((vm_object_t));
+static int phys_pager_getpages __P((vm_object_t, vm_page_t *, int, int));
+static void phys_pager_putpages __P((vm_object_t, vm_page_t *, int,
+	boolean_t, int *));
+static boolean_t phys_pager_haspage __P((vm_object_t, vm_pindex_t, int *,
+	int *));
+
+/* list of phys pager objects */
+static struct pagerlst phys_pager_object_list;
+
+static int phys_pager_alloc_lock, phys_pager_alloc_lock_want;
+
+struct pagerops physpagerops = {
+	phys_pager_init,
+	phys_pager_alloc,
+	phys_pager_dealloc,
+	phys_pager_getpages,
+	phys_pager_putpages,
+	phys_pager_haspage,
+	NULL
+};
+
+static void
+phys_pager_init()
+{
+	TAILQ_INIT(&phys_pager_object_list);
+}
+
+static vm_object_t
+phys_pager_alloc(void *handle, vm_ooffset_t size, vm_prot_t prot, vm_ooffset_t foff)
+{
+	vm_object_t object;
+
+	/*
+	 * Offset should be page aligned.
+	 */
+	if (foff & PAGE_MASK)
+		return (NULL);
+
+	size = round_page(size);
+
+	/*
+	 * Lock to prevent object creation race condition.
+	 */
+	while (phys_pager_alloc_lock) {
+		phys_pager_alloc_lock_want++;
+		tsleep(&phys_pager_alloc_lock, PVM, "ppall", 0);
+		phys_pager_alloc_lock_want--;
+	}
+	phys_pager_alloc_lock = 1;
+
+	/*
+	 * Look up pager, creating as necessary.
+	 */
+	object = vm_pager_object_lookup(&phys_pager_object_list, handle);
+	if (object == NULL) {
+		/*
+		 * Allocate object and associate it with the pager.
+		 */
+		object = vm_object_allocate(OBJT_PHYS,
+			OFF_TO_IDX(foff + size));
+		object->handle = handle;
+		TAILQ_INIT(&object->un_pager.physp.physp_pglist);
+		TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
+		    pager_object_list);
+	} else {
+		/*
+		 * Gain a reference to the object.
+		 */
+		vm_object_reference(object);
+		if (OFF_TO_IDX(foff + size) > object->size)
+			object->size = OFF_TO_IDX(foff + size);
+	}
+
+	phys_pager_alloc_lock = 0;
+	if (phys_pager_alloc_lock_want)
+		wakeup(&phys_pager_alloc_lock);
+
+	return (object);
+}
+
+static void
+phys_pager_dealloc(object)
+	vm_object_t object;
+{
+	vm_page_t m;
+	int s;
+
+	TAILQ_REMOVE(&phys_pager_object_list, object, pager_object_list);
+	/*
+	 * Free up our fake pages.
+	 */
+	s = splvm();
+	while ((m = TAILQ_FIRST(&object->un_pager.physp.physp_pglist)) != 0) {
+		TAILQ_REMOVE(&object->un_pager.physp.physp_pglist, m, pageq);
+		m->flags &= ~PG_FICTITIOUS;
+		m->dirty = 0;
+		vm_page_unwire(m, 0);
+		vm_page_flag_clear(m, PG_ZERO);
+		vm_page_free(m);
+	}
+	splx(s);
+}
+
+static int
+phys_pager_getpages(object, m, count, reqpage)
+	vm_object_t object;
+	vm_page_t *m;
+	int count;
+	int reqpage;
+{
+	vm_page_t page;
+	int i, s;
+
+	s = splvm();
+	/*
+	 * In theory, vm_fault_additional_pages() has ensured we only ever
+	 * get one page, but it's better safe than sorry.
+	 */
+	for (i = 0; i < count; i++)
+		if (i != reqpage)
+			vm_page_free(m[i]);
+	page = m[reqpage];
+	if ((page->flags & PG_ZERO) == 0)
+		vm_page_zero_fill(page);
+	vm_page_flag_set(page, PG_ZERO);
+	vm_page_wire(page);		/* XXX just to count it somewhere */
+	page->flags |= PG_FICTITIOUS;	/* Switch off pv_entries */
+	page->valid = VM_PAGE_BITS_ALL;
+	TAILQ_INSERT_TAIL(&object->un_pager.physp.physp_pglist, page, pageq);
+	splx(s);
+
+	return (VM_PAGER_OK);
+}
+
+static void
+phys_pager_putpages(object, m, count, sync, rtvals)
+	vm_object_t object;
+	vm_page_t *m;
+	int count;
+	boolean_t sync;
+	int *rtvals;
+{
+	panic("phys_pager_putpage called");
+}
+
+static boolean_t
+phys_pager_haspage(object, pindex, before, after)
+	vm_object_t object;
+	vm_pindex_t pindex;
+	int *before;
+	int *after;
+{
+	if (before != NULL)
+		*before = 0;
+	if (after != NULL)
+		*after = 0;
+	return (TRUE);
+}
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/pmap.h ./vm/pmap.h
--- ../sys/vm/pmap.h	Wed Dec 29 02:57:44 1999
+++ ./vm/pmap.h	Wed May 10 18:46:28 2000
@@ -95,6 +95,6 @@
 #endif
 void		 pmap_change_wiring __P((pmap_t, vm_offset_t, boolean_t));
-void		 pmap_clear_modify __P((vm_offset_t pa));
-void		 pmap_clear_reference __P((vm_offset_t pa));
+void		 pmap_clear_modify __P((vm_page_t m));
+void		 pmap_clear_reference __P((vm_page_t m));
 void		 pmap_collect __P((void));
 void		 pmap_copy __P((pmap_t, pmap_t, vm_offset_t, vm_size_t,
@@ -102,12 +102,13 @@
 void		 pmap_copy_page __P((vm_offset_t, vm_offset_t));
 void		 pmap_destroy __P((pmap_t));
-void		 pmap_enter __P((pmap_t, vm_offset_t, vm_offset_t, vm_prot_t,
+void		 pmap_enter __P((pmap_t, vm_offset_t, vm_page_t, vm_prot_t,
			boolean_t));
-vm_offset_t	 pmap_extract __P((pmap_t, vm_offset_t));
+vm_offset_t	 pmap_extract __P((pmap_t pmap, vm_offset_t va));
 void		 pmap_growkernel __P((vm_offset_t));
 void		 pmap_init __P((vm_offset_t, vm_offset_t));
-boolean_t	 pmap_is_modified __P((vm_offset_t pa));
-boolean_t	 pmap_ts_referenced __P((vm_offset_t pa));
-void		 pmap_kenter __P((vm_offset_t, vm_offset_t));
+boolean_t	 pmap_is_modified __P((vm_page_t m));
+boolean_t	 pmap_ts_referenced __P((vm_page_t m));
+void		 pmap_kenter __P((vm_offset_t va, vm_offset_t pa));
+void		 pmap_kenterp __P((vm_offset_t va, vm_offset_t pa));
 void		 pmap_kremove __P((vm_offset_t));
 vm_offset_t	 pmap_map __P((vm_offset_t, vm_offset_t, vm_offset_t, int));
@@ -115,6 +116,6 @@
			vm_object_t object, vm_pindex_t pindex, vm_offset_t size,
			int pagelimit));
-boolean_t	 pmap_page_exists __P((pmap_t, vm_offset_t));
-void		 pmap_page_protect __P((vm_offset_t, vm_prot_t));
+boolean_t	 pmap_page_exists __P((pmap_t pmap, vm_page_t m));
+void		 pmap_page_protect __P((vm_page_t m, vm_prot_t prot));
 void		 pmap_pageable __P((pmap_t, vm_offset_t, vm_offset_t,
			boolean_t));
@@ -141,5 +142,5 @@
 void		 pmap_activate __P((struct proc *p));
 vm_offset_t	 pmap_addr_hint __P((vm_object_t obj, vm_offset_t addr, vm_size_t size));
-void	pmap_init2 __P((void));
+void		 pmap_init2 __P((void));
 #endif /* _KERNEL */
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/swap_pager.c ./vm/swap_pager.c
--- ../sys/vm/swap_pager.c	Fri May 5 06:38:58 2000
+++ ./vm/swap_pager.c	Wed May 10 18:46:28 2000
@@ -1593,5 +1593,5 @@
		 */
-		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+		pmap_clear_modify(m);
		m->valid = VM_PAGE_BITS_ALL;
		vm_page_undirty(m);
@@ -1619,5 +1619,5 @@
			 */
			vm_page_protect(m, VM_PROT_READ);
-			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+			pmap_clear_modify(m);
			vm_page_undirty(m);
			vm_page_io_finish(m);
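
The vm/pmap.h hunk above is the machine-independent half of the interface change: every pmap routine that used to identify a page by physical address now takes the vm_page_t itself, which avoids the pa_to_pvh() translation and works for pages outside the old managed range. As the swap_pager hunks above and the VM hunks below show, call sites convert mechanically; sketched before/after, grounded directly in the diff:

	/* Old interface: identify the page by physical address. */
	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
	pmap_enter(map->pmap, va, VM_PAGE_TO_PHYS(m), prot, wired);

	/* New interface: pass the vm_page_t straight through. */
	pmap_clear_modify(m);
	pmap_enter(map->pmap, va, m, prot, wired);
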
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_fault.c ./vm/vm_fault.c
--- ../sys/vm/vm_fault.c	Tue Mar 28 06:44:44 2000
+++ ./vm/vm_fault.c	Sat May 13 18:13:16 2000
@@ -398,4 +398,5 @@
	if ((fs.first_object->type != OBJT_DEVICE) &&
+	    (fs.first_object->type != OBJT_PHYS) &&
	    (behavior == MAP_ENTRY_BEHAV_SEQUENTIAL ||
	     (behavior != MAP_ENTRY_BEHAV_RANDOM &&
@@ -827,5 +828,5 @@
	}
 
-	pmap_enter(fs.map->pmap, vaddr, VM_PAGE_TO_PHYS(fs.m), prot, wired);
+	pmap_enter(fs.map->pmap, vaddr, fs.m, prot, wired);
	if (((fault_flags & VM_FAULT_WIRE_MASK) == 0) && (wired == 0)) {
@@ -1076,6 +1077,5 @@
	vm_page_flag_clear(dst_m, PG_ZERO);
-	pmap_enter(dst_map->pmap, vaddr, VM_PAGE_TO_PHYS(dst_m),
-	    prot, FALSE);
+	pmap_enter(dst_map->pmap, vaddr, dst_m, prot, FALSE);
	vm_page_flag_set(dst_m, PG_WRITEABLE|PG_MAPPED);
@@ -1123,5 +1123,5 @@
	 * we don't fault-ahead for device pager
	 */
-	if (object->type == OBJT_DEVICE) {
+	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS) {
		*reqpage = 0;
		marray[0] = m;
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_kern.c ./vm/vm_kern.c
--- ../sys/vm/vm_kern.c	Tue Mar 28 06:44:44 2000
+++ ./vm/vm_kern.c	Wed May 10 18:46:28 2000
@@ -400,6 +400,5 @@
		 * Because this is kernel_pmap, this call will not block.
		 */
-		pmap_enter(kernel_pmap, addr + i, VM_PAGE_TO_PHYS(m),
-			VM_PROT_ALL, 1);
+		pmap_enter(kernel_pmap, addr + i, m, VM_PROT_ALL, 1);
		vm_page_flag_set(m, PG_MAPPED | PG_WRITEABLE | PG_REFERENCED);
	}
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_mmap.c ./vm/vm_mmap.c
--- ../sys/vm/vm_mmap.c	Sat Apr 22 12:42:08 2000
+++ ./vm/vm_mmap.c	Sat May 13 18:11:40 2000
@@ -810,8 +810,8 @@
		mincoreinfo = MINCORE_INCORE;
		if (m->dirty ||
-			pmap_is_modified(VM_PAGE_TO_PHYS(m)))
+			pmap_is_modified(m))
			mincoreinfo |= MINCORE_MODIFIED_OTHER;
		if ((m->flags & PG_REFERENCED) ||
-			pmap_ts_referenced(VM_PAGE_TO_PHYS(m))) {
+			pmap_ts_referenced(m)) {
			vm_page_flag_set(m, PG_REFERENCED);
			mincoreinfo |= MINCORE_REFERENCED_OTHER;
@@ -1097,5 +1097,5 @@
	 * Force device mappings to be shared.
	 */
-	if (type == OBJT_DEVICE) {
+	if (type == OBJT_DEVICE || type == OBJT_PHYS) {
		flags &= ~(MAP_PRIVATE|MAP_COPY);
		flags |= MAP_SHARED;
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_object.c ./vm/vm_object.c
--- ../sys/vm/vm_object.c	Wed Apr 19 12:42:01 2000
+++ ./vm/vm_object.c	Wed May 10 18:46:28 2000
@@ -868,5 +868,5 @@
		 * it.
		 */
-		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+		pmap_clear_modify(m);
		m->dirty = 0;
		m->act_count = 0;
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_object.h ./vm/vm_object.h
--- ../sys/vm/vm_object.h	Wed Dec 29 02:57:44 1999
+++ ./vm/vm_object.h	Sat May 13 18:29:40 2000
@@ -75,5 +75,6 @@
 #include <sys/queue.h>
 
-enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_DEAD };
+enum obj_type { OBJT_DEFAULT, OBJT_SWAP, OBJT_VNODE, OBJT_DEVICE, OBJT_PHYS,
+		OBJT_DEAD };
 typedef u_char objtype_t;
@@ -122,4 +123,13 @@
			TAILQ_HEAD(, vm_page) devp_pglist;
		} devp;
+
+		/*
+		 * Physmem pager
+		 *
+		 *	physp_pglist - list of allocated pages
+		 */
+		struct {
+			TAILQ_HEAD(, vm_page) physp_pglist;
+		} physp;
 
		/*
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_page.c ./vm/vm_page.c
--- ../sys/vm/vm_page.c	Tue Mar 28 06:44:44 2000
+++ ./vm/vm_page.c	Thu May 11 03:12:05 2000
@@ -120,5 +120,5 @@
 vm_page_t vm_page_array = 0;
-static int vm_page_array_size = 0;
+int vm_page_array_size = 0;
 long first_page = 0;
 int vm_page_zero_count = 0;
@@ -144,4 +144,28 @@
 
 /*
+ *	vm_add_new_page:
+ *
+ *	Add a new page to the freelist for use by the system.
+ *	Must be called at splhigh().
+ */
+vm_page_t
+vm_add_new_page(pa)
+	vm_offset_t pa;
+{
+	vm_page_t m;
+
+	++cnt.v_page_count;
+	++cnt.v_free_count;
+	m = PHYS_TO_VM_PAGE(pa);
+	m->phys_addr = pa;
+	m->flags = 0;
+	m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
+	m->queue = m->pc + PQ_FREE;
+	TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
+	vm_page_queues[m->queue].lcnt++;
+	return (m);
+}
+
+/*
  *	vm_page_startup:
  *
@@ -160,5 +184,4 @@
 {
	register vm_offset_t mapped;
-	register vm_page_t m;
	register struct vm_page **bucket;
	vm_size_t npages, page_range;
@@ -297,13 +320,5 @@
		pa = phys_avail[i];
		while (pa < phys_avail[i + 1] && npages-- > 0) {
-			++cnt.v_page_count;
-			++cnt.v_free_count;
-			m = PHYS_TO_VM_PAGE(pa);
-			m->phys_addr = pa;
-			m->flags = 0;
-			m->pc = (pa >> PAGE_SHIFT) & PQ_L2_MASK;
-			m->queue = m->pc + PQ_FREE;
-			TAILQ_INSERT_HEAD(&vm_page_queues[m->queue].pl, m, pageq);
-			vm_page_queues[m->queue].lcnt++;
+			vm_add_new_page(pa);
			pa += PAGE_SIZE;
		}
@@ -1519,5 +1534,5 @@
	m->dirty &= ~pagebits;
	if (base == 0 && size == PAGE_SIZE) {
-		pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+		pmap_clear_modify(m);
		vm_page_flag_clear(m, PG_NOSYNC);
	}
@@ -1650,6 +1665,5 @@
	vm_page_t m;
 {
-	if ((m->dirty != VM_PAGE_BITS_ALL) &&
-	    pmap_is_modified(VM_PAGE_TO_PHYS(m))) {
+	if ((m->dirty != VM_PAGE_BITS_ALL) && pmap_is_modified(m)) {
		vm_page_dirty(m);
	}
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_page.h ./vm/vm_page.h
--- ../sys/vm/vm_page.h	Wed Dec 29 02:57:44 1999
+++ ./vm/vm_page.h	Thu May 11 02:59:06 2000
@@ -118,4 +118,5 @@
	vm_pindex_t pindex;		/* offset into object (O,P) */
	vm_offset_t phys_addr;		/* physical address of page */
+	struct md_page md;		/* machine dependent stuff */
	u_short	queue;			/* page queue index */
	u_short	flags,			/* see below */
@@ -279,4 +280,5 @@
 extern vm_page_t vm_page_array;		/* First resident page in table */
+extern int vm_page_array_size;		/* number of vm_page_t's */
 extern long first_page;			/* first physical page number */
@@ -397,4 +399,5 @@
 void vm_page_rename __P((vm_page_t, vm_object_t, vm_pindex_t));
 vm_offset_t vm_page_startup __P((vm_offset_t, vm_offset_t, vm_offset_t));
+vm_page_t vm_add_new_page __P((vm_offset_t pa));
 void vm_page_unwire __P((vm_page_t, int));
 void vm_page_wire __P((vm_page_t));
@@ -447,11 +450,16 @@
 vm_page_protect(vm_page_t mem, int prot)
 {
+#if 0
+	/* XXX dyson does this */
+	if (mem->flags & PG_FICTITIOUS)
+		return;
+#endif
	if (prot == VM_PROT_NONE) {
		if (mem->flags & (PG_WRITEABLE|PG_MAPPED)) {
-			pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_NONE);
+			pmap_page_protect(mem, VM_PROT_NONE);
			vm_page_flag_clear(mem, PG_WRITEABLE|PG_MAPPED);
		}
	} else if ((prot == VM_PROT_READ) && (mem->flags & PG_WRITEABLE)) {
-		pmap_page_protect(VM_PAGE_TO_PHYS(mem), VM_PROT_READ);
+		pmap_page_protect(mem, VM_PROT_READ);
		vm_page_flag_clear(mem, PG_WRITEABLE);
	}
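
vm_add_new_page() above factors the free-queue insertion out of vm_page_startup(), so a single physical page can be handed to the VM system after boot; the rewritten startup loop doubles as the usage example:

	/*
	 * Sketch of the rewritten vm_page_startup() inner loop; pa walks
	 * one phys_avail[] chunk a page at a time.
	 */
	pa = phys_avail[i];
	while (pa < phys_avail[i + 1] && npages-- > 0) {
		(void)vm_add_new_page(pa);	/* counts page, queues on PQ_FREE */
		pa += PAGE_SIZE;
	}
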
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_pageout.c ./vm/vm_pageout.c
--- ../sys/vm/vm_pageout.c	Tue Mar 28 06:44:44 2000
+++ ./vm/vm_pageout.c	Sat May 13 18:10:08 2000
@@ -396,5 +396,5 @@
			 * worked.
			 */
-			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
+			pmap_clear_modify(mt);
			vm_page_undirty(mt);
			break;
@@ -449,5 +449,5 @@
	int s;
 
-	if (object->type == OBJT_DEVICE)
+	if (object->type == OBJT_DEVICE || object->type == OBJT_PHYS)
		return;
@@ -475,11 +475,11 @@
		    p->hold_count != 0 ||
		    p->busy != 0 ||
-		    (p->flags & PG_BUSY) ||
-		    !pmap_page_exists(vm_map_pmap(map), VM_PAGE_TO_PHYS(p))) {
+		    (p->flags & PG_BUSY) ||	/* XXX dyson */
+		    !pmap_page_exists(vm_map_pmap(map), p)) {
			p = next;
			continue;
		}
 
-		actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(p));
+		actcount = pmap_ts_referenced(p);
		if (actcount) {
			vm_page_flag_set(p, PG_REFERENCED);
@@ -710,5 +710,5 @@
		if (m->object->ref_count == 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
-			pmap_clear_reference(VM_PAGE_TO_PHYS(m));
+			pmap_clear_reference(m);
 
		/*
@@ -722,5 +722,5 @@
		 */
		} else if (((m->flags & PG_REFERENCED) == 0) &&
-			(actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m)))) {
+			(actcount = pmap_ts_referenced(m))) {
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE);
@@ -736,5 +736,5 @@
		if ((m->flags & PG_REFERENCED) != 0) {
			vm_page_flag_clear(m, PG_REFERENCED);
-			actcount = pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
+			actcount = pmap_ts_referenced(m);
			vm_page_activate(m);
			m->act_count += (actcount + ACT_ADVANCE + 1);
@@ -988,5 +988,5 @@
			actcount += 1;
		}
-		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
+		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
@@ -1200,5 +1200,5 @@
		}
-		actcount += pmap_ts_referenced(VM_PAGE_TO_PHYS(m));
+		actcount += pmap_ts_referenced(m);
		if (actcount) {
			m->act_count += ACT_ADVANCE + actcount;
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vm_pager.c ./vm/vm_pager.c
--- ../sys/vm/vm_pager.c	Fri May 5 06:38:58 2000
+++ ./vm/vm_pager.c	Sat May 13 18:09:15 2000
@@ -93,4 +93,5 @@
 extern struct pagerops vnodepagerops;
 extern struct pagerops devicepagerops;
+extern struct pagerops physpagerops;
 
 int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */
@@ -174,4 +175,5 @@
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
+	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
 };
diff -ru2N --exclude=compile --exclude=ASHBURTON ../sys/vm/vnode_pager.c ./vm/vnode_pager.c
--- ../sys/vm/vnode_pager.c	Fri May 5 06:38:58 2000
+++ ./vm/vnode_pager.c	Wed May 10 18:46:28 2000
@@ -453,5 +453,5 @@
	}
	vm_pager_unmap_page(kva);
-	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+	pmap_clear_modify(m);
	vm_page_flag_clear(m, PG_ZERO);
	if (error) {
@@ -516,5 +516,5 @@
		vm_pager_unmap_page(kva);
	}
-	pmap_clear_modify(VM_PAGE_TO_PHYS(m));
+	pmap_clear_modify(m);
	vm_page_undirty(m);
	vm_page_flag_clear(m, PG_ZERO);
@@ -783,5 +783,5 @@
			mt->valid = VM_PAGE_BITS_ALL;
			vm_page_undirty(mt);	/* should be an assert? XXX */
-			pmap_clear_modify(VM_PAGE_TO_PHYS(mt));
+			pmap_clear_modify(mt);
		} else {
		/*