Index: sys/sparc64/sparc64/pmap.c
===================================================================
--- sys/sparc64/sparc64/pmap.c	(revision 252262)
+++ sys/sparc64/sparc64/pmap.c	(working copy)
@@ -765,7 +765,6 @@ pmap_page_init(vm_page_t m)
 
 	TAILQ_INIT(&m->md.tte_list);
 	m->md.color = DCACHE_COLOR(VM_PAGE_TO_PHYS(m));
-	m->md.flags = 0;
 	m->md.pmap = NULL;
 }
 
Index: sys/sparc64/include/pmap.h
===================================================================
--- sys/sparc64/include/pmap.h	(revision 252262)
+++ sys/sparc64/include/pmap.h	(working copy)
@@ -56,7 +56,6 @@ struct md_page {
 	struct pmap *pmap;
 	uint32_t colors[DCACHE_COLORS];
 	int32_t color;
-	uint32_t flags;
 };
 
 struct pmap {
Index: sys/ia64/ia64/pmap.c
===================================================================
--- sys/ia64/ia64/pmap.c	(revision 252262)
+++ sys/ia64/ia64/pmap.c	(working copy)
@@ -473,7 +473,7 @@ pmap_page_to_va(vm_page_t m)
 	vm_offset_t va;
 
 	pa = VM_PAGE_TO_PHYS(m);
-	va = (m->md.memattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
+	va = (m->mdmemattr == VM_MEMATTR_UNCACHEABLE) ? IA64_PHYS_TO_RR6(pa) :
 	    IA64_PHYS_TO_RR7(pa);
 	return (va);
 }
@@ -486,7 +486,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.memattr = VM_MEMATTR_DEFAULT;
+	m->mdmemattr = VM_MEMATTR_DEFAULT;
 }
 
 /*
@@ -1439,7 +1439,7 @@ pmap_qenter(vm_offset_t va, vm_page_t *m, int coun
 		else
 			pmap_enter_vhpt(pte, va);
 		pmap_pte_prot(kernel_pmap, pte, VM_PROT_ALL);
-		pmap_pte_attr(pte, m[i]->md.memattr);
+		pmap_pte_attr(pte, m[i]->mdmemattr);
 		pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m[i]), FALSE, FALSE);
 		va += PAGE_SIZE;
 	}
@@ -1768,7 +1768,7 @@ validate:
 	 * adds the pte to the VHPT if necessary.
 	 */
 	pmap_pte_prot(pmap, pte, prot);
-	pmap_pte_attr(pte, m->md.memattr);
+	pmap_pte_attr(pte, m->mdmemattr);
 	pmap_set_pte(pte, va, pa, wired, managed);
 
 	/* Invalidate the I-cache when needed. */
@@ -1875,7 +1875,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t v
 	pmap_enter_vhpt(pte, va);
 	pmap_pte_prot(pmap, pte, prot & (VM_PROT_READ | VM_PROT_EXECUTE));
-	pmap_pte_attr(pte, m->md.memattr);
+	pmap_pte_attr(pte, m->mdmemattr);
 	pmap_set_pte(pte, va, VM_PAGE_TO_PHYS(m), FALSE, managed);
 
 	if (prot & VM_PROT_EXECUTE)
@@ -2419,7 +2419,7 @@ pmap_remove_write(vm_page_t m)
 			}
 			prot &= ~VM_PROT_WRITE;
 			pmap_pte_prot(pmap, pte, prot);
-			pmap_pte_attr(pte, m->md.memattr);
+			pmap_pte_attr(pte, m->mdmemattr);
 			pmap_invalidate_page(pv->pv_va);
 		}
 		pmap_switch(oldpmap);
@@ -2501,7 +2501,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma
 	void *va;
 
 	rw_wlock(&pvh_global_lock);
-	m->md.memattr = ma;
+	m->mdmemattr = ma;
 	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h	(revision 252262)
+++ sys/vm/vm_page.h	(working copy)
@@ -133,6 +133,7 @@ struct vm_page {
 	vm_pindex_t pindex;		/* offset into object (O,P) */
 	vm_paddr_t phys_addr;		/* physical address of page */
 	struct md_page md;		/* machine dependant stuff */
+	vm_memattr_t mdmemattr;		/* arch specific memory attribute */
 	uint8_t	queue;			/* page queue index (P,Q) */
 	int8_t segind;
 	short hold_count;		/* page hold count (P) */
Index: sys/i386/include/pmap.h
===================================================================
--- sys/i386/include/pmap.h	(revision 252262)
+++ sys/i386/include/pmap.h	(working copy)
@@ -429,7 +429,6 @@ struct pv_chunk;
 
 struct md_page {
 	TAILQ_HEAD(,pv_entry)	pv_list;
-	int			pat_mode;
 };
 
 struct pmap {
@@ -499,7 +498,7 @@ extern char *ptvmmap;		/* poor name! */
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
Index: sys/i386/i386/vm_machdep.c
===================================================================
--- sys/i386/i386/vm_machdep.c	(revision 252262)
+++ sys/i386/i386/vm_machdep.c	(working copy)
@@ -864,10 +864,10 @@ sf_buf_alloc(struct vm_page *m, int flags)
 	opte = *ptep;
 #ifdef XEN
 	PT_SET_MA(sf->kva, xpmap_ptom(VM_PAGE_TO_PHYS(m)) | pgeflag
-	    | PG_RW | PG_V | pmap_cache_bits(m->md.pat_mode, 0));
+	    | PG_RW | PG_V | pmap_cache_bits(m->mdmemattr, 0));
 #else
 	*ptep = VM_PAGE_TO_PHYS(m) | pgeflag | PG_RW | PG_V |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 #endif
 
 	/*
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c	(revision 252262)
+++ sys/i386/i386/pmap.c	(working copy)
@@ -645,7 +645,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pat_mode = PAT_WRITE_BACK;
+	m->mdmemattr = PAT_WRITE_BACK;
 }
 
 #ifdef PAE
@@ -1534,7 +1534,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int co
 	endpte = pte + count;
 	while (pte < endpte) {
 		m = *ma++;
-		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 			oldpte |= *pte;
 			pte_store(pte, pa | pgeflag | PG_RW | PG_V);
@@ -3531,7 +3531,7 @@ validate:
 	/*
 	 * Now validate mapping with desired protection/wiring.
 	 */
-	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->md.pat_mode, 0) | PG_V);
+	newpte = (pt_entry_t)(pa | pmap_cache_bits(m->mdmemattr, 0) | PG_V);
 	if ((prot & VM_PROT_WRITE) != 0) {
 		newpte |= PG_RW;
 		if ((newpte & PG_MANAGED) != 0)
@@ -3620,7 +3620,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_pag
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
-	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 1) |
 	    PG_PS | PG_V;
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
@@ -3811,7 +3811,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t v
 	 */
 	pmap->pm_stats.resident_count++;
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 #ifdef PAE
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		pa |= pg_nx;
@@ -3854,7 +3854,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p;
-	int pat_mode;
+	vm_memattr_t pat_mode;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
@@ -3866,7 +3866,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 		p = vm_page_lookup(object, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("pmap_object_init_pt: invalid page %p", p));
-		pat_mode = p->md.pat_mode;
+		pat_mode = p->mdmemattr;
 
 		/*
 		 * Abort the mapping if the first page is not physically
@@ -3887,7 +3887,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
-			    pat_mode != p->md.pat_mode)
+			    pat_mode != p->mdmemattr)
 				return;
 			p = TAILQ_NEXT(p, listq);
 		}
@@ -4118,7 +4118,7 @@ pmap_zero_page(vm_page_t m)
 		panic("pmap_zero_page: CMAP2 busy");
 	sched_pin();
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 	invlcaddr(sysmaps->CADDR2);
 	pagezero(sysmaps->CADDR2);
 	*sysmaps->CMAP2 = 0;
@@ -4143,7 +4143,7 @@ pmap_zero_page_area(vm_page_t m, int off, int size
 		panic("pmap_zero_page_area: CMAP2 busy");
 	sched_pin();
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 	invlcaddr(sysmaps->CADDR2);
 	if (off == 0 && size == PAGE_SIZE)
 		pagezero(sysmaps->CADDR2);
@@ -4168,7 +4168,7 @@ pmap_zero_page_idle(vm_page_t m)
 		panic("pmap_zero_page_idle: CMAP3 busy");
 	sched_pin();
 	*CMAP3 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) | PG_A | PG_M |
-	    pmap_cache_bits(m->md.pat_mode, 0);
+	    pmap_cache_bits(m->mdmemattr, 0);
 	invlcaddr(CADDR3);
 	pagezero(CADDR3);
 	*CMAP3 = 0;
@@ -4196,9 +4196,9 @@ pmap_copy_page(vm_page_t src, vm_page_t dst)
 	invlpg((u_int)sysmaps->CADDR1);
 	invlpg((u_int)sysmaps->CADDR2);
 	*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(src) | PG_A |
-	    pmap_cache_bits(src->md.pat_mode, 0);
+	    pmap_cache_bits(src->mdmemattr, 0);
 	*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(dst) | PG_A | PG_M |
-	    pmap_cache_bits(dst->md.pat_mode, 0);
+	    pmap_cache_bits(dst->mdmemattr, 0);
 	bcopy(sysmaps->CADDR1, sysmaps->CADDR2, PAGE_SIZE);
 	*sysmaps->CMAP1 = 0;
 	*sysmaps->CMAP2 = 0;
@@ -4235,9 +4235,9 @@ pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offs
 		b_pg_offset = b_offset & PAGE_MASK;
 		cnt = min(cnt, PAGE_SIZE - b_pg_offset);
 		*sysmaps->CMAP1 = PG_V | VM_PAGE_TO_PHYS(a_pg) | PG_A |
-		    pmap_cache_bits(b_pg->md.pat_mode, 0);
+		    pmap_cache_bits(b_pg->mdmemattr, 0);
 		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(b_pg) | PG_A |
-		    PG_M | pmap_cache_bits(b_pg->md.pat_mode, 0);
+		    PG_M | pmap_cache_bits(b_pg->mdmemattr, 0);
 		a_cp = sysmaps->CADDR1 + a_pg_offset;
 		b_cp = sysmaps->CADDR2 + b_pg_offset;
 		bcopy(a_cp, b_cp, cnt);
@@ -5051,7 +5051,7 @@ void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
-	m->md.pat_mode = ma;
+	m->mdmemattr = ma;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
@@ -5089,7 +5089,7 @@ pmap_flush_page(vm_page_t m)
 			panic("pmap_flush_page: CMAP2 busy");
 		sched_pin();
 		*sysmaps->CMAP2 = PG_V | PG_RW | VM_PAGE_TO_PHYS(m) |
-		    PG_A | PG_M | pmap_cache_bits(m->md.pat_mode, 0);
+		    PG_A | PG_M | pmap_cache_bits(m->mdmemattr, 0);
 		invlcaddr(sysmaps->CADDR2);
 		sva = (vm_offset_t)sysmaps->CADDR2;
 		eva = sva + PAGE_SIZE;
Index: sys/i386/xen/pmap.c
===================================================================
--- sys/i386/xen/pmap.c	(revision 252262)
+++ sys/i386/xen/pmap.c	(working copy)
@@ -536,7 +536,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pat_mode = PAT_WRITE_BACK;
+	m->mdmemattr = PAT_WRITE_BACK;
 }
 
 /*
@@ -3110,7 +3110,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p;
-	int pat_mode;
+	vm_memattr_t pat_mode;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
@@ -3122,7 +3122,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 		p = vm_page_lookup(object, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("pmap_object_init_pt: invalid page %p", p));
-		pat_mode = p->md.pat_mode;
+		pat_mode = p->mdmemattr;
 
 		/*
 		 * Abort the mapping if the first page is not physically
@@ -3143,7 +3143,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
-			    pat_mode != p->md.pat_mode)
+			    pat_mode != p->mdmemattr)
 				return;
 			p = TAILQ_NEXT(p, listq);
 		}
@@ -4067,7 +4067,7 @@ void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
-	m->md.pat_mode = ma;
+	m->mdmemattr = ma;
 	if ((m->flags & PG_FICTITIOUS) != 0)
 		return;
@@ -4106,7 +4106,7 @@ pmap_flush_page(vm_page_t m)
 		sched_pin();
 		PT_SET_MA(sysmaps->CADDR2, PG_V | PG_RW |
 		    VM_PAGE_TO_MACH(m) | PG_A | PG_M |
-		    pmap_cache_bits(m->md.pat_mode, 0));
+		    pmap_cache_bits(m->mdmemattr, 0));
 		invlcaddr(sysmaps->CADDR2);
 		sva = (vm_offset_t)sysmaps->CADDR2;
 		eva = sva + PAGE_SIZE;
Index: sys/amd64/include/pmap.h
===================================================================
--- sys/amd64/include/pmap.h	(revision 252262)
+++ sys/amd64/include/pmap.h	(working copy)
@@ -233,7 +233,6 @@ struct pv_chunk;
 
 struct md_page {
 	TAILQ_HEAD(,pv_entry)	pv_list;
-	int			pat_mode;
 };
 
 /*
@@ -300,7 +299,7 @@ extern vm_paddr_t dump_avail[];
 extern vm_offset_t virtual_avail;
 extern vm_offset_t virtual_end;
 
-#define	pmap_page_get_memattr(m)	((vm_memattr_t)(m)->md.pat_mode)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 #define	pmap_unmapbios(va, sz)	pmap_unmapdev((va), (sz))
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c	(revision 252262)
+++ sys/amd64/amd64/pmap.c	(working copy)
@@ -791,7 +791,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pat_mode = PAT_WRITE_BACK;
+	m->mdmemattr = PAT_WRITE_BACK;
 }
 
 /*
@@ -1454,7 +1454,7 @@ pmap_qenter(vm_offset_t sva, vm_page_t *ma, int co
 	endpte = pte + count;
 	while (pte < endpte) {
 		m = *ma++;
-		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+		pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 		if ((*pte & (PG_FRAME | PG_PTE_CACHE)) != pa) {
 			oldpte |= *pte;
 			pte_store(pte, pa | PG_G | PG_RW | PG_V);
@@ -3469,7 +3469,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 		newpte |= PG_U;
 	if (pmap == kernel_pmap)
 		newpte |= PG_G;
-	newpte |= pmap_cache_bits(m->md.pat_mode, 0);
+	newpte |= pmap_cache_bits(m->mdmemattr, 0);
 
 	mpte = NULL;
@@ -3657,7 +3657,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_pag
 		    " in pmap %p", va, pmap);
 		return (FALSE);
 	}
-	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
+	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 1) |
 	    PG_PS | PG_V;
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
@@ -3859,7 +3859,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t v
 	 */
 	pmap_resident_count_inc(pmap, 1);
 
-	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 0);
+	pa = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->mdmemattr, 0);
 	if ((prot & VM_PROT_EXECUTE) == 0)
 		pa |= pg_nx;
@@ -3900,7 +3900,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 	pd_entry_t *pde;
 	vm_paddr_t pa, ptepa;
 	vm_page_t p, pdpg;
-	int pat_mode;
+	vm_memattr_t pat_mode;
 
 	VM_OBJECT_ASSERT_WLOCKED(object);
 	KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG,
@@ -3911,7 +3911,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 		p = vm_page_lookup(object, pindex);
 		KASSERT(p->valid == VM_PAGE_BITS_ALL,
 		    ("pmap_object_init_pt: invalid page %p", p));
-		pat_mode = p->md.pat_mode;
+		pat_mode = p->mdmemattr;
 
 		/*
 		 * Abort the mapping if the first page is not physically
@@ -3932,7 +3932,7 @@ pmap_object_init_pt(pmap_t pmap, vm_offset_t addr,
 			KASSERT(p->valid == VM_PAGE_BITS_ALL,
 			    ("pmap_object_init_pt: invalid page %p", p));
 			if (pa != VM_PAGE_TO_PHYS(p) ||
-			    pat_mode != p->md.pat_mode)
+			    pat_mode != p->mdmemattr)
 				return;
 			p = TAILQ_NEXT(p, listq);
 		}
@@ -5101,7 +5101,7 @@ void
 pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma)
 {
 
-	m->md.pat_mode = ma;
+	m->mdmemattr = ma;
 
 	/*
 	 * If "m" is a normal page, update its direct mapping.  This update
@@ -5110,7 +5110,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma
 	 */
 	if ((m->flags & PG_FICTITIOUS) == 0 &&
 	    pmap_change_attr(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)), PAGE_SIZE,
-	    m->md.pat_mode))
+	    m->mdmemattr))
 		panic("memory attribute change on the direct map failed");
 }
Index: sys/powerpc/include/pmap.h
===================================================================
--- sys/powerpc/include/pmap.h	(revision 252262)
+++ sys/powerpc/include/pmap.h	(working copy)
@@ -144,11 +144,10 @@ struct pmap {
 
 struct md_page {
 	u_int64_t mdpg_attrs;
-	vm_memattr_t mdpg_cache_attrs;
 	struct	pvo_head mdpg_pvoh;
 };
 
-#define	pmap_page_get_memattr(m)	((m)->md.mdpg_cache_attrs)
+#define	pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define	pmap_page_is_mapped(m)	(!LIST_EMPTY(&(m)->md.mdpg_pvoh))
 
 /*
Index: sys/powerpc/aim/mmu_oea.c
===================================================================
--- sys/powerpc/aim/mmu_oea.c	(revision 252262)
+++ sys/powerpc/aim/mmu_oea.c	(working copy)
@@ -1476,7 +1476,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_m
 	u_int	lo;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
-		m->md.mdpg_cache_attrs = ma;
+		m->mdmemattr = ma;
 		return;
 	}
@@ -1499,7 +1499,7 @@ moea_page_set_memattr(mmu_t mmu, vm_page_t m, vm_m
 			mtx_unlock(&moea_table_mutex);
 		PMAP_UNLOCK(pmap);
 	}
-	m->md.mdpg_cache_attrs = ma;
+	m->mdmemattr = ma;
 	rw_wunlock(&pvh_global_lock);
 }
Index: sys/powerpc/aim/mmu_oea64.c
===================================================================
--- sys/powerpc/aim/mmu_oea64.c	(revision 252262)
+++ sys/powerpc/aim/mmu_oea64.c	(working copy)
@@ -1659,7 +1659,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm
 	uint64_t lo;
 
 	if ((m->oflags & VPO_UNMANAGED) != 0) {
-		m->md.mdpg_cache_attrs = ma;
+		m->mdmemattr = ma;
 		return;
 	}
@@ -1681,7 +1681,7 @@ moea64_page_set_memattr(mmu_t mmu, vm_page_t m, vm
 		PMAP_UNLOCK(pmap);
 	}
 	UNLOCK_TABLE_RD();
-	m->md.mdpg_cache_attrs = ma;
+	m->mdmemattr = ma;
 }
 
 /*
Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c	(revision 252262)
+++ sys/arm/arm/pmap-v6.c	(working copy)
@@ -1183,7 +1183,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
+	m->mdmemattr = VM_MEMATTR_DEFAULT;
 }
 
 static vm_offset_t
@@ -2819,7 +2819,7 @@ validate:
 		if (!(prot & VM_PROT_EXECUTE))
 			npte |= L2_XN;
 
-		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+		if (m->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 			npte |= pte_l2_s_cache_mode;
 	}
@@ -4377,7 +4377,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma
 	 * Remember the memattr in a field that gets used to set the appropriate
 	 * bits in the PTEs as mappings are established.
 	 */
-	m->md.pv_memattr = ma;
+	m->mdmemattr = ma;
 
 	/*
 	 * It appears that this function can only be called before any mappings
Index: sys/arm/arm/pmap.c
===================================================================
--- sys/arm/arm/pmap.c	(revision 252262)
+++ sys/arm/arm/pmap.c	(working copy)
@@ -1380,7 +1380,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_o
 		    (pv->pv_flags & PVF_NC)) {
 			pv->pv_flags &= ~PVF_NC;
-			if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			if (pg->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 				pmap_set_cache_entry(pv, pm, va, 1);
 			continue;
 		}
@@ -1390,7 +1390,7 @@ pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_o
 		    !pmwc && (pv->pv_flags & PVF_NC)) {
 			pv->pv_flags &= ~(PVF_NC | PVF_MWC);
-			if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+			if (pg->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 				pmap_set_cache_entry(pv, pm, va, 1);
 		}
 	}
@@ -1442,8 +1442,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 		if (!(oflags & maskbits)) {
 			if ((maskbits & PVF_WRITE) &&
 			    (pv->pv_flags & PVF_NC)) {
-				if (pg->md.pv_memattr !=
-				    VM_MEMATTR_UNCACHEABLE) {
+				if (pg->mdmemattr != VM_MEMATTR_UNCACHEABLE) {
 					PMAP_LOCK(pm);
 					l2b = pmap_get_l2_bucket(pm, va);
 					ptep = &l2b->l2b_kva[l2pte_index(va)];
@@ -1480,7 +1479,7 @@ pmap_clearbit(struct vm_page *pg, u_int maskbits)
 			 * permission.
 			 */
 			if (maskbits & PVF_WRITE) {
-				if (pg->md.pv_memattr !=
+				if (pg->mdmemattr !=
 				    VM_MEMATTR_UNCACHEABLE)
 					npte |= pte_l2_s_cache_mode;
 				pv->pv_flags &= ~(PVF_NC | PVF_MWC);
@@ -1811,7 +1810,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pv_memattr = VM_MEMATTR_DEFAULT;
+	m->mdmemattr = VM_MEMATTR_DEFAULT;
 }
 
 /*
@@ -3412,7 +3411,7 @@ do_l2b_alloc:
 			    (m->oflags & VPO_UNMANAGED) == 0)
 				vm_page_aflag_set(m, PGA_WRITEABLE);
 		}
-		if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE)
+		if (m->mdmemattr != VM_MEMATTR_UNCACHEABLE)
 			npte |= pte_l2_s_cache_mode;
 		if (m && m == opg) {
 			/*
@@ -5038,7 +5037,7 @@ pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma
 	 * Remember the memattr in a field that gets used to set the appropriate
 	 * bits in the PTEs as mappings are established.
 	 */
-	m->md.pv_memattr = ma;
+	m->mdmemattr = ma;
 
 	/*
 	 * It appears that this function can only be called before any mappings
Index: sys/arm/include/pmap.h
===================================================================
--- sys/arm/include/pmap.h	(revision 252262)
+++ sys/arm/include/pmap.h	(working copy)
@@ -96,7 +96,7 @@ enum mem_type {
 
 #endif
 
-#define pmap_page_get_memattr(m)	((m)->md.pv_memattr)
+#define pmap_page_get_memattr(m)	((m)->mdmemattr)
 #define pmap_page_is_mapped(m)	(!TAILQ_EMPTY(&(m)->md.pv_list))
 #define	pmap_page_is_write_mapped(m)	(((m)->aflags & PGA_WRITEABLE) != 0)
 void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
@@ -120,7 +120,6 @@ struct pv_chunk;
 
 struct md_page {
 	int pvh_attrs;
-	vm_memattr_t	pv_memattr;
 	vm_offset_t pv_kva;		/* first kernel VA mapping */
 	TAILQ_HEAD(,pv_entry) pv_list;
 };
Index: sys/mips/mips/pmap.c
===================================================================
--- sys/mips/mips/pmap.c	(revision 252262)
+++ sys/mips/mips/pmap.c	(working copy)
@@ -594,7 +594,7 @@ pmap_page_init(vm_page_t m)
 {
 
 	TAILQ_INIT(&m->md.pv_list);
-	m->md.pv_flags = 0;
+	m->mdmemattr = 0;
 }
 
 /*
@@ -1435,9 +1435,9 @@ pmap_pv_reclaim(pmap_t locked_pmap)
 			m = PHYS_TO_VM_PAGE(TLBLO_PTE_TO_PA(oldpte));
 			if (pte_test(&oldpte, PTE_D))
 				vm_page_dirty(m);
-			if (m->md.pv_flags & PV_TABLE_REF)
+			if (m->mdmemattr & PV_TABLE_REF)
 				vm_page_aflag_set(m, PGA_REFERENCED);
-			m->md.pv_flags &= ~PV_TABLE_REF;
+			m->mdmemattr &= ~PV_TABLE_REF;
 			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
 			if (TAILQ_EMPTY(&m->md.pv_list))
 				vm_page_aflag_clear(m, PGA_WRITEABLE);
@@ -1705,9 +1705,9 @@ pmap_remove_pte(struct pmap *pmap, pt_entry_t *ptq
 			    __func__, (void *)va, (uintmax_t)oldpte));
 			vm_page_dirty(m);
 		}
-		if (m->md.pv_flags & PV_TABLE_REF)
+		if (m->mdmemattr & PV_TABLE_REF)
 			vm_page_aflag_set(m, PGA_REFERENCED);
-		m->md.pv_flags &= ~PV_TABLE_REF;
+		m->mdmemattr &= ~PV_TABLE_REF;
 		pmap_remove_entry(pmap, m, va);
 	}
@@ -1846,7 +1846,7 @@ pmap_remove_all(vm_page_t m)
 	    ("pmap_remove_all: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
 
-	if (m->md.pv_flags & PV_TABLE_REF)
+	if (m->mdmemattr & PV_TABLE_REF)
 		vm_page_aflag_set(m, PGA_REFERENCED);
 
 	while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
@@ -1893,7 +1893,7 @@ pmap_remove_all(vm_page_t m)
 	}
 
 	vm_page_aflag_clear(m, PGA_WRITEABLE);
-	m->md.pv_flags &= ~PV_TABLE_REF;
+	m->mdmemattr &= ~PV_TABLE_REF;
 	rw_wunlock(&pvh_global_lock);
 }
@@ -2078,7 +2078,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 			mpte->wire_count--;
 
 		if (pte_test(&origpte, PTE_MANAGED)) {
-			m->md.pv_flags |= PV_TABLE_REF;
+			m->mdmemattr |= PV_TABLE_REF;
 			om = m;
 			newpte |= PTE_MANAGED;
 			if (!pte_test(&newpte, PTE_RO))
@@ -2114,7 +2114,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t
 	 * Enter on the PV list if part of our managed memory.
 	 */
 	if ((m->oflags & VPO_UNMANAGED) == 0) {
-		m->md.pv_flags |= PV_TABLE_REF;
+		m->mdmemattr |= PV_TABLE_REF;
 		if (pv == NULL)
 			pv = get_pv_entry(pmap, FALSE);
 		pv->pv_va = va;
@@ -2145,9 +2145,9 @@ validate:
 	*pte = newpte;
 	if (pte_test(&origpte, PTE_V)) {
 		if (pte_test(&origpte, PTE_MANAGED) && opa != pa) {
-			if (om->md.pv_flags & PV_TABLE_REF)
+			if (om->mdmemattr & PV_TABLE_REF)
 				vm_page_aflag_set(om, PGA_REFERENCED);
-			om->md.pv_flags &= ~PV_TABLE_REF;
+			om->mdmemattr &= ~PV_TABLE_REF;
 		}
 		if (pte_test(&origpte, PTE_D)) {
 			KASSERT(!pte_test(&origpte, PTE_RO),
@@ -2854,9 +2854,9 @@ pmap_ts_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
-	if (m->md.pv_flags & PV_TABLE_REF) {
+	if (m->mdmemattr & PV_TABLE_REF) {
 		rw_wlock(&pvh_global_lock);
-		m->md.pv_flags &= ~PV_TABLE_REF;
+		m->mdmemattr &= ~PV_TABLE_REF;
 		rw_wunlock(&pvh_global_lock);
 		return (1);
 	}
@@ -2967,7 +2967,7 @@ pmap_is_referenced(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
-	return ((m->md.pv_flags & PV_TABLE_REF) != 0);
+	return ((m->mdmemattr & PV_TABLE_REF) != 0);
 }
 
 /*
@@ -2982,8 +2982,8 @@ pmap_clear_reference(vm_page_t m)
 	KASSERT((m->oflags & VPO_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	rw_wlock(&pvh_global_lock);
-	if (m->md.pv_flags & PV_TABLE_REF) {
-		m->md.pv_flags &= ~PV_TABLE_REF;
+	if (m->mdmemattr & PV_TABLE_REF) {
+		m->mdmemattr &= ~PV_TABLE_REF;
 	}
 	rw_wunlock(&pvh_global_lock);
 }
Index: sys/mips/include/pmap.h
===================================================================
--- sys/mips/include/pmap.h	(revision 252262)
+++ sys/mips/include/pmap.h	(working copy)
@@ -69,7 +69,6 @@ struct pv_entry;
 struct pv_chunk;
 
 struct md_page {
-	int pv_flags;
 	TAILQ_HEAD(, pv_entry) pv_list;
 };