From 4276a93ae7b10159e9d73f5200a20f32d9a0f80b Mon Sep 17 00:00:00 2001
From: Mark Johnston
Date: Wed, 16 Nov 2022 11:04:15 -0500
Subject: [PATCH 01/52] mips: Fix whitespace in pmap_enter()

No functional change intended.
---
 sys/mips/mips/pmap_mips64.c | 393 ++++++++++++++++++------------------
 1 file changed, 196 insertions(+), 197 deletions(-)

diff --git a/sys/mips/mips/pmap_mips64.c b/sys/mips/mips/pmap_mips64.c
index 2399ecc80808..387ed3bb30f5 100644
--- a/sys/mips/mips/pmap_mips64.c
+++ b/sys/mips/mips/pmap_mips64.c
@@ -2984,91 +2984,91 @@ int
 pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
     u_int flags, int8_t psind __unused)
 {
-        struct rwlock *lock;
-        vm_paddr_t pa, opa;
-        pd_entry_t *pde;
-        pt_entry_t *pte;
-        pt_entry_t origpte, newpte;
-        pv_entry_t pv;
-        vm_page_t mpte, om;
-        boolean_t nosleep;
-
-        va = trunc_page(va);
-        KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
-        KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
-            va >= kmi.clean_eva,
-            ("pmap_enter: managed mapping within the clean submap"));
-        if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
-                VM_OBJECT_ASSERT_LOCKED(m->object);
-
-        mpte = NULL;
-
-        lock = NULL;
-        rw_rlock(&pvh_global_lock);
-        PMAP_LOCK(pmap);
-
-        /*
-         * In the case that a page table page is not resident, we are
-         * creating it here.
-         */
+	struct rwlock *lock;
+	vm_paddr_t pa, opa;
+	pd_entry_t *pde;
+	pt_entry_t *pte;
+	pt_entry_t origpte, newpte;
+	pv_entry_t pv;
+	vm_page_t mpte, om;
+	boolean_t nosleep;
+
+	va = trunc_page(va);
+	KASSERT(va <= VM_MAX_KERNEL_ADDRESS, ("pmap_enter: toobig"));
+	KASSERT((m->oflags & VPO_UNMANAGED) != 0 || va < kmi.clean_sva ||
+	    va >= kmi.clean_eva,
+	    ("pmap_enter: managed mapping within the clean submap"));
+	if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m))
+		VM_OBJECT_ASSERT_LOCKED(m->object);
+
+	mpte = NULL;
+
+	lock = NULL;
+	rw_rlock(&pvh_global_lock);
+	PMAP_LOCK(pmap);
+
+	/*
+	 * In the case that a page table page is not resident, we are
+	 * creating it here.
+	 */
 	if (va < VM_MAXUSER_ADDRESS) {
-                /*
-                 * Here if the pte page isn't mapped, or if it has been
-                 * deallocated.
-                 */
-                nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
-                mpte = pmap_allocpte(pmap, va, nosleep ? NULL : &lock);
-                if (mpte == NULL) {
-                        KASSERT(nosleep != 0,
-                            ("pmap_allocpte failed with sleep allowed"));
-                        if (lock != NULL)
-                                rw_wunlock(lock);
-                        rw_runlock(&pvh_global_lock);
-                        PMAP_UNLOCK(pmap);
-                        return (KERN_RESOURCE_SHORTAGE);
-                }
-        }
+		/*
+		 * Here if the pte page isn't mapped, or if it has been
+		 * deallocated.
+		 */
+		nosleep = (flags & PMAP_ENTER_NOSLEEP) != 0;
+		mpte = pmap_allocpte(pmap, va, nosleep ? NULL : &lock);
+		if (mpte == NULL) {
+			KASSERT(nosleep != 0,
+			    ("pmap_allocpte failed with sleep allowed"));
+			if (lock != NULL)
+				rw_wunlock(lock);
+			rw_runlock(&pvh_global_lock);
+			PMAP_UNLOCK(pmap);
+			return (KERN_RESOURCE_SHORTAGE);
+		}
+	}
 
 	pde = pmap_pde(pmap, va);
 	if (pde_is_1m_superpage(pde)) {
 		panic("%s: attempted pmap_enter on superpage", __func__);
 	}
 	pte = pmap_pde_to_pte(pde, va);
 
-        /*
-         * Page Directory table entry not valid, we need a new PT page
-         */
-        if (pte == NULL) {
-                panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
-                    (void *)pmap->pm_segtab, (void *)va);
-        }
+	/*
+	 * Page Directory table entry not valid, we need a new PT page
+	 */
+	if (pte == NULL) {
+		panic("pmap_enter: invalid page directory, pdir=%p, va=%p",
+		    (void *)pmap->pm_segtab, (void *)va);
+	}
 
-        pa = VM_PAGE_TO_PHYS(m);
-        om = NULL;
-        origpte = *pte;
+	pa = VM_PAGE_TO_PHYS(m);
+	om = NULL;
+	origpte = *pte;
 	opa = TLBLO_PTE_TO_PA(origpte);
 
-        newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
-        /*
-         * pmap_enter() is called during a fault or simulated fault so
-         * set the reference bit now to avoid a fault.
-         */
-        pte_ref_set(&newpte);
-        if ((flags & PMAP_ENTER_WIRED) != 0)
-                newpte |= PTE_W;
-        if (is_kernel_pmap(pmap))
-                newpte |= PTE_G;
-        if (is_cacheable_mem(pa)) {
-                if (m->md.pv_memattr == VM_MEMATTR_UNCACHEABLE)
-                        newpte |= PTE_C_UNCACHED;
-                else
-                        newpte |= PTE_C_CACHE;
-        } else
-                newpte |= PTE_C_UNCACHED;
+	newpte = TLBLO_PA_TO_PFN(pa) | init_pte_prot(m, flags, prot);
+	/*
+	 * pmap_enter() is called during a fault or simulated fault so
+	 * set the reference bit now to avoid a fault.
+	 */
+	pte_ref_set(&newpte);
+	if ((flags & PMAP_ENTER_WIRED) != 0)
+		newpte |= PTE_W;
+	if (is_kernel_pmap(pmap))
+		newpte |= PTE_G;
+	if (is_cacheable_mem(pa)) {
+		if (m->md.pv_memattr == VM_MEMATTR_UNCACHEABLE)
+			newpte |= PTE_C_UNCACHED;
+		else
+			newpte |= PTE_C_CACHE;
+	} else
+		newpte |= PTE_C_UNCACHED;
 #ifdef CPU_CHERI
-        if ((flags & PMAP_ENTER_NOLOADTAGS) != 0)
-                newpte |= PTE_LC;
-        if ((flags & PMAP_ENTER_NOSTORETAGS) != 0)
-                newpte |= PTE_SC;
+	if ((flags & PMAP_ENTER_NOLOADTAGS) != 0)
+		newpte |= PTE_LC;
+	if ((flags & PMAP_ENTER_NOSTORETAGS) != 0)
+		newpte |= PTE_SC;
 #endif
 
 	/*
@@ -3081,35 +3081,34 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		newpte |= PTE_D;
 	}
 
-        /*
-         * Mapping has not changed, must be protection or wiring change.
-         */
-        if (pte_is_valid(&origpte) && opa == pa) {
-                /*
-                 * Wiring change, just update stats. We don't worry about
-                 * wiring PT pages as they remain resident as long as there
-                 * are valid mappings in them. Hence, if a user page is
-                 * wired, the PT page will be also.
-                 */
-                if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
-                        pmap->pm_stats.wired_count++;
-                else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte,
-                    PTE_W))
-                        pmap->pm_stats.wired_count--;
-
-                KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
-                    ("%s: modified page not writable: va: %p, pte: %#jx",
-                    __func__, (void *)va, (uintmax_t)origpte));
-
-                /*
-                 * Remove the extra PT page reference
-                 */
-                if (mpte != NULL) {
-                        mpte->wire_count--;
-                        KASSERT(mpte->wire_count > 0,
-                            ("pmap_enter: missing reference to page table page,"
-                            " va: 0x%lx", va));
-                }
+	/*
+	 * Mapping has not changed, must be protection or wiring change.
+	 */
+	if (pte_is_valid(&origpte) && opa == pa) {
+		/*
+		 * Wiring change, just update stats. We don't worry about
+		 * wiring PT pages as they remain resident as long as there
+		 * are valid mappings in them. Hence, if a user page is
+		 * wired, the PT page will be also.
+		 */
+		if (pte_test(&newpte, PTE_W) && !pte_test(&origpte, PTE_W))
+			pmap->pm_stats.wired_count++;
+		else if (!pte_test(&newpte, PTE_W) && pte_test(&origpte, PTE_W))
+			pmap->pm_stats.wired_count--;
+
+		KASSERT(!pte_test(&origpte, PTE_D | PTE_RO),
+		    ("%s: modified page not writable: va: %p, pte: %#jx",
+		    __func__, (void *)va, (uintmax_t)origpte));
+
+		/*
+		 * Remove the extra PT page reference
+		 */
+		if (mpte != NULL) {
+			mpte->wire_count--;
+			KASSERT(mpte->wire_count > 0,
+			    ("pmap_enter: missing reference to page table page,"
+			    " va: 0x%lx", va));
+		}
 		if (pte_test(&origpte, PTE_MANAGED)) {
 			om = m;
 			newpte |= PTE_MANAGED;
@@ -3121,116 +3120,116 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
 	pv = NULL;
 
-        /*
-         * Mapping has changed, invalidate old range and fall through to
-         * handle validating new mapping.
-         */
-        if (opa) {
-                if (pte_test(&origpte, PTE_W))
-                        pmap->pm_stats.wired_count--;
-
-                if (pte_test(&origpte, PTE_MANAGED)) {
-                        om = PHYS_TO_VM_PAGE(opa);
-                        CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, om);
-                        pv = pmap_pvh_remove(&om->md, pmap, va);
-                }
-                if (mpte != NULL) {
-                        mpte->wire_count--;
-                        KASSERT(mpte->wire_count > 0,
-                            ("pmap_enter: missing reference to page table page,"
-                            " va: %p", (void *)va));
-                }
-        } else
-                pmap_resident_count_inc(pmap, 1);
-
-        /*
-         * Enter on the PV list if part of our managed memory.
-         */
-        if ((m->oflags & VPO_UNMANAGED) == 0) {
-                newpte |= PTE_MANAGED;
+	/*
+	 * Mapping has changed, invalidate old range and fall through to
+	 * handle validating new mapping.
+	 */
+	if (opa) {
+		if (pte_test(&origpte, PTE_W))
+			pmap->pm_stats.wired_count--;
+
+		if (pte_test(&origpte, PTE_MANAGED)) {
+			om = PHYS_TO_VM_PAGE(opa);
+			CHANGE_PV_LIST_LOCK_TO_VM_PAGE(&lock, om);
+			pv = pmap_pvh_remove(&om->md, pmap, va);
+		}
+		if (mpte != NULL) {
+			mpte->wire_count--;
+			KASSERT(mpte->wire_count > 0,
+			    ("pmap_enter: missing reference to page table page,"
+			    " va: %p", (void *)va));
+		}
+	} else
+		pmap_resident_count_inc(pmap, 1);
+
+	/*
+	 * Enter on the PV list if part of our managed memory.
+	 */
+	if ((m->oflags & VPO_UNMANAGED) == 0) {
+		newpte |= PTE_MANAGED;
 		/* Insert Entry */
 		if (pv == NULL)
 			pv = get_pv_entry(pmap, &lock);
-                pv->pv_va = va;
-                CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
-                TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
-                m->md.pv_gen++;
-                if (!pte_test(&newpte, PTE_RO))
-                        vm_page_aflag_set(m, PGA_WRITEABLE);
-        } else if (pv != NULL)
+		pv->pv_va = va;
+		CHANGE_PV_LIST_LOCK_TO_PHYS(&lock, pa);
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
+		m->md.pv_gen++;
+		if (!pte_test(&newpte, PTE_RO))
+			vm_page_aflag_set(m, PGA_WRITEABLE);
+	} else if (pv != NULL)
 		free_pv_entry(pmap, pv);
 
-        /*
-         * Increment counters
-         */
-        if (pte_test(&newpte, PTE_W))
-                pmap->pm_stats.wired_count++;
+	/*
+	 * Increment counters
+	 */
+	if (pte_test(&newpte, PTE_W))
+		pmap->pm_stats.wired_count++;
 
 validate:
 #ifdef PMAP_DEBUG
-        printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
+	printf("pmap_enter: va: %p -> pa: %p\n", (void *)va, (void *)pa);
 #endif
-        /*
-         * if the mapping or permission bits are different, we need to
-         * update the pte.
-         */
-        if ((origpte & ~ (PTE_D|PTE_REF)) != newpte) {
-                newpte |= PTE_VR;
-                if ((flags & VM_PROT_WRITE) != 0)
-                        newpte |= PTE_D;
-                if (pte_is_valid(&origpte)) {
-                        boolean_t invlva = FALSE;
+	/*
+	 * if the mapping or permission bits are different, we need to
+	 * update the pte.
+	 */
+	if ((origpte & ~ (PTE_D|PTE_REF)) != newpte) {
+		newpte |= PTE_VR;
+		if ((flags & VM_PROT_WRITE) != 0)
+			newpte |= PTE_D;
+		if (pte_is_valid(&origpte)) {
+			boolean_t invlva = FALSE;
 
 			origpte = pte_load_store(pte, newpte);
-                        if (pte_is_ref(&origpte)) {
-                                if (pte_test(&origpte, PTE_MANAGED))
-                                        vm_page_aflag_set(om, PGA_REFERENCED);
-                                if (opa != pa)
-                                        invlva = TRUE;
-                        }
-                        if (pte_test(&origpte, PTE_D) &&
+			if (pte_is_ref(&origpte)) {
+				if (pte_test(&origpte, PTE_MANAGED))
+					vm_page_aflag_set(om, PGA_REFERENCED);
+				if (opa != pa)
+					invlva = TRUE;
+			}
+			if (pte_test(&origpte, PTE_D) &&
 			    !pte_test(&origpte, PTE_RO)) {
-                                if (pte_test(&origpte, PTE_MANAGED))
-                                        vm_page_dirty(om);
-                                if ((prot & VM_PROT_WRITE) == 0)
-                                        invlva = TRUE;
-                        }
-                        if (pte_test(&origpte, PTE_MANAGED) &&
-                            TAILQ_EMPTY(&om->md.pv_list) &&
+				if (pte_test(&origpte, PTE_MANAGED))
+					vm_page_dirty(om);
+				if ((prot & VM_PROT_WRITE) == 0)
+					invlva = TRUE;
+			}
+			if (pte_test(&origpte, PTE_MANAGED) &&
+			    TAILQ_EMPTY(&om->md.pv_list) &&
 			    ((om->flags & PG_FICTITIOUS) != 0 ||
 			    TAILQ_EMPTY(&pa_to_pvh(opa)->pv_list)))
-                                vm_page_aflag_clear(om, PGA_WRITEABLE);
-                        if (invlva)
-                                pmap_invalidate_page(pmap, va);
-                } else
+				vm_page_aflag_clear(om, PGA_WRITEABLE);
+			if (invlva)
+				pmap_invalidate_page(pmap, va);
+		} else
 			pte_store(pte, newpte);
-        }
+	}
 
-        /*
-         * If both the page table page and the reservation are fully
-         * populated, then attempt promotion.
-         */
-        if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
-            (m->flags & PG_FICTITIOUS) == 0 &&
-            pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
-                pmap_promote_pde(pmap, pde, va, &lock);
-
-        /*
-         * Sync I & D caches for executable pages. Do this only if the
-         * target pmap belongs to the current process. Otherwise, an
-         * unresolvable TLB miss may occur.
-         */
-        if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
-            (prot & VM_PROT_EXECUTE)) {
-                mips_icache_sync_range(va, PAGE_SIZE);
-                mips_dcache_wbinv_range(va, PAGE_SIZE);
-        }
-        if (lock != NULL)
-                rw_wunlock(lock);
-        rw_runlock(&pvh_global_lock);
-        PMAP_UNLOCK(pmap);
-        return (KERN_SUCCESS);
+	/*
+	 * If both the page table page and the reservation are fully
+	 * populated, then attempt promotion.
+	 */
+	if ((mpte == NULL || mpte->wire_count == NPTEPG) &&
+	    (m->flags & PG_FICTITIOUS) == 0 &&
+	    pg_ps_enabled && vm_reserv_level_iffullpop(m) == 0)
+		pmap_promote_pde(pmap, pde, va, &lock);
+
+	/*
+	 * Sync I & D caches for executable pages. Do this only if the
+	 * target pmap belongs to the current process. Otherwise, an
+	 * unresolvable TLB miss may occur.
+	 */
+	if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
+	    (prot & VM_PROT_EXECUTE)) {
+		mips_icache_sync_range(va, PAGE_SIZE);
+		mips_dcache_wbinv_range(va, PAGE_SIZE);
+	}
+	if (lock != NULL)
+		rw_wunlock(lock);
+	rw_runlock(&pvh_global_lock);
+	PMAP_UNLOCK(pmap);
+	return (KERN_SUCCESS);
 }
 
 /*
@@ -3481,7 +3480,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 		return (FALSE);
 	}
 	pa = VM_PAGE_TO_PHYS(m);
-        newpde = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_VALID | PTE_PS_1M;
+	newpde = PTE_RO | TLBLO_PA_TO_PFN(pa) | PTE_VALID | PTE_PS_1M;
 	if (is_cacheable_mem(pa)) {
 		if (m->md.pv_memattr == VM_MEMATTR_UNCACHEABLE)
 			newpde |= PTE_C_UNCACHED;
-- 
2.41.0