From a95aa3f41bfcc701196d0a10bd47b64fabc1d264 Mon Sep 17 00:00:00 2001
From: Mark Johnston
Date: Wed, 16 Nov 2022 13:34:17 -0500
Subject: [PATCH 08/52] mips: Fix whitespace in pmap_demote_pde_locked()

No functional change intended.
---
 sys/mips/mips/pmap_mips64.c | 226 ++++++++++++++++++------------------
 1 file changed, 113 insertions(+), 113 deletions(-)

diff --git a/sys/mips/mips/pmap_mips64.c b/sys/mips/mips/pmap_mips64.c
index 91793f9dc698..cd6a2c21e42e 100644
--- a/sys/mips/mips/pmap_mips64.c
+++ b/sys/mips/mips/pmap_mips64.c
@@ -2191,119 +2191,119 @@ static boolean_t
 pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde, vm_offset_t va,
     struct rwlock **lockp)
 {
-	pd_entry_t newpde, oldpde;
-	pt_entry_t oldpte, *firstpte, newpte;
-	vm_paddr_t mptepa;
-	vm_page_t mpte;
-	struct spglist free;
-
-	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	oldpde = *pde;
-	oldpte = (pt_entry_t)oldpde;
-	KASSERT(pte_is_1m_superpage(&oldpte) && pte_is_valid(&oldpte),
-	    ("%s: oldpde is not superpage and/or valid.", __func__));
-	if (pte_is_ref(&oldpte) && (mpte = pmap_lookup_pt_page(pmap, va)) !=
-	    NULL)
-		pmap_remove_pt_page(pmap, mpte);
-	else {
-		KASSERT(!pte_test(&oldpte, PTE_W),
-		    ("%s: page table page for a wired mapping is missing",
-		    __func__));
-		/*
-		 * Invalidate the 2MB page mapping and return "failure" if the
-		 * mapping was never accessed or the allocation of the new
-		 * page table page fails.  If the 2MB page mapping belongs to
-		 * the direct map region of the kernel's address space, then
-		 * the page allocation request specifies the highest possible
-		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
-		 * normal.  Page table pages are preallocated for every other
-		 * part of the kernel address space, so the direct map region
-		 * is the only part of the kernel address space that must be
-		 * handled here.
-		 */
-		if (!pte_is_ref(&oldpte) || (mpte = vm_page_alloc(NULL,
-		    pmap_pde_pindex(va), (va >= VM_MIN_KERNEL_ADDRESS && va <
-		    VM_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
-		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
-			SLIST_INIT(&free);
-			pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
-			    lockp);
-			pmap_invalidate_range(pmap,
-			    (vm_offset_t)(va & ~PDRMASK),
-			    (vm_offset_t)(va & ~PDRMASK) + NBPDR);
-			vm_page_free_pages_toq(&free, true);
-			CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
-			    __func__, va, pmap);
-			return (FALSE);
-		}
-		if (va < VM_MAXUSER_ADDRESS)
-			pmap_resident_count_inc(pmap, 1);
-	}
-	mptepa = VM_PAGE_TO_PHYS(mpte);
-	newpde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(mptepa);
-	firstpte = newpde;
-	KASSERT(pte_is_ref(&oldpte),
-	    ("%s: oldpte is not referenced", __func__));
-	KASSERT(pte_test(&oldpte, PTE_RO) ^ pte_test(&oldpte, PTE_D),
-	    ("%s: oldpte is missing PTE_D", __func__));
-	newpte = oldpte & ~PTE_PS_IDX_MASK;
-
-	/*
-	 * If the page table page is new, initialize it.
-	 */
-	if (mpte->wire_count == 1) {
-		mpte->wire_count = NPTEPG;
-		pmap_fill_ptp(firstpte, newpte);
-	}
-	KASSERT(TLBLO_PTE_TO_PA(*firstpte) == TLBLO_PTE_TO_PA(newpte),
-	    ("%s: firstpte and newpte map different physical addresses",
-	    __func__));
-
-	/*
-	 * If the mapping has changed attributes, update the page table
-	 * entries.
-	 */
-	if ((*firstpte & PG_PROMOTE_MASK) != (newpte & PG_PROMOTE_MASK))
-		pmap_fill_ptp(firstpte, newpte);
-
-
-	/*
-	 * The spare PV entries must be reserved prior to demoting the
-	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
-	 * of the PDE and the PV lists will be inconsistent, which can result
-	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
-	 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
-	 * PV entry for the 2MB page mapping that is being demoted.
-	 */
-	if (pde_test(&oldpde, PTE_MANAGED))
-		reserve_pv_entries(pmap, NPTEPG - 1, lockp);
-
-	/*
-	 * Demote the mapping.  This pmap is locked.  The old PDE has
-	 * PTE_REF set.  If the old PDE has PTE_RO clear, it also has
-	 * PTE_D set.  Thus, there is no danger of a race with another
-	 * processor changing the setting of PTE_REF and/or PTE_D between
-	 * the read above and the store below.
-	 */
-	pmap_update_pde(pmap, va, pde, (pt_entry_t)newpde);
-
-	/*
-	 * Invalidate a stale recursive mapping of the page table page.
-	 */
-	if (va >= VM_MAXUSER_ADDRESS)
-		pmap_invalidate_page(pmap, (vm_offset_t)pmap_pte(pmap, va));
-
-	/*
-	 * Demote the PV entry.
-	 */
-	if (pde_test(&oldpde, PTE_MANAGED)) {
-		pmap_pv_demote_pde(pmap, va, TLBLO_PDE_TO_PA(oldpde), lockp);
-	}
-	atomic_add_long(&pmap_pde_demotions, 1);
-	CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va,
-	    pmap);
-
-	return (TRUE);
+	pd_entry_t newpde, oldpde;
+	pt_entry_t oldpte, *firstpte, newpte;
+	vm_paddr_t mptepa;
+	vm_page_t mpte;
+	struct spglist free;
+
+	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
+	oldpde = *pde;
+	oldpte = (pt_entry_t)oldpde;
+	KASSERT(pte_is_1m_superpage(&oldpte) && pte_is_valid(&oldpte),
+	    ("%s: oldpde is not superpage and/or valid.", __func__));
+	if (pte_is_ref(&oldpte) && (mpte = pmap_lookup_pt_page(pmap, va)) !=
+	    NULL)
+		pmap_remove_pt_page(pmap, mpte);
+	else {
+		KASSERT(!pte_test(&oldpte, PTE_W),
+		    ("%s: page table page for a wired mapping is missing",
+		    __func__));
+		/*
+		 * Invalidate the 2MB page mapping and return "failure" if the
+		 * mapping was never accessed or the allocation of the new
+		 * page table page fails.  If the 2MB page mapping belongs to
+		 * the direct map region of the kernel's address space, then
+		 * the page allocation request specifies the highest possible
+		 * priority (VM_ALLOC_INTERRUPT).  Otherwise, the priority is
+		 * normal.  Page table pages are preallocated for every other
+		 * part of the kernel address space, so the direct map region
+		 * is the only part of the kernel address space that must be
+		 * handled here.
+		 */
+		if (!pte_is_ref(&oldpte) || (mpte = vm_page_alloc(NULL,
+		    pmap_pde_pindex(va), (va >= VM_MIN_KERNEL_ADDRESS && va <
+		    VM_MAX_ADDRESS ? VM_ALLOC_INTERRUPT : VM_ALLOC_NORMAL) |
+		    VM_ALLOC_NOOBJ | VM_ALLOC_WIRED)) == NULL) {
+			SLIST_INIT(&free);
+			pmap_remove_pde(pmap, pde, trunc_2mpage(va), &free,
+			    lockp);
+			pmap_invalidate_range(pmap,
+			    (vm_offset_t)(va & ~PDRMASK),
+			    (vm_offset_t)(va & ~PDRMASK) + NBPDR);
+			vm_page_free_pages_toq(&free, true);
+			CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
+			    __func__, va, pmap);
+			return (FALSE);
+		}
+		if (va < VM_MAXUSER_ADDRESS)
+			pmap_resident_count_inc(pmap, 1);
+	}
+	mptepa = VM_PAGE_TO_PHYS(mpte);
+	newpde = (pd_entry_t)MIPS_PHYS_TO_DIRECT(mptepa);
+	firstpte = newpde;
+	KASSERT(pte_is_ref(&oldpte),
+	    ("%s: oldpte is not referenced", __func__));
+	KASSERT(pte_test(&oldpte, PTE_RO) ^ pte_test(&oldpte, PTE_D),
+	    ("%s: oldpte is missing PTE_D", __func__));
+	newpte = oldpte & ~PTE_PS_IDX_MASK;
+
+	/*
+	 * If the page table page is new, initialize it.
+	 */
+	if (mpte->wire_count == 1) {
+		mpte->wire_count = NPTEPG;
+		pmap_fill_ptp(firstpte, newpte);
+	}
+	KASSERT(TLBLO_PTE_TO_PA(*firstpte) == TLBLO_PTE_TO_PA(newpte),
+	    ("%s: firstpte and newpte map different physical addresses",
+	    __func__));
+
+	/*
+	 * If the mapping has changed attributes, update the page table
+	 * entries.
+	 */
+	if ((*firstpte & PG_PROMOTE_MASK) != (newpte & PG_PROMOTE_MASK))
+		pmap_fill_ptp(firstpte, newpte);
+
+
+	/*
+	 * The spare PV entries must be reserved prior to demoting the
+	 * mapping, that is, prior to changing the PDE.  Otherwise, the state
+	 * of the PDE and the PV lists will be inconsistent, which can result
+	 * in reclaim_pv_chunk() attempting to remove a PV entry from the
+	 * wrong PV list and pmap_pv_demote_pde() failing to find the expected
+	 * PV entry for the 2MB page mapping that is being demoted.
+	 */
+	if (pde_test(&oldpde, PTE_MANAGED))
+		reserve_pv_entries(pmap, NPTEPG - 1, lockp);
+
+	/*
+	 * Demote the mapping.  This pmap is locked.  The old PDE has
+	 * PTE_REF set.  If the old PDE has PTE_RO clear, it also has
+	 * PTE_D set.  Thus, there is no danger of a race with another
+	 * processor changing the setting of PTE_REF and/or PTE_D between
+	 * the read above and the store below.
+	 */
+	pmap_update_pde(pmap, va, pde, (pt_entry_t)newpde);
+
+	/*
+	 * Invalidate a stale recursive mapping of the page table page.
+	 */
+	if (va >= VM_MAXUSER_ADDRESS)
+		pmap_invalidate_page(pmap, (vm_offset_t)pmap_pte(pmap, va));
+
+	/*
+	 * Demote the PV entry.
+	 */
+	if (pde_test(&oldpde, PTE_MANAGED)) {
+		pmap_pv_demote_pde(pmap, va, TLBLO_PDE_TO_PA(oldpde), lockp);
+	}
+	atomic_add_long(&pmap_pde_demotions, 1);
+	CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__, va,
+	    pmap);
+
+	return (TRUE);
 }
 
 /*
-- 
2.41.0
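
A quick way to confirm that a hunk like this really is whitespace-only is to replay it with whitespace ignored: git show -w a95aa3f41 prints the commit header with an empty diff body when nothing but indentation changed.

The demotion path above leans on pmap_fill_ptp(), which is defined elsewhere in pmap_mips64.c and does not appear in this diff. As a rough, standalone model of that fill step - seed every base-page PTE of the new page table page from the superpage PTE, stepping the physical address one base page at a time so that the 4KB mappings tile the old 2MB mapping exactly - consider the sketch below. The name fill_ptp, the NPTEPG value of 512, and the flat address-plus-flags PTE layout are illustrative assumptions only; the real MIPS TLBLO format stores a shifted PFN, so the kernel's per-entry increment is not the raw page size.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096ULL	/* base page size (assumed) */
#define NPTEPG		512	/* PTEs per page table page (assumed) */

/*
 * Model a PTE as a physical address with flag bits in the low 12 bits.
 * The flags are identical for every base page of the former superpage;
 * only the physical address advances.
 */
static void
fill_ptp(uint64_t *firstpte, uint64_t newpte)
{
	uint64_t *pte;

	for (pte = firstpte; pte < firstpte + NPTEPG; pte++) {
		*pte = newpte;
		newpte += PAGE_SIZE;
	}
}

int
main(void)
{
	uint64_t ptp[NPTEPG];
	/* A 2MB-aligned frame with some valid/dirty-style flag bits. */
	uint64_t superpte = 0x40000000ULL | 0x7ULL;

	fill_ptp(ptp, superpte);
	/* 512 entries x 4KB tile the 2MB superpage exactly. */
	printf("pte[0]   = %#llx\n", (unsigned long long)ptp[0]);
	printf("pte[511] = %#llx\n", (unsigned long long)ptp[NPTEPG - 1]);
	return (0);
}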