From d2ad26aab06b93d15413dd3ddeead2078a8a6da7 Mon Sep 17 00:00:00 2001
From: Mark Johnston
Date: Wed, 16 Nov 2022 12:00:28 -0500
Subject: [PATCH 05/52] mips: Make pmap_enter_pde() more like other platforms'
 implementations

- Return a Mach error instead of a bool.
- Handle some pmap_enter() flags.  _NOREPLACE isn't implemented yet.

No functional change intended.
---
 sys/mips/mips/pmap_mips64.c | 61 ++++++++++++++++++++++---------------
 1 file changed, 36 insertions(+), 25 deletions(-)

diff --git a/sys/mips/mips/pmap_mips64.c b/sys/mips/mips/pmap_mips64.c
index 1219b1138147..6940c69e3b97 100644
--- a/sys/mips/mips/pmap_mips64.c
+++ b/sys/mips/mips/pmap_mips64.c
@@ -145,7 +145,7 @@ __FBSDID("$FreeBSD$");
         if (_new_lock != *_lockp) {                     \
                 if (*_lockp != NULL)                    \
                         rw_wunlock(*_lockp);            \
-                *_lockp = _new_lock;            \
+                *_lockp = _new_lock;                    \
                 rw_wlock(*_lockp);                      \
         }                                               \
 } while (0)
@@ -200,6 +200,12 @@ static void pmap_asid_alloc(pmap_t pmap);
 
 static struct rwlock_padalign pvh_global_lock;
 
+/*
+ * Internal flags for pmap_enter()'s helper functions.
+ */
+#define PMAP_ENTER_NORECLAIM    0x1000000       /* Don't reclaim PV entries. */
+#define PMAP_ENTER_NOREPLACE    0x2000000       /* Don't replace mappings. */
+
 /*
  * Data for the pv entry allocation mechanism
  */
@@ -225,12 +231,12 @@ static boolean_t pmap_demote_pde_locked(pmap_t pmap, pd_entry_t *pde,
     vm_offset_t va, struct rwlock **lockp);
 static vm_page_t pmap_allocpde(pmap_t pmap, vm_offset_t va,
     struct rwlock **lockp);
-static boolean_t pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
-    vm_prot_t prot, struct rwlock **lockp);
+static int pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m,
+    vm_prot_t prot, u_int flags, struct rwlock **lockp);
 static void pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
     struct rwlock **lockp);
 static boolean_t pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
-    struct rwlock **lockp);
+    u_int flags, struct rwlock **lockp);
 static void pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
     struct rwlock **lockp);
 static void pmap_fill_ptp(pt_entry_t *firstpte, pt_entry_t newpte);
@@ -2128,7 +2134,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t va, vm_page_t m,
  * memory can be allocated without resorting to reclamation.
  */
 static boolean_t
-pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
+pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa, u_int flags,
     struct rwlock **lockp)
 {
         struct md_page *pvh;
@@ -2137,7 +2143,8 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa,
         rw_assert(&pvh_global_lock, RA_LOCKED);
         PMAP_LOCK_ASSERT(pmap, MA_OWNED);
         /* Pass NULL instead of the lock pointer to disable reclamation. */
-        if ((pv = get_pv_entry(pmap, NULL)) != NULL) {
+        if ((pv = get_pv_entry(pmap, (flags & PMAP_ENTER_NORECLAIM) != 0 ?
+            NULL : lockp)) != NULL) {
                 pv->pv_va = va;
                 CHANGE_PV_LIST_LOCK_TO_PHYS(lockp, pa);
                 pvh = pa_to_pvh(pa);
@@ -3442,14 +3449,17 @@ retry:
 }
 
 /*
- * Tries to create a 2MB page mapping.  Returns TRUE if successful and FALSE
- * otherwise.  Fails if (1) a page table page cannot be allocated without
- * blocking, (2) a mapping already exists at the specified virtual address, or
- * (3) a pv entry cannot be allocated without reclaiming another pv entry.
+ * Tries to create the specified 2MB page mapping.  Returns KERN_SUCCESS if the
+ * mapping was created, and either KERN_FAILURE or KERN_RESOURCE_SHORTAGE
+ * otherwise.  Returns KERN_FAILURE if PMAP_ENTER_NOREPLACE was specified and a
+ * mapping already exists at the specified virtual address XXX-MJ notyet.
+ * Returns KERN_RESOURCE_SHORTAGE if PMAP_ENTER_NOSLEEP was specified and a page
+ * table page allocation failed.  Returns KERN_RESOURCE_SHORTAGE if
+ * PMAP_ENTER_NORECLAIM was specified and a PV entry allocation failed.
  */
-static boolean_t
+static int
 pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
-    struct rwlock **lockp)
+    u_int flags, struct rwlock **lockp)
 {
         pd_entry_t *pde;
         pt_entry_t newpde;
@@ -3464,12 +3474,13 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
                 /* Not doing the kernel pmap for now */
                 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p: kernel map",
                     __func__, va, pmap);
-                return (FALSE);
+                return (KERN_FAILURE);
         }
-        if ((mpde = pmap_allocpde(pmap, va, NULL)) == NULL) {
+        if ((mpde = pmap_allocpde(pmap, va, (flags & PMAP_ENTER_NOSLEEP) != 0 ?
+            NULL : lockp)) == NULL) {
                 CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p", __func__,
                     va, pmap);
-                return (FALSE);
+                return (KERN_RESOURCE_SHORTAGE);
         }
         pde = (pd_entry_t *)MIPS_PHYS_TO_DIRECT(VM_PAGE_TO_PHYS(mpde));
         pde = &pde[pmap_pde_index(va)];
@@ -3488,7 +3499,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
                 /*
                  * Abort this mapping if its PV entry could not be created.
                  */
-                if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m),
+                if (!pmap_pv_insert_pde(pmap, va, VM_PAGE_TO_PHYS(m), flags,
                     lockp)) {
                         SLIST_INIT(&free);
                         if (pmap_unwire_ptp(pmap, va, mpde, &free)) {
@@ -3497,7 +3508,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
                         }
                         CTR3(KTR_PMAP, "%s: failure for va %#lx in pmap %p",
                             __func__, va, pmap);
-                        return (FALSE);
+                        return (KERN_RESOURCE_SHORTAGE);
                 }
         }
 
@@ -3517,19 +3528,18 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot,
 
         /*
          * Sync I & D caches for executable pages.  Do this only if the
          * target pmap belongs to the current process.  Otherwise, an
          * unresolvable TLB miss may occur.
          */
-        if (!is_kernel_pmap(pmap) && (pmap == &curproc->p_vmspace->vm_pmap) &&
-            (prot & VM_PROT_EXECUTE)) {
-                va &= ~PDRMASK;
-                mips_icache_sync_range(va, NBPDR);
-                mips_dcache_wbinv_range(va, NBPDR);
-
+        if (pmap == &curproc->p_vmspace->vm_pmap && (prot & VM_PROT_EXECUTE)) {
+                va &= ~PDRMASK;
+                mips_icache_sync_range(va, NBPDR);
+                mips_dcache_wbinv_range(va, NBPDR);
         }
         sched_unpin();
         atomic_add_long(&pmap_pde_mappings, 1);
         CTR3(KTR_PMAP, "%s: success for va %#lx in pmap %p", __func__,
             va, pmap);
-        return (TRUE);
+
+        return (KERN_SUCCESS);
 }
@@ -3565,7 +3575,8 @@ pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end,
                 va = start + ptoa(diff);
                 if ((va & PDRMASK) == 0 && va + NBPDR <= end &&
                     m->psind == 1 && pg_ps_enabled &&
-                    pmap_enter_pde(pmap, va, m, prot, &lock))
+                    pmap_enter_pde(pmap, va, m, prot, PMAP_ENTER_NOSLEEP |
+                    PMAP_ENTER_NORECLAIM, &lock) == KERN_SUCCESS)
                         m = &m[NBPDR / PAGE_SIZE - 1];
                 else
                         mpte = pmap_enter_quick_locked(pmap, va, m, prot,
-- 
2.41.0
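
As a usage note (not part of the patch): the pmap_enter_object() hunk above
shows the pattern callers follow under the new contract. Below is a minimal
sketch of the same idea, assuming the declarations already present in
pmap_mips64.c; the wrapper name pmap_try_enter_superpage() is hypothetical:

        /*
         * Hypothetical wrapper: try to create a 2MB mapping without
         * sleeping for a page table page and without reclaiming PV
         * entries.  Returns true on success; on failure the caller is
         * expected to fall back to 4KB mappings.
         */
        static bool
        pmap_try_enter_superpage(pmap_t pmap, vm_offset_t va, vm_page_t m,
            vm_prot_t prot, struct rwlock **lockp)
        {
                return (pmap_enter_pde(pmap, va, m, prot,
                    PMAP_ENTER_NOSLEEP | PMAP_ENTER_NORECLAIM, lockp) ==
                    KERN_SUCCESS);
        }

Distinguishing KERN_RESOURCE_SHORTAGE from KERN_FAILURE will matter once
PMAP_ENTER_NOREPLACE is implemented: a shortage is transient and the caller
may retry or fall back, while KERN_FAILURE means the requested mapping cannot
be created at all.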