diff --git a/sys/powerpc/booke/pmap.c b/sys/powerpc/booke/pmap.c
index 434d5038c3e..d0613c3769a 100644
--- a/sys/powerpc/booke/pmap.c
+++ b/sys/powerpc/booke/pmap.c
@@ -183,9 +184,6 @@ static int mmu_booke_enter_locked(mmu_t, pmap_t, vm_offset_t, vm_page_t,
 
 unsigned int kptbl_min;		/* Index of the first kernel ptbl. */
 unsigned int kernel_ptbls;	/* Number of KVA ptbls. */
-#ifdef __powerpc64__
-unsigned int kernel_pdirs;
-#endif
 static uma_zone_t ptbl_root_zone;
 
 /*
@@ -267,7 +265,7 @@ static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t);
 static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t);
 static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t);
 static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t);
-static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t);
+static void kernel_pte_alloc(vm_offset_t, vm_offset_t);
 
 static pv_entry_t pv_alloc(void);
 static void pv_free(pv_entry_t);
@@ -639,10 +637,13 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	vm_paddr_t physsz, hwphyssz;
 	u_int phys_avail_count;
 	vm_size_t kstack0_sz;
-	vm_offset_t kernel_pdir, kstack0;
 	vm_paddr_t kstack0_phys;
+	vm_offset_t kstack0;
 	void *dpcpu;
+#ifndef __powerpc64__
+	vm_offset_t kernel_pdir;
 	vm_offset_t kernel_ptbl_root;
+#endif
 
 	debugf("mmu_booke_bootstrap: entered\n");
 
@@ -682,9 +683,8 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 
 	data_end = round_page(data_end);
 
+	data_end = round_page(mmu_booke_alloc_kernel_pgtables(data_end));
 #ifdef __powerpc64__
-	kernel_ptbl_root = data_end;
-	data_end += PP2D_NENTRIES * sizeof(pte_t**);
 #else
 	/* Allocate space for ptbl_bufs. */
 	ptbl_bufs = (struct ptbl_buf *)data_end;
@@ -693,22 +693,17 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
 	    (uintptr_t)ptbl_bufs, data_end);
 
 	data_end = round_page(data_end);
-	kernel_ptbl_root = data_end;
 	data_end += PDIR_NENTRIES * sizeof(pte_t*);
-#endif
 
 	/* Allocate PTE tables for kernel KVA. */
 	kernel_pdir = data_end;
 	kernel_ptbls = howmany(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
 	    PDIR_SIZE);
-#ifdef __powerpc64__
-	kernel_pdirs = howmany(kernel_ptbls, PDIR_NENTRIES);
-	data_end += kernel_pdirs * PDIR_PAGES * PAGE_SIZE;
-#endif
 	data_end += kernel_ptbls * PTBL_PAGES * PAGE_SIZE;
 	debugf(" kernel ptbls: %d\n", kernel_ptbls);
 	debugf(" kernel pdir at 0x%"PRI0ptrX" end = 0x%"PRI0ptrX"\n",
 	    kernel_pdir, data_end);
+#endif
 
 	/* Retrieve phys/avail mem regions */
 	mem_regions(&physmem_regions, &physmem_regions_sz,
@@ -751,15 +746,8 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend)
	 * possible additional TLB1 translations are in place (above) so that
	 * all range up to the currently calculated 'data_end' is covered.
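
The bootstrap hunks above boil down to carving page-table storage out of the
region behind the kernel image: round data_end up, reserve the root pointer
array, then one page (or page pair) per kernel page table; the 64-bit sizing
now lives in mmu_booke_alloc_kernel_pgtables(), added later in this diff.  A
minimal userspace sketch of the remaining 32-bit arithmetic (all constants
are illustrative placeholders, not the kernel's real PDIR_NENTRIES,
PTBL_PAGES, or KVA values; the ptbl_bufs reservation is omitted):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE       4096ULL
    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))
    #define round_page(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    int main(void)
    {
            /* Illustrative: 1 GB of KVA, 4 MB per ptbl, 2-page ptbls. */
            uint64_t kva_size = 1ULL << 30;
            uint64_t pdir_size = 1ULL << 22;
            uint64_t pdir_nentries = 1024, ptbl_pages = 2;
            uint64_t data_end = 0x7c3000;   /* pretend end of kernel data */
            uint64_t kernel_ptbls;

            data_end = round_page(data_end);
            data_end += pdir_nentries * sizeof(void *);     /* pm_pdir slots */
            kernel_ptbls = howmany(kva_size, pdir_size);
            data_end += kernel_ptbls * ptbl_pages * PAGE_SIZE; /* KVA ptbls */
            printf("kernel ptbls: %ju, data_end: %#jx\n",
                (uintmax_t)kernel_ptbls, (uintmax_t)data_end);
            return (0);
    }
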
*/ + bzero((void *)data_start, data_end - data_start); dpcpu_init(dpcpu, 0); -#ifdef __powerpc64__ - memset((void *)kernel_pdir, 0, - kernel_pdirs * PDIR_PAGES * PAGE_SIZE + - kernel_ptbls * PTBL_PAGES * PAGE_SIZE); -#else - memset((void *)ptbl_bufs, 0, sizeof(struct ptbl_buf) * PTBL_SIZE); - memset((void *)kernel_pdir, 0, kernel_ptbls * PTBL_PAGES * PAGE_SIZE); -#endif /*******************************************************/ /* Set the start and end of kva. */ @@ -939,14 +927,13 @@ mmu_booke_bootstrap(mmu_t mmu, vm_offset_t start, vm_offset_t kernelend) /*******************************************************/ PMAP_LOCK_INIT(kernel_pmap); #ifdef __powerpc64__ - kernel_pmap->pm_pp2d = (pte_t ***)kernel_ptbl_root; #else kptbl_min = VM_MIN_KERNEL_ADDRESS / PDIR_SIZE; kernel_pmap->pm_pdir = (pte_t **)kernel_ptbl_root; #endif debugf("kernel_pmap = 0x%"PRI0ptrX"\n", (uintptr_t)kernel_pmap); - kernel_pte_alloc(virtual_avail, kernstart, kernel_pdir); + kernel_pte_alloc(virtual_avail, kernstart); for (i = 0; i < MAXCPU; i++) { kernel_pmap->pm_tid[i] = TID_KERNEL; @@ -1327,7 +1314,7 @@ mmu_booke_enter_locked(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_page_t m, { pte_t *pte; vm_paddr_t pa; - uint32_t flags; + pte_t flags; int error, su, sync; pa = VM_PAGE_TO_PHYS(m); @@ -1580,6 +1567,13 @@ mmu_booke_remove(mmu_t mmu, pmap_t pmap, vm_offset_t va, vm_offset_t endva) pte = pte_find(mmu, pmap, va); if ((pte != NULL) && PTE_ISVALID(pte)) pte_remove(mmu, pmap, va, hold_flag); + if ((va & PDIR_MASK) == 0) { + PMAP_UNLOCK(pmap); + rw_wunlock(&pvh_global_lock); + maybe_yield(); + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + } } PMAP_UNLOCK(pmap); rw_wunlock(&pvh_global_lock); diff --git a/sys/powerpc/booke/pmap_32.c b/sys/powerpc/booke/pmap_32.c index a301ed0bc58..575991cf648 100644 --- a/sys/powerpc/booke/pmap_32.c +++ b/sys/powerpc/booke/pmap_32.c @@ -1,6 +1,7 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * + * Copyright (C) 2020 Justin Hibbits * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz * All rights reserved. diff --git a/sys/powerpc/booke/pmap_64.c b/sys/powerpc/booke/pmap_64.c index 85ce5dc637d..6ff323ee36f 100644 --- a/sys/powerpc/booke/pmap_64.c +++ b/sys/powerpc/booke/pmap_64.c @@ -1,6 +1,7 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * + * Copyright (C) 2020 Justin Hibbits * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski * Copyright (C) 2006 Semihalf, Marian Balakowicz * All rights reserved. @@ -114,6 +115,7 @@ __FBSDID("$FreeBSD$"); unsigned int kernel_pdirs; static uma_zone_t ptbl_root_zone; +static pte_t ****kernel_ptbl_root; /* * Base of the pmap_mapdev() region. 
On 32-bit it immediately follows the @@ -133,25 +135,48 @@ static unsigned long ilog2(unsigned long); /* Page table management */ /**************************************************************************/ -static struct rwlock_padalign pvh_global_lock; - -#define PMAP_ROOT_SIZE (sizeof(pte_t***) * PP2D_NENTRIES) -static pte_t *ptbl_alloc(mmu_t, pmap_t, pte_t **, - unsigned int, boolean_t); -static void ptbl_free(mmu_t, pmap_t, pte_t **, unsigned int, vm_page_t); -static void ptbl_hold(mmu_t, pmap_t, pte_t **, unsigned int); +#define PMAP_ROOT_SIZE (sizeof(pte_t****) * PG_ROOT_NENTRIES) +static pte_t *ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, + bool nosleep, bool *is_new); +static void ptbl_hold(mmu_t, pmap_t, pte_t *); static int ptbl_unhold(mmu_t, pmap_t, vm_offset_t); static vm_paddr_t pte_vatopa(mmu_t, pmap_t, vm_offset_t); static int pte_enter(mmu_t, pmap_t, vm_page_t, vm_offset_t, uint32_t, boolean_t); static int pte_remove(mmu_t, pmap_t, vm_offset_t, uint8_t); static pte_t *pte_find(mmu_t, pmap_t, vm_offset_t); -static void kernel_pte_alloc(vm_offset_t, vm_offset_t, vm_offset_t); +static void kernel_pte_alloc(vm_offset_t, vm_offset_t); /**************************************************************************/ /* Page table related */ /**************************************************************************/ +/* Allocate a page, to be used in a page table. */ +static vm_offset_t +mmu_booke_alloc_page(mmu_t mmu, pmap_t pmap, unsigned int idx, bool nosleep) +{ + vm_page_t m; + int req; + + req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED | VM_ALLOC_ZERO; + while ((m = vm_page_alloc(NULL, idx, req)) == NULL) { + if (nosleep) + return (0); + + PMAP_UNLOCK(pmap); + rw_wunlock(&pvh_global_lock); + vm_wait(NULL); + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + } + + if (!(m->flags & PG_ZERO)) + /* Zero whole ptbl. */ + mmu_booke_zero_page(mmu, m); + + return (PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m))); +} + /* Initialize pool of kva ptbl buffers. */ static void ptbl_init(void) @@ -162,15 +187,20 @@ ptbl_init(void) static __inline pte_t * pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) { + pte_t ***pdir_l1; pte_t **pdir; pte_t *ptbl; KASSERT((pmap != NULL), ("pte_find: invalid pmap")); - pdir = pmap->pm_pp2d[PP2D_IDX(va)]; - if (!pdir) - return NULL; + pdir_l1 = pmap->pm_root[PG_ROOT_IDX(va)]; + if (pdir_l1 == NULL) + return (NULL); + pdir = pdir_l1[PDIR_L1_IDX(va)]; + if (pdir == NULL) + return (NULL); ptbl = pdir[PDIR_IDX(va)]; + return ((ptbl != NULL) ? &ptbl[PTBL_IDX(va)] : NULL); } @@ -178,141 +208,79 @@ pte_find(mmu_t mmu, pmap_t pmap, vm_offset_t va) * allocate a page of pointers to page directories, do not preallocate the * page tables */ -static pte_t ** -pdir_alloc(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, bool nosleep) -{ - vm_page_t m; - pte_t **pdir; - int req; - - req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED; - while ((m = vm_page_alloc(NULL, pp2d_idx, req)) == NULL) { - PMAP_UNLOCK(pmap); - if (nosleep) { - return (NULL); - } - vm_wait(NULL); - PMAP_LOCK(pmap); - } - - /* Zero whole ptbl. */ - pdir = (pte_t **)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)); - mmu_booke_zero_page(mmu, m); - - return (pdir); -} - -/* Free pdir pages and invalidate pdir entry. 
*/
-static void
-pdir_free(mmu_t mmu, pmap_t pmap, unsigned int pp2d_idx, vm_page_t m)
-{
-	pte_t **pdir;
-
-	pdir = pmap->pm_pp2d[pp2d_idx];
-
-	KASSERT((pdir != NULL), ("pdir_free: null pdir"));
-
-	pmap->pm_pp2d[pp2d_idx] = NULL;
-	vm_wire_sub(1);
-	vm_page_free_zero(m);
-}
-
-/*
- * Decrement pdir pages hold count and attempt to free pdir pages. Called
- * when removing directory entry from pdir.
- *
- * Return 1 if pdir pages were freed.
- */
-static int
-pdir_unhold(mmu_t mmu, pmap_t pmap, u_int pp2d_idx)
+static bool
+unhold_free_page(mmu_t mmu, pmap_t pmap, vm_page_t m)
 {
-	pte_t **pdir;
-	vm_paddr_t pa;
-	vm_page_t m;
 
-	KASSERT((pmap != kernel_pmap),
-	    ("pdir_unhold: unholding kernel pdir!"));
-
-	pdir = pmap->pm_pp2d[pp2d_idx];
-
-	/* decrement hold count */
-	pa = DMAP_TO_PHYS((vm_offset_t) pdir);
-	m = PHYS_TO_VM_PAGE(pa);
-
-	/*
-	 * Free pdir page if there are no dir entries in this pdir.
-	 */
 	m->ref_count--;
 	if (m->ref_count == 0) {
-		pdir_free(mmu, pmap, pp2d_idx, m);
-		return (1);
+		vm_wire_sub(1);
+		vm_page_free_zero(m);
+		return (true);
 	}
-	return (0);
+
+	return (false);
 }
 
-/*
- * Increment hold count for pdir pages. This routine is used when new ptlb
- * entry is being inserted into pdir.
- */
-static void
-pdir_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir)
+static vm_offset_t
+alloc_or_hold_page(mmu_t mmu, pmap_t pmap, vm_offset_t *ptr_tbl, uint32_t index,
+    bool nosleep, bool hold, bool *isnew)
 {
+	vm_offset_t page;
 	vm_page_t m;
 
-	KASSERT((pmap != kernel_pmap),
-	    ("pdir_hold: holding kernel pdir!"));
+	page = ptr_tbl[index];
+	KASSERT(page != 0 || pmap != kernel_pmap,
+	    ("NULL page table page found in kernel pmap!"));
+	if (page == 0) {
+		page = mmu_booke_alloc_page(mmu, pmap, index, nosleep);
+		if (ptr_tbl[index] == 0) {
+			*isnew = true;
+			ptr_tbl[index] = page;
+			return (page);
+		}
+		m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS(page));
+		page = ptr_tbl[index];
+		vm_wire_sub(1);
+		vm_page_free_zero(m);
+	}
 
-	KASSERT((pdir != NULL), ("pdir_hold: null pdir"));
+	if (hold) {
+		m = PHYS_TO_VM_PAGE(pmap_kextract(page));
+		m->ref_count++;
+	}
+	*isnew = false;
 
-	m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t)pdir));
-	m->ref_count++;
+	return (page);
 }
 
 /* Allocate page table. */
-static pte_t *
-ptbl_alloc(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx,
-    boolean_t nosleep)
+static pte_t*
+ptbl_alloc(mmu_t mmu, pmap_t pmap, vm_offset_t va, bool nosleep, bool *is_new)
 {
-	vm_page_t m;
-	pte_t *ptbl;
-	int req;
+	unsigned int pg_root_idx = PG_ROOT_IDX(va);
+	unsigned int pdir_l1_idx = PDIR_L1_IDX(va);
+	unsigned int pdir_idx = PDIR_IDX(va);
+	vm_offset_t pdir_l1, pdir, ptbl;
+	bool hold_page;
 
-	KASSERT((pdir[pdir_idx] == NULL),
-	    ("%s: valid ptbl entry exists!", __func__));
-	req = VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
-	while ((m = vm_page_alloc(NULL, pdir_idx, req)) == NULL) {
-		if (nosleep)
-			return (NULL);
-		PMAP_UNLOCK(pmap);
-		rw_wunlock(&pvh_global_lock);
-		vm_wait(NULL);
-		rw_wlock(&pvh_global_lock);
-		PMAP_LOCK(pmap);
-	}
-
-	/* Zero whole ptbl. */
-	ptbl = (pte_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m));
-	mmu_booke_zero_page(mmu, m);
-
-	return (ptbl);
-}
-
-/* Free ptbl pages and invalidate pdir entry.
*/ -static void -ptbl_free(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx, vm_page_t m) -{ - pte_t *ptbl; - - ptbl = pdir[pdir_idx]; - - KASSERT((ptbl != NULL), ("ptbl_free: null ptbl")); - - pdir[pdir_idx] = NULL; - - vm_wire_sub(1); - vm_page_free_zero(m); + hold_page = (pmap != kernel_pmap); + pdir_l1 = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pmap->pm_root, + pg_root_idx, nosleep, hold_page, is_new); + if (pdir_l1 == 0) + return (NULL); + pdir = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir_l1, pdir_l1_idx, + nosleep, hold_page, is_new); + if (pdir == 0) + return (NULL); + ptbl = alloc_or_hold_page(mmu, pmap, (vm_offset_t *)pdir, pdir_idx, + nosleep, false, is_new); + + return ((pte_t *)ptbl); } /* @@ -326,34 +294,43 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va) { pte_t *ptbl; vm_page_t m; - u_int pp2d_idx; + u_int pg_root_idx; + pte_t ***pdir_l1; + u_int pdir_l1_idx; pte_t **pdir; u_int pdir_idx; - pp2d_idx = PP2D_IDX(va); + pg_root_idx = PG_ROOT_IDX(va); + pdir_l1_idx = PDIR_L1_IDX(va); pdir_idx = PDIR_IDX(va); KASSERT((pmap != kernel_pmap), ("ptbl_unhold: unholding kernel ptbl!")); - pdir = pmap->pm_pp2d[pp2d_idx]; + pdir_l1 = pmap->pm_root[pg_root_idx]; + pdir = pdir_l1[pdir_l1_idx]; ptbl = pdir[pdir_idx]; /* decrement hold count */ m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl)); - /* - * Free ptbl pages if there are no pte entries in this ptbl. - * ref_count has the same value for all ptbl pages, so check the - * last page. - */ - m->ref_count--; - if (m->ref_count == 0) { - ptbl_free(mmu, pmap, pdir, pdir_idx, m); - pdir_unhold(mmu, pmap, pp2d_idx); + if (!unhold_free_page(mmu, pmap, m)) + return (0); + + pdir[pdir_idx] = NULL; + m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir)); + + if (!unhold_free_page(mmu, pmap, m)) return (1); - } - return (0); + + pdir_l1[pdir_l1_idx] = NULL; + m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) pdir_l1)); + + if (!unhold_free_page(mmu, pmap, m)) + return (1); + pmap->pm_root[pg_root_idx] = NULL; + + return (1); } /* @@ -361,18 +338,13 @@ ptbl_unhold(mmu_t mmu, pmap_t pmap, vm_offset_t va) * entry is being inserted into ptbl. */ static void -ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t ** pdir, unsigned int pdir_idx) +ptbl_hold(mmu_t mmu, pmap_t pmap, pte_t *ptbl) { - pte_t *ptbl; vm_page_t m; KASSERT((pmap != kernel_pmap), ("ptbl_hold: holding kernel ptbl!")); - ptbl = pdir[pdir_idx]; - - KASSERT((ptbl != NULL), ("ptbl_hold: null ptbl")); - m = PHYS_TO_VM_PAGE(DMAP_TO_PHYS((vm_offset_t) ptbl)); m->ref_count++; } @@ -442,27 +414,17 @@ static int pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, boolean_t nosleep) { - unsigned int pp2d_idx = PP2D_IDX(va); - unsigned int pdir_idx = PDIR_IDX(va); unsigned int ptbl_idx = PTBL_IDX(va); pte_t *ptbl, *pte, pte_tmp; - pte_t **pdir; + bool is_new; /* Get the page directory pointer. */ - pdir = pmap->pm_pp2d[pp2d_idx]; - if (pdir == NULL) - pdir = pdir_alloc(mmu, pmap, pp2d_idx, nosleep); - - /* Get the page table pointer. */ - ptbl = pdir[pdir_idx]; - + ptbl = ptbl_alloc(mmu, pmap, va, nosleep, &is_new); if (ptbl == NULL) { - /* Allocate page table pages. */ - ptbl = ptbl_alloc(mmu, pmap, pdir, pdir_idx, nosleep); - if (ptbl == NULL) { - KASSERT(nosleep, ("nosleep and NULL ptbl")); - return (ENOMEM); - } + KASSERT(nosleep, ("nosleep and NULL ptbl")); + return (ENOMEM); + } + if (is_new) { pte = &ptbl[ptbl_idx]; } else { /* @@ -478,18 +440,10 @@ pte_enter(mmu_t mmu, pmap_t pmap, vm_page_t m, vm_offset_t va, uint32_t flags, * pages. 
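
The else-branch above only holds an extra reference on an already-existing
ptbl page; the new-page case is handled inside ptbl_alloc() via
alloc_or_hold_page().  The subtle part is that mmu_booke_alloc_page() may
drop the pmap and pvh locks to sleep in vm_wait(), so by the time it returns
another thread can already have installed the same intermediate page; the
loser must discard its copy and adopt the winner's.  A single-threaded model
of that control flow (hypothetical stand-ins, not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    static void *slot;              /* models ptr_tbl[index] */

    /*
     * Models mmu_booke_alloc_page(): the real code may sleep with the
     * locks dropped; simulate another thread populating the slot while
     * we were asleep.
     */
    static void *
    alloc_page_may_sleep(void)
    {
            slot = calloc(1, 4096);         /* "other thread" wins */
            return (calloc(1, 4096));       /* our now-redundant page */
    }

    static void *
    alloc_or_install(int *isnew)
    {
            void *page;

            if ((page = slot) == NULL) {
                    page = alloc_page_may_sleep();
                    if (slot == NULL) {     /* no race: install ours */
                            slot = page;
                            *isnew = 1;
                            return (page);
                    }
                    free(page);             /* lost the race: drop copy */
                    page = slot;            /* use the winner's page */
            }
            *isnew = 0;
            return (page);
    }

    int main(void)
    {
            int isnew;
            void *p = alloc_or_install(&isnew);

            printf("page %p, isnew %d\n", p, isnew); /* isnew == 0 here */
            free(slot);
            return (0);
    }
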
*/
 		if (pmap != kernel_pmap)
-			ptbl_hold(mmu, pmap, pdir, pdir_idx);
+			ptbl_hold(mmu, pmap, ptbl);
 		}
 	}
 
-	if (pdir[pdir_idx] == NULL) {
-		if (pmap != kernel_pmap && pmap->pm_pp2d[pp2d_idx] != NULL)
-			pdir_hold(mmu, pmap, pdir);
-		pdir[pdir_idx] = ptbl;
-	}
-	if (pmap->pm_pp2d[pp2d_idx] == NULL)
-		pmap->pm_pp2d[pp2d_idx] = pdir;
-
 	/*
 	 * Insert pv_entry into pv_list for mapped page if part of managed
 	 * memory.
@@ -534,22 +488,38 @@ pte_vatopa(mmu_t mmu, pmap_t pmap, vm_offset_t va)
 
 /* allocate pte entries to manage (addr & mask) to (addr & mask) + size */
 static void
-kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
+kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr)
 {
-	int i, j;
-	vm_offset_t va;
 	pte_t *pte;
+	vm_size_t kva_size;
+	int kernel_pdirs, kernel_pgtbls, pdir_l1s;
+	vm_offset_t va, l1_va, pdir_va, ptbl_va;
+	int i, j, k;
+
+	kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+	kernel_pmap->pm_root = kernel_ptbl_root;
+	pdir_l1s = howmany(kva_size, PG_ROOT_SIZE);
+	kernel_pdirs = howmany(kva_size, PDIR_L1_SIZE);
+	kernel_pgtbls = howmany(kva_size, PDIR_SIZE);
 
-	va = addr;
 	/* Initialize kernel pdir */
-	for (i = 0; i < kernel_pdirs; i++) {
-		kernel_pmap->pm_pp2d[i + PP2D_IDX(va)] =
-		    (pte_t **)(pdir + (i * PAGE_SIZE * PDIR_PAGES));
-		for (j = PDIR_IDX(va + (i * PAGE_SIZE * PDIR_NENTRIES * PTBL_NENTRIES));
-		    j < PDIR_NENTRIES; j++) {
-			kernel_pmap->pm_pp2d[i + PP2D_IDX(va)][j] =
-			    (pte_t *)(pdir + (kernel_pdirs * PAGE_SIZE) +
-			     (((i * PDIR_NENTRIES) + j) * PAGE_SIZE));
+	l1_va = (vm_offset_t)kernel_ptbl_root +
+	    round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
+	pdir_va = l1_va +
+	    round_page(pdir_l1s * sizeof(pte_t ***));
+	ptbl_va = pdir_va +
+	    round_page(kernel_pdirs * sizeof(pte_t **));
+	va = VM_MIN_KERNEL_ADDRESS;
+	for (i = 0; i < pdir_l1s; i++, l1_va += PAGE_SIZE) {
+		kernel_pmap->pm_root[i] = (pte_t ***)l1_va;
+		for (j = 0;
+		    j < PDIR_L1_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
+		    j++, pdir_va += PAGE_SIZE) {
+			kernel_pmap->pm_root[i][j] = (pte_t **)pdir_va;
+			for (k = 0;
+			    k < PDIR_NENTRIES && va < VM_MAX_KERNEL_ADDRESS;
+			    k++, va += PDIR_SIZE, ptbl_va += PAGE_SIZE)
+				kernel_pmap->pm_root[i][j][k] = (pte_t *)ptbl_va;
 		}
 	}
 
@@ -560,13 +530,27 @@ kernel_pte_alloc(vm_offset_t data_end, vm_offset_t addr, vm_offset_t pdir)
 	 * addresses.
 	 */
 	for (va = addr; va < data_end; va += PAGE_SIZE) {
-		pte = &(kernel_pmap->pm_pp2d[PP2D_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
+		pte = &(kernel_pmap->pm_root[PG_ROOT_IDX(va)][PDIR_L1_IDX(va)][PDIR_IDX(va)][PTBL_IDX(va)]);
 		*pte = PTE_RPN_FROM_PA(kernload + (va - kernstart));
 		*pte |= PTE_M | PTE_SR | PTE_SW | PTE_SX | PTE_WIRED |
 		    PTE_VALID | PTE_PS_4KB;
 	}
 }
 
+static vm_offset_t
+mmu_booke_alloc_kernel_pgtables(vm_offset_t data_end)
+{
+	vm_size_t kva_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS;
+	kernel_ptbl_root = (pte_t ****)data_end;
+
+	data_end += round_page(PG_ROOT_NENTRIES * sizeof(pte_t ***));
+	data_end += round_page(howmany(kva_size, PG_ROOT_SIZE) * sizeof(pte_t ***));
+	data_end += round_page(howmany(kva_size, PDIR_L1_SIZE) * sizeof(pte_t **));
+	data_end += howmany(kva_size, PDIR_SIZE) * PAGE_SIZE;
+
+	return (data_end);
+}
+
 /*
  * Initialize a preallocated and zeroed pmap structure,
  * such as one in a vmspace structure.
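
kernel_pte_alloc() and mmu_booke_alloc_kernel_pgtables() above size the
preallocated kernel tables with the same howmany() chain, wiring one page
per table at each level.  A standalone sketch of the per-level page counts
(the 8 GB KVA window is only an example; the field sizes follow the pte.h
hunk at the end of this diff):

    #include <stdint.h>
    #include <stdio.h>

    #define howmany(x, y)   (((x) + ((y) - 1)) / (y))

    #define PG_ROOT_SIZE    (1ULL << 39)    /* VA per root entry */
    #define PDIR_L1_SIZE    (1ULL << 30)    /* VA per l1 entry */
    #define PDIR_SIZE       (1ULL << 21)    /* VA per leaf page table */

    int main(void)
    {
            uint64_t kva_size = 8ULL << 30;     /* example: 8 GB of KVA */

            /* Prints 1, 8, and 4096 for this window. */
            printf("pdir_l1s:      %ju\n",
                (uintmax_t)howmany(kva_size, PG_ROOT_SIZE));
            printf("kernel_pdirs:  %ju\n",
                (uintmax_t)howmany(kva_size, PDIR_L1_SIZE));
            printf("kernel_pgtbls: %ju\n",
                (uintmax_t)howmany(kva_size, PDIR_SIZE));
            return (0);
    }
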
@@ -585,8 +574,8 @@ mmu_booke_pinit(mmu_t mmu, pmap_t pmap) pmap->pm_tid[i] = TID_NONE; CPU_ZERO(&kernel_pmap->pm_active); bzero(&pmap->pm_stats, sizeof(pmap->pm_stats)); - pmap->pm_pp2d = uma_zalloc(ptbl_root_zone, M_WAITOK); - bzero(pmap->pm_pp2d, sizeof(pte_t **) * PP2D_NENTRIES); + pmap->pm_root = uma_zalloc(ptbl_root_zone, M_WAITOK); + bzero(pmap->pm_root, sizeof(pte_t **) * PG_ROOT_NENTRIES); } /* @@ -601,7 +590,7 @@ mmu_booke_release(mmu_t mmu, pmap_t pmap) KASSERT(pmap->pm_stats.resident_count == 0, ("pmap_release: pmap resident count %ld != 0", pmap->pm_stats.resident_count)); - uma_zfree(ptbl_root_zone, pmap->pm_pp2d); + uma_zfree(ptbl_root_zone, pmap->pm_root); } static void diff --git a/sys/powerpc/booke/trap_subr.S b/sys/powerpc/booke/trap_subr.S index b1c1544068a..3c54af1b898 100644 --- a/sys/powerpc/booke/trap_subr.S +++ b/sys/powerpc/booke/trap_subr.S @@ -800,11 +800,16 @@ pte_lookup: beq 1f /* fail quickly if pmap is invalid */ #ifdef __powerpc64__ - rldicl %r21, %r31, (64 - PP2D_L_L), (64 - PP2D_L_NUM) /* pp2d offset */ - rldicl %r25, %r31, (64 - PP2D_H_L), (64 - PP2D_H_NUM) - rldimi %r21, %r25, PP2D_L_NUM, (64 - (PP2D_L_NUM + PP2D_H_NUM)) - slwi %r21, %r21, PP2D_ENTRY_SHIFT /* multiply by pp2d entry size */ - ld %r25, PM_PP2D(%r26) /* pmap pm_pp2d[] address */ + rldicl %r21, %r31, (64 - PG_ROOT_L), (64 - PG_ROOT_NUM) /* pp2d offset */ + slwi %r21, %r21, PG_ROOT_ENTRY_SHIFT /* multiply by pp2d entry size */ + ld %r25, PM_ROOT(%r26) /* pmap pm_pp2d[] address */ + ldx %r25, %r25, %r21 /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] * */ + + cmpdi %r25, 0 + beq 2f + + rldicl %r21, %r31, (64 - PDIR_L1_L), (64 - PDIR_L1_NUM) /* pp2d offset */ + slwi %r21, %r21, PDIR_L1_ENTRY_SHIFT /* multiply by pp2d entry size */ ldx %r25, %r25, %r21 /* get pdir address, i.e. pmap->pm_pp2d[pp2d_idx] * */ cmpdi %r25, 0 diff --git a/sys/powerpc/include/pmap.h b/sys/powerpc/include/pmap.h index 40f0a1fc76a..192f3be315f 100644 --- a/sys/powerpc/include/pmap.h +++ b/sys/powerpc/include/pmap.h @@ -165,7 +165,7 @@ struct pmap { * Page table directory, * array of pointers to page directories. */ - pte_t ***pm_pp2d; + pte_t ****pm_root; #else /* * Page table directory, diff --git a/sys/powerpc/include/pte.h b/sys/powerpc/include/pte.h index 384967ddfe0..96dcf4ed264 100644 --- a/sys/powerpc/include/pte.h +++ b/sys/powerpc/include/pte.h @@ -295,40 +295,52 @@ typedef uint64_t pte_t; * The virtual address is: * * 4K page size - * +-----+-----+-----+-------+-------------+-------------+----------------+ - * | - |p2d#h| - | p2d#l | dir# | pte# | off in 4K page | - * +-----+-----+-----+-------+-------------+-------------+----------------+ - * 63 62 61 60 59 40 39 30 29 ^ 21 20 ^ 12 11 0 + * +-----+-----------+-------+-------------+-------------+----------------+ + * | - | pg_root |pdir_l1| dir# | pte# | off in 4K page | + * +-----+-----------+-------+-------------+-------------+----------------+ + * 63 52 51 39 38 30 29 ^ 21 20 ^ 12 11 0 * | | * index in 1 page of pointers * - * 1st level - pointers to page table directory (pp2d) + * 1st level - Root page table * - * pp2d consists of PP2D_NENTRIES entries, each being a pointer to + * pp2d consists of PG_ROOT_NENTRIES entries, each being a pointer to * second level entity, i.e. the page table directory (pdir). 
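
In the pte_lookup hunk above, each rldicl/slwi/ldx triple is one level of
the same walk that pte_find() does in C: rldicl rD, rS, 64-L, 64-NUM rotates
the VA right by the field's low bit L and keeps the low NUM bits, which is
exactly PG_ROOT_IDX()/PDIR_L1_IDX().  A userspace self-check of that
equivalence, using the PG_ROOT values from the pte.h hunk below:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* rldicl rD,rS,sh,mb: rotate left by sh, clear the high mb bits. */
    static uint64_t
    rldicl(uint64_t v, unsigned int sh, unsigned int mb)
    {
            uint64_t r = (v << sh) | (v >> (64 - sh));  /* 0 < sh < 64 here */

            return (r & (~0ULL >> mb));
    }

    int main(void)
    {
            uint64_t va = 0xc0000000deadb000ULL;
            unsigned int L = 39, NUM = 13;  /* PG_ROOT_L, PG_ROOT_NUM */

            assert(rldicl(va, 64 - L, 64 - NUM) ==
                ((va >> L) & ((1ULL << NUM) - 1)));
            printf("root index %#jx\n", (uintmax_t)rldicl(va, 64 - L, 64 - NUM));
            return (0);
    }
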
*/
-#define PP2D_H_H	61
-#define PP2D_H_L	60
-#define PP2D_L_H	39
-#define PP2D_L_L	30	/* >30 would work with no page table pool */
-#define PP2D_SIZE	(1 << PP2D_L_L)	/* va range mapped by pp2d */
-#define PP2D_L_SHIFT	PP2D_L_L
-#define PP2D_L_NUM	(PP2D_L_H-PP2D_L_L+1)
-#define PP2D_L_MASK	((1<<PP2D_L_NUM)-1)
-#define PP2D_H_SHIFT	(PP2D_H_L-PP2D_L_NUM)
-#define PP2D_H_NUM	(PP2D_H_H-PP2D_H_L+1)
-#define PP2D_H_MASK	(((1<<PP2D_H_NUM)-1)<<PP2D_L_NUM)
-#define PP2D_IDX(va)	(((va >> PP2D_H_SHIFT) & PP2D_H_MASK) | ((va >> PP2D_L_SHIFT) & PP2D_L_MASK))
-#define PP2D_NENTRIES	(1<<(PP2D_L_NUM+PP2D_H_NUM))
-#define PP2D_ENTRY_SHIFT 3	/* log2 (sizeof(struct pte_entry **)) */
+#define PG_ROOT_H	51
+#define PG_ROOT_L	39
+#define PG_ROOT_SIZE	(1UL << PG_ROOT_L)	/* va range mapped by pp2d */
+#define PG_ROOT_SHIFT	PG_ROOT_L
+#define PG_ROOT_NUM	(PG_ROOT_H - PG_ROOT_L + 1)
+#define PG_ROOT_MASK	((1 << PG_ROOT_NUM) - 1)
+#define PG_ROOT_IDX(va)	((va >> PG_ROOT_SHIFT) & PG_ROOT_MASK)
+#define PG_ROOT_NENTRIES (1 << PG_ROOT_NUM)
+#define PG_ROOT_ENTRY_SHIFT 3	/* log2 (sizeof(struct pte_entry **)) */
 
 /*
- * 2nd level - page table directory (pdir)
+ * 2nd level - page directory directory (pdir l1)
  *
  * pdir consists of PDIR_NENTRIES entries, each being a pointer to
  * second level entity, i.e. the actual page table (ptbl).
  */
-#define PDIR_H	(PP2D_L_L-1)
+#define PDIR_L1_H	(PG_ROOT_L-1)
+#define PDIR_L1_L	30
+#define PDIR_L1_NUM	(PDIR_L1_H-PDIR_L1_L+1)
+#define PDIR_L1_SIZE	(1 << PDIR_L1_L)	/* va range mapped by pdir */
+#define PDIR_L1_MASK	((1<<PDIR_L1_NUM)-1)
+#define PDIR_L1_SHIFT	PDIR_L1_L
+#define PDIR_L1_NENTRIES (1<<PDIR_L1_NUM)
+#define PDIR_L1_IDX(va)	(((va) >> PDIR_L1_SHIFT) & PDIR_L1_MASK)
+#define PDIR_L1_ENTRY_SHIFT 3	/* log2 (sizeof(struct pte_entry *)) */
+#define PDIR_L1_PAGES ((PDIR_L1_NENTRIES * (1<<PDIR_L1_ENTRY_SHIFT)) / PAGE_SIZE)
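
To see the new layout end to end, the sketch below decomposes a VA with the
macros above.  PG_ROOT_* and PDIR_L1_* mirror this hunk; the 9-bit dir# and
pte# widths are taken from the layout diagram, since the PDIR_*/PTBL_*
definitions fall outside this excerpt:

    #include <stdint.h>
    #include <stdio.h>

    #define PG_ROOT_L       39
    #define PG_ROOT_NUM     13              /* bits 51:39 */
    #define PDIR_L1_L       30
    #define PDIR_L1_NUM     9               /* bits 38:30 */
    #define PDIR_L          21              /* bits 29:21 (per the diagram) */
    #define PTBL_L          12              /* bits 20:12 (per the diagram) */
    #define IDX(va, l, n)   (((va) >> (l)) & ((1ULL << (n)) - 1))

    int main(void)
    {
            uint64_t va = 0x0000123456789abcULL;    /* arbitrary example */

            printf("root %ju, l1 %ju, dir %ju, pte %ju, off %#jx\n",
                (uintmax_t)IDX(va, PG_ROOT_L, PG_ROOT_NUM),
                (uintmax_t)IDX(va, PDIR_L1_L, PDIR_L1_NUM),
                (uintmax_t)IDX(va, PDIR_L, 9),
                (uintmax_t)IDX(va, PTBL_L, 9),
                (uintmax_t)(va & ((1ULL << PTBL_L) - 1)));
            return (0);
    }

With 13 + 9 + 9 + 9 index bits plus the 12-bit page offset, one pmap spans a
52-bit virtual address space; per the diagram, bits 63:52 are not translated.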