Index: sys/amd64/include/pmap.h
===================================================================
--- sys/amd64/include/pmap.h        (revision 246277)
+++ sys/amd64/include/pmap.h        (working copy)
@@ -113,13 +113,7 @@
         ((unsigned long)(l2) << PDRSHIFT) | \
         ((unsigned long)(l1) << PAGE_SHIFT))
 
-/* Initial number of kernel page tables. */
-#ifndef NKPT
-#define NKPT            32
-#endif
-
 #define NKPML4E         1               /* number of kernel PML4 slots */
-#define NKPDPE          howmany(NKPT, NPDEPG)/* number of kernel PDP slots */
 
 #define NUPML4E         (NPML4EPG/2)    /* number of userland PML4 pages */
 #define NUPDPE          (NUPML4E*NPDPEPG)/* number of userland PDP pages */
@@ -181,6 +175,7 @@
 #define PML4map         ((pd_entry_t *)(addr_PML4map))
 #define PML4pml4e       ((pd_entry_t *)(addr_PML4pml4e))
 
+extern int nkpt;                /* Initial number of kernel page tables */
 extern u_int64_t KPDPphys;      /* physical address of kernel level 3 */
 extern u_int64_t KPML4phys;     /* physical address of kernel level 4 */
 
Index: sys/amd64/amd64/minidump_machdep.c
===================================================================
--- sys/amd64/amd64/minidump_machdep.c  (revision 246277)
+++ sys/amd64/amd64/minidump_machdep.c  (working copy)
@@ -232,7 +232,7 @@
         /* Walk page table pages, set bits in vm_page_dump */
         pmapsize = 0;
         pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
-        for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
+        for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
             kernel_vm_end); ) {
                 /*
                  * We always write a page, even if it is zero. Each
@@ -364,7 +364,7 @@
         /* Dump kernel page directory pages */
         bzero(fakepd, sizeof(fakepd));
         pdp = (uint64_t *)PHYS_TO_DMAP(KPDPphys);
-        for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + NKPT * NBPDR,
+        for (va = VM_MIN_KERNEL_ADDRESS; va < MAX(KERNBASE + nkpt * NBPDR,
             kernel_vm_end); va += NBPDP) {
                 i = (va >> PDPSHIFT) & ((1ul << NPDPEPGSHIFT) - 1);
 
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c      (revision 246277)
+++ sys/amd64/amd64/pmap.c      (working copy)
@@ -202,6 +202,10 @@
 vm_offset_t virtual_avail;     /* VA of first avail page (after kernel bss) */
 vm_offset_t virtual_end;       /* VA of last avail page (end of kernel AS) */
 
+int nkpt;
+SYSCTL_INT(_machdep, OID_AUTO, nkpt, CTLFLAG_RD, &nkpt, 0,
+    "Number of kernel page table pages allocated on bootup");
+
 static int ndmpdp;
 static vm_paddr_t dmaplimit;
 vm_offset_t kernel_vm_end = VM_MIN_KERNEL_ADDRESS;
@@ -495,17 +499,42 @@
 
 CTASSERT(powerof2(NDMPML4E));
 
+/* number of kernel PDP slots */
+#define NKPDPE(ptpgs)           howmany((ptpgs), NPDEPG)
+
 static void
+nkpt_init(vm_paddr_t addr)
+{
+        int pt_pages;
+
+#ifdef NKPT
+        pt_pages = NKPT;
+#else
+        pt_pages = howmany(addr, 1 << PDRSHIFT);
+        pt_pages += NKPDPE(pt_pages);
+
+        /*
+         * Add some slop beyond the bare minimum required for bootstrapping
+         * the kernel.
+         *
+         * This is quite important when allocating KVA for kernel modules.
+         * The modules are required to be linked in the negative 2GB of
+         * the address space.  If we run out of KVA in this region then
+         * pmap_growkernel() will need to allocate page table pages to map
+         * the entire 512GB of KVA space which is an unnecessary tax on
+         * physical memory.
+         */
+        pt_pages += 4;          /* 8MB additional slop for kernel modules */
+#endif
+        nkpt = pt_pages;
+}
+
+static void
 create_pagetables(vm_paddr_t *firstaddr)
 {
-        int i, j, ndm1g;
+        int i, j, ndm1g, nkpdpe;
 
-        /* Allocate pages */
-        KPTphys = allocpages(firstaddr, NKPT);
-        KPML4phys = allocpages(firstaddr, 1);
-        KPDPphys = allocpages(firstaddr, NKPML4E);
-        KPDphys = allocpages(firstaddr, NKPDPE);
-
+        /* Allocate page table pages for the direct map */
         ndmpdp = (ptoa(Maxmem) + NBPDP - 1) >> PDPSHIFT;
         if (ndmpdp < 4)         /* Minimum 4GB of dirmap */
                 ndmpdp = 4;
@@ -517,6 +546,22 @@
                 DMPDphys = allocpages(firstaddr, ndmpdp - ndm1g);
         dmaplimit = (vm_paddr_t)ndmpdp << PDPSHIFT;
 
+        /* Allocate pages */
+        KPML4phys = allocpages(firstaddr, 1);
+        KPDPphys = allocpages(firstaddr, NKPML4E);
+
+        /*
+         * Allocate the initial number of kernel page table pages required to
+         * bootstrap.  We defer this until after all memory-size dependent
+         * allocations are done (e.g. direct map), so that we don't have to
+         * build in too much slop in our estimate.
+         */
+        nkpt_init(*firstaddr);
+        nkpdpe = NKPDPE(nkpt);
+
+        KPTphys = allocpages(firstaddr, nkpt);
+        KPDphys = allocpages(firstaddr, nkpdpe);
+
         /* Fill in the underlying page table pages */
         /* Read-only from zero to physfree */
         /* XXX not fully used, underneath 2M pages */
@@ -526,7 +571,7 @@
         }
 
         /* Now map the page tables at their location within PTmap */
-        for (i = 0; i < NKPT; i++) {
+        for (i = 0; i < nkpt; i++) {
                 ((pd_entry_t *)KPDphys)[i] = KPTphys + (i << PAGE_SHIFT);
                 ((pd_entry_t *)KPDphys)[i] |= PG_RW | PG_V;
         }
@@ -539,7 +584,7 @@
         }
 
         /* And connect up the PD to the PDP */
-        for (i = 0; i < NKPDPE; i++) {
+        for (i = 0; i < nkpdpe; i++) {
                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] = KPDphys +
                     (i << PAGE_SHIFT);
                 ((pdp_entry_t *)KPDPphys)[i + KPDPI] |= PG_RW | PG_V | PG_U;
@@ -768,7 +813,7 @@
          * Initialize the vm page array entries for the kernel pmap's
          * page table pages.
          */
-        for (i = 0; i < NKPT; i++) {
+        for (i = 0; i < nkpt; i++) {
                 mpte = PHYS_TO_VM_PAGE(KPTphys + (i << PAGE_SHIFT));
                 KASSERT(mpte >= vm_page_array &&
                     mpte < &vm_page_array[vm_page_array_size],
@@ -1995,7 +2040,7 @@
          * any new kernel page table pages between "kernel_vm_end" and
          * "KERNBASE".
          */
-        if (KERNBASE < addr && addr <= KERNBASE + NKPT * NBPDR)
+        if (KERNBASE < addr && addr <= KERNBASE + nkpt * NBPDR)
                 return;
         addr = roundup2(addr, NBPDR);
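The sizing arithmetic that nkpt_init() performs can be checked in isolation. The program below is a standalone userland sketch, not part of the patch: the 24MB bootstrap footprint is a made-up example value, and PDRSHIFT, NPDEPG, howmany() and NKPDPE() are re-defined locally to mirror their amd64 kernel definitions rather than included from kernel headers.

/*
 * Standalone illustration of the sizing arithmetic in nkpt_init().
 * The macros below are local stand-ins for the amd64 kernel values;
 * the bootstrap footprint is an assumed example, not a measured one.
 */
#include <stdio.h>

#define PDRSHIFT        21      /* log2 of a 2MB superpage */
#define NPDEPG          512     /* page-directory entries per PD page */
#define howmany(x, y)   (((x) + ((y) - 1)) / (y))
#define NKPDPE(ptpgs)   howmany((ptpgs), NPDEPG)

int
main(void)
{
        unsigned long firstaddr = 24UL << 20;   /* assume ~24MB consumed by bootstrap allocations */
        int pt_pages;

        /* One page table page per 2MB of bootstrapped physical memory. */
        pt_pages = howmany(firstaddr, 1UL << PDRSHIFT);

        /* Add the page directory pages that reference those PT pages. */
        pt_pages += NKPDPE(pt_pages);

        /* Fixed slop: 4 more PT pages = 8MB of KVA headroom for modules. */
        pt_pages += 4;

        /* Each PT page maps 2MB of kernel VA. */
        printf("nkpt = %d page table pages (%d MB of kernel VA)\n",
            pt_pages, pt_pages * 2);
        return (0);
}

With these example inputs the program prints nkpt = 17, i.e. roughly 34MB of bootstrap KVA, versus the 64MB that the removed fixed NKPT of 32 always reserved regardless of machine size.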