diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index 3b5df5d0fa91..2e092fec4dac 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -355,6 +355,7 @@ pmap_pku_mask_bit(pmap_t pmap)
 struct pmap kernel_pmap_store;
 
 vm_offset_t virtual_avail;	/* VA of first avail page (after kernel bss) */
+vm_offset_t virtual_avail2;	/* XXX */
 vm_offset_t virtual_end;	/* VA of last avail page (end of kernel AS) */
 
 int nkpt;
@@ -3832,8 +3833,8 @@ SYSCTL_PROC(_vm, OID_AUTO, kvm_free, CTLTYPE_LONG|CTLFLAG_RD,
  * Allocate physical memory for the vm_page array and map it into KVA,
  * attempting to back the vm_pages with domain-local memory.
  */
-void
-pmap_page_array_startup(long pages)
+vm_offset_t
+pmap_page_array_startup(long count, size_t sz)
 {
 	pdp_entry_t *pdpe;
 	pd_entry_t *pde, newpdir;
@@ -3842,12 +3843,12 @@ pmap_page_array_startup(long pages)
 	long pfn;
 	int domain, i;
 
-	vm_page_array_size = pages;
+	vm_page_array_size = count;
 
-	start = va = VM_MIN_KERNEL_ADDRESS;
-	end = va + pages * sizeof(struct vm_page);
-	while (va < end) {
-		pfn = first_page + (va - start) / sizeof(struct vm_page);
+	start = VM_MIN_KERNEL_ADDRESS;
+	end = start + count * sz;
+	for (va = start; va < end; va += NBPDR) {
+		pfn = first_page + (va - start) / sz;
 		domain = _vm_phys_domain(ptoa(pfn));
 		pdpe = pmap_pdpe(kernel_pmap, va);
 		if ((*pdpe & X86_PG_V) == 0) {
@@ -3866,11 +3867,51 @@ pmap_page_array_startup(long pages)
 		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
 		    X86_PG_M | PG_PS | pg_g | pg_nx);
 		pde_store(pde, newpdir);
-		va += NBPDR;
 	}
-	vm_page_array = (vm_page_t)start;
+	virtual_avail2 = end;
+	return (start);
 }
 
+#if VM_NRESERVLEVEL > 0
+vm_offset_t
+pmap_reserv_array_startup(long count, size_t sz)
+{
+	pdp_entry_t *pdpe;
+	pd_entry_t *pde, newpdir;
+	vm_offset_t va, start, end;
+	vm_paddr_t pa;
+	long pfn;
+	int domain, i;
+
+	/* XXX */
+	start = round_2mpage(virtual_avail2);
+	end = start + count * sz;
+	for (va = start; va < end; va += NBPDR) {
+		pfn = (va - start) / sz;
+		domain = _vm_phys_domain(pfn << PDRSHIFT);
+		pdpe = pmap_pdpe(kernel_pmap, va);
+		if ((*pdpe & X86_PG_V) == 0) {
+			pa = vm_phys_early_alloc(domain, PAGE_SIZE);
+			dump_add_page(pa);
+			pagezero((void *)PHYS_TO_DMAP(pa));
+			*pdpe = (pdp_entry_t)(pa | X86_PG_V | X86_PG_RW |
+			    X86_PG_A | X86_PG_M);
+		}
+		pde = pmap_pdpe_to_pde(pdpe, va);
+		if ((*pde & X86_PG_V) != 0)
+			panic("Unexpected pde");
+		pa = vm_phys_early_alloc(domain, NBPDR);
+		for (i = 0; i < NPDEPG; i++)
+			dump_add_page(pa + i * PAGE_SIZE);
+		newpdir = (pd_entry_t)(pa | X86_PG_V | X86_PG_RW | X86_PG_A |
+		    X86_PG_M | PG_PS | pg_g | pg_nx);
+		pde_store(pde, newpdir);
+	}
+	virtual_avail2 = end;
+	return (start);
+}
+#endif
+
 /*
  * grow the number of kernel page table entries, if needed
  */
diff --git a/sys/amd64/include/pmap.h b/sys/amd64/include/pmap.h
index 494c634bd8a8..dd302c6be879 100644
--- a/sys/amd64/include/pmap.h
+++ b/sys/amd64/include/pmap.h
@@ -408,7 +408,7 @@ struct pv_chunk {
 
 extern caddr_t	CADDR1;
 extern pt_entry_t *CMAP1;
-extern vm_offset_t virtual_avail;
+extern vm_offset_t virtual_avail, virtual_avail2;
 extern vm_offset_t virtual_end;
 extern vm_paddr_t dmaplimit;
 extern int pmap_pcid_enabled;
@@ -467,7 +467,8 @@ int	pmap_pkru_set(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, u_int keyidx,
 	    int flags);
 void	pmap_thread_init_invl_gen(struct thread *td);
 int	pmap_vmspace_copy(pmap_t dst_pmap, pmap_t src_pmap);
-void	pmap_page_array_startup(long count);
+vm_offset_t pmap_page_array_startup(long count, size_t sz);
+vm_offset_t pmap_reserv_array_startup(long count, size_t sz);
 #endif /* _KERNEL */
 
 /* Return various clipped indexes for a given VA */
diff --git a/sys/vm/vm_kern.c b/sys/vm/vm_kern.c
index 482ead0d1180..d90d2325f9ed 100644
--- a/sys/vm/vm_kern.c
+++ b/sys/vm/vm_kern.c
@@ -774,10 +774,8 @@ kmem_init(vm_offset_t start, vm_offset_t end)
 	 * that handle vm_page_array allocation can simply adjust virtual_avail
 	 * instead.
	 */
-	(void)vm_map_insert(m, NULL, 0, (vm_offset_t)vm_page_array,
-	    (vm_offset_t)vm_page_array + round_2mpage(vm_page_array_size *
-	    sizeof(struct vm_page)),
-	    VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
+	(void)vm_map_insert(m, NULL, 0, VM_MIN_KERNEL_ADDRESS,
+	    round_2mpage(virtual_avail2), VM_PROT_RW, VM_PROT_RW, MAP_NOFAULT);
 #endif
 	vm_map_unlock(m);
 
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 9ea326b5ebf9..5cd3f3fb3a2a 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -744,7 +744,8 @@ vm_page_startup(vm_offset_t vaddr)
 #endif
 
 #ifdef PMAP_HAS_PAGE_ARRAY
-	pmap_page_array_startup(size / PAGE_SIZE);
+	vm_page_array = (vm_page_t)pmap_page_array_startup(size / PAGE_SIZE,
+	    sizeof(struct vm_page));
 	biggestone = vm_phys_avail_largest();
 	end = new_end = phys_avail[biggestone + 1];
 #else
diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c
index a05edb248606..70426eca5353 100644
--- a/sys/vm/vm_reserv.c
+++ b/sys/vm/vm_reserv.c
@@ -1059,6 +1059,7 @@ vm_reserv_init(void)
 		while (paddr + VM_LEVEL_0_SIZE > paddr &&
 		    paddr + VM_LEVEL_0_SIZE <= seg->end) {
 			rv = &vm_reserv_array[paddr >> VM_LEVEL_0_SHIFT];
+			memset(rv, 0, sizeof(*rv));
 			rv->pages = PHYS_TO_VM_PAGE(paddr);
 			rv->domain = seg->domain;
 			mtx_init(&rv->lock, "vm reserv", NULL, MTX_DEF);
@@ -1363,7 +1364,7 @@ vm_paddr_t
 vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
 {
 	vm_paddr_t new_end, high_water;
-	size_t size;
+	long count;
 	int i;
 
 	high_water = phys_avail[1];
@@ -1378,6 +1379,13 @@ vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
 			high_water = phys_avail[i + 1];
 	}
 
+	count = howmany(high_water, VM_LEVEL_0_SIZE);
+
+#ifdef PMAP_HAS_PAGE_ARRAY
+	vm_reserv_array = (vm_reserv_t)pmap_reserv_array_startup(count,
+	    sizeof(struct vm_reserv));
+	new_end = phys_avail[vm_phys_avail_largest() + 1];
+#else
 	/*
 	 * Calculate the size (in bytes) of the reservation array.  Round up
 	 * from "high_water" because every small page is mapped to an element
@@ -1385,16 +1393,15 @@ vm_reserv_startup(vm_offset_t *vaddr, vm_paddr_t end)
 	 * number of elements in the reservation array can be greater than the
 	 * number of superpages.
 	 */
-	size = howmany(high_water, VM_LEVEL_0_SIZE) * sizeof(struct vm_reserv);
 
 	/*
 	 * Allocate and map the physical memory for the reservation array.  The
 	 * next available virtual address is returned by reference.
 	 */
-	new_end = end - round_page(size);
+	new_end = end - round_page(count * sizeof(struct vm_reserv));
 	vm_reserv_array = (void *)(uintptr_t)pmap_map(vaddr, new_end, end,
 	    VM_PROT_READ | VM_PROT_WRITE);
-	bzero(vm_reserv_array, size);
+#endif
 
 	/*
 	 * Return the next available physical address.
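
As a rough sanity check on the KVA budget the two startup helpers carve out, the userland sketch below mirrors the sizing arithmetic only (it is not kernel code): one vm_page per 4 KiB physical page, one vm_reserv per 2 MiB (VM_LEVEL_0_SIZE) chunk, each array backed by 2 MiB (NBPDR) mappings, with the reservation array starting at the next 2 MiB boundary past the page array (round_2mpage(virtual_avail2)). The 64 GiB high_water figure and the structure sizes are placeholder assumptions, not the kernel's real sizeof() values.

/*
 * Userland sketch of the KVA sizing performed by pmap_page_array_startup()
 * and pmap_reserv_array_startup().  Constants mirror amd64 defaults; the
 * structure sizes below are placeholders.
 */
#include <stdint.h>
#include <stdio.h>

#define	PAGE_SIZE	4096ULL			/* amd64 base page */
#define	NBPDR		(2ULL * 1024 * 1024)	/* one PDE maps 2 MiB */
#define	VM_LEVEL_0_SIZE	NBPDR			/* one reservation covers 2 MiB */
#define	round_2mpage(x)	(((x) + NBPDR - 1) & ~(NBPDR - 1))
#define	howmany(x, y)	(((x) + (y) - 1) / (y))

int
main(void)
{
	uint64_t high_water, page_count, page_kva, reserv_count, reserv_kva;
	uint64_t vm_page_sz, vm_reserv_sz;

	high_water = 64ULL << 30;	/* placeholder: 64 GiB of RAM */
	vm_page_sz = 104;		/* placeholder for sizeof(struct vm_page) */
	vm_reserv_sz = 176;		/* placeholder for sizeof(struct vm_reserv) */

	/* pmap_page_array_startup(): one vm_page per 4 KiB physical page. */
	page_count = high_water / PAGE_SIZE;
	page_kva = round_2mpage(page_count * vm_page_sz);

	/* vm_reserv_startup(): count = howmany(high_water, VM_LEVEL_0_SIZE). */
	reserv_count = howmany(high_water, VM_LEVEL_0_SIZE);
	reserv_kva = round_2mpage(reserv_count * vm_reserv_sz);

	printf("vm_page array:   %ju entries, %ju MiB of KVA, %ju 2M mappings\n",
	    (uintmax_t)page_count, (uintmax_t)(page_kva >> 20),
	    (uintmax_t)(page_kva / NBPDR));
	printf("vm_reserv array: %ju entries, %ju MiB of KVA, %ju 2M mappings\n",
	    (uintmax_t)reserv_count, (uintmax_t)(reserv_kva >> 20),
	    (uintmax_t)(reserv_kva / NBPDR));
	return (0);
}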