diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5b47b2c5e482..65322f5a8475 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -429,6 +429,7 @@ vm_page_domain_init(struct vm_domain *vmd)
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
+	vm_page_t m;
 	vm_offset_t mapped;
 	vm_paddr_t high_avail, low_avail, page_range, size;
 	vm_paddr_t new_end;
@@ -650,21 +651,13 @@ vm_page_startup(vm_offset_t vaddr)
 		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
 
 	/*
-	 * Clear all of the page structures
-	 */
-	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
-	for (i = 0; i < page_range; i++)
-		vm_page_array[i].order = VM_NFREEORDER;
-	vm_page_array_size = page_range;
-
-	/*
 	 * Initialize the physical memory allocator.
 	 */
+	vm_page_array_size = page_range;
 	vm_phys_init();
 
 	/*
-	 * Add every available physical page that is not blacklisted to
-	 * the free lists.
+	 * Add every available physical page to the free lists.
 	 */
 	vm_cnt.v_page_count = 0;
 	vm_cnt.v_free_count = 0;
@@ -672,11 +665,20 @@ vm_page_startup(vm_offset_t vaddr)
 		pa = phys_avail[i];
 		last_pa = phys_avail[i + 1];
 		while (pa < last_pa) {
-			vm_phys_add_page(pa);
+			vm_phys_init_page(pa);
 			pa += PAGE_SIZE;
 		}
+		pa = phys_avail[i];
+		m = vm_phys_paddr_to_vm_page(pa);
+		mtx_lock(&vm_page_queue_free_mtx);
+		vm_phys_free_contig(m, atop(last_pa - pa));
+		vm_phys_freecnt_adj(m, (int)atop(last_pa - pa));
+		mtx_unlock(&vm_page_queue_free_mtx);
 	}
 
+	/*
+	 * Remove blacklisted pages from the physical memory allocator.
+	 */
 	TAILQ_INIT(&blacklist_head);
 	vm_page_blacklist_load(&list, &listend);
 	vm_page_blacklist_check(list, listend);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 7150c63d31c9..70114cc3834a 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -732,29 +732,30 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
- * Initialize a physical page and add it to the free lists.
+ * Initialize a physical page in preparation for adding it to the free lists.
  */
 void
-vm_phys_add_page(vm_paddr_t pa)
+vm_phys_init_page(vm_paddr_t pa)
 {
 	vm_page_t m;
 	struct vm_domain *vmd;
 
 	vm_cnt.v_page_count++;
 	m = vm_phys_paddr_to_vm_page(pa);
+	m->object = NULL;
+	m->wire_count = 0;
 	m->busy_lock = VPB_UNBUSIED;
+	m->hold_count = 0;
+	m->flags = m->aflags = m->oflags = 0;
 	m->phys_addr = pa;
 	m->queue = PQ_NONE;
+	m->psind = 0;
 	m->segind = vm_phys_paddr_to_segind(pa);
 	vmd = vm_phys_domain(m);
 	vmd->vmd_page_count++;
 	vmd->vmd_segs |= 1UL << m->segind;
-	KASSERT(m->order == VM_NFREEORDER,
-	    ("vm_phys_add_page: page %p has unexpected order %d",
-	    m, m->order));
+	m->order = VM_NFREEORDER;
 	m->pool = VM_FREEPOOL_DEFAULT;
+	m->act_count = 0;
+	m->valid = m->dirty = 0;
 	pmap_page_init(m);
-	mtx_lock(&vm_page_queue_free_mtx);
-	vm_phys_freecnt_adj(m, 1);
-	vm_phys_free_pages(m, 0);
-	mtx_unlock(&vm_page_queue_free_mtx);
 }
 
 /*
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 2ce5ba3cfe40..ef0a95e4933f 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -69,7 +69,6 @@ extern int vm_phys_nsegs;
 /*
  * The following functions are only to be used by the virtual memory system.
  */
-void vm_phys_add_page(vm_paddr_t pa);
 void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
 vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary);
@@ -83,6 +82,7 @@ vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa);
 void vm_phys_free_contig(vm_page_t m, u_long npages);
 void vm_phys_free_pages(vm_page_t m, int order);
 void vm_phys_init(void);
+void vm_phys_init_page(vm_paddr_t pa);
 vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 vm_page_t vm_phys_scan_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary, int options);