diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 5b47b2c5e482..b8ec1d19a189 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -418,6 +418,27 @@ vm_page_domain_init(struct vm_domain *vmd)
 	}
 }
 
+static void
+vm_page_init_page(vm_page_t m, vm_page_t template, vm_paddr_t pa, int8_t segind)
+{
+
+	bcopy(template, m, sizeof(*m));
+	m->phys_addr = pa;
+	m->segind = segind;
+	pmap_page_init(m);
+}
+
+static void
+vm_page_init_template(vm_page_t m)
+{
+
+	bzero(m, sizeof(*m));
+	m->busy_lock = VPB_UNBUSIED;
+	m->queue = PQ_NONE;
+	m->order = VM_NFREEORDER;
+	m->pool = VM_FREEPOOL_DEFAULT;
+}
+
 /*
  * vm_page_startup:
  *
@@ -429,15 +450,20 @@ vm_page_domain_init(struct vm_domain *vmd)
 vm_offset_t
 vm_page_startup(vm_offset_t vaddr)
 {
+	struct vm_page template;
+	struct vm_domain *vmd;
+	struct vm_phys_seg *seg;
+	vm_page_t m;
 	vm_offset_t mapped;
 	vm_paddr_t high_avail, low_avail, page_range, size;
 	vm_paddr_t new_end;
-	int i;
+	int i, segind;
 	vm_paddr_t pa;
 	vm_paddr_t last_pa;
 	char *list, *listend;
 	vm_paddr_t end;
 	vm_paddr_t biggestsize;
+	u_long pagecount;
 	int biggestone;
 	int pages_per_zone;
 
@@ -650,33 +676,47 @@ vm_page_startup(vm_offset_t vaddr)
 		vm_phys_add_seg(phys_avail[i], phys_avail[i + 1]);
 
 	/*
-	 * Clear all of the page structures
-	 */
-	bzero((caddr_t) vm_page_array, page_range * sizeof(struct vm_page));
-	for (i = 0; i < page_range; i++)
-		vm_page_array[i].order = VM_NFREEORDER;
-	vm_page_array_size = page_range;
-
-	/*
 	 * Initialize the physical memory allocator.
 	 */
+	vm_page_array_size = page_range;
 	vm_phys_init();
 
 	/*
-	 * Add every available physical page that is not blacklisted to
-	 * the free lists.
+	 * Initialize the page structures and add every available page to the
+	 * physical memory allocator's free lists.
 	 */
 	vm_cnt.v_page_count = 0;
 	vm_cnt.v_free_count = 0;
-	for (i = 0; phys_avail[i + 1] != 0; i += 2) {
-		pa = phys_avail[i];
-		last_pa = phys_avail[i + 1];
-		while (pa < last_pa) {
-			vm_phys_add_page(pa);
-			pa += PAGE_SIZE;
+	vm_page_init_template(&template);
+	for (segind = 0; segind < vm_phys_nsegs; segind++) {
+		seg = &vm_phys_segs[segind];
+		for (pa = seg->start, m = seg->first_page; pa < seg->end;
+		    pa += PAGE_SIZE, m++)
+			vm_page_init_page(m, &template, pa, segind);
+
+		for (i = 0; phys_avail[i + 1] != 0; i += 2) {
+			if (seg->start < phys_avail[i] ||
+			    seg->end > phys_avail[i + 1])
+				continue;
+
+			m = seg->first_page;
+			pagecount = (u_long)atop(seg->end - seg->start);
+
+			mtx_lock(&vm_page_queue_free_mtx);
+			vm_phys_free_contig(m, pagecount);
+			vm_phys_freecnt_adj(m, (int)pagecount);
+			mtx_unlock(&vm_page_queue_free_mtx);
+			vm_cnt.v_page_count += (u_int)pagecount;
+
+			vmd = &vm_dom[seg->domain];
+			vmd->vmd_page_count += (u_int)pagecount;
+			vmd->vmd_segs |= 1UL << m->segind;
 		}
 	}
 
+	/*
+	 * Remove blacklisted pages from the physical memory allocator.
+	 */
 	TAILQ_INIT(&blacklist_head);
 	vm_page_blacklist_load(&list, &listend);
 	vm_page_blacklist_check(list, listend);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 7150c63d31c9..ccef3220571f 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -171,7 +171,6 @@ static vm_page_t vm_phys_alloc_seg_contig(struct vm_phys_seg *seg,
     vm_paddr_t boundary);
 static void _vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end, int domain);
 static void vm_phys_create_seg(vm_paddr_t start, vm_paddr_t end);
-static int vm_phys_paddr_to_segind(vm_paddr_t pa);
 static void vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl,
     int order);
 
@@ -729,35 +728,6 @@ vm_phys_split_pages(vm_page_t m, int oind, struct vm_freelist *fl, int order)
 }
 
 /*
- * Initialize a physical page and add it to the free lists.
- */
-void
-vm_phys_add_page(vm_paddr_t pa)
-{
-	vm_page_t m;
-	struct vm_domain *vmd;
-
-	vm_cnt.v_page_count++;
-	m = vm_phys_paddr_to_vm_page(pa);
-	m->busy_lock = VPB_UNBUSIED;
-	m->phys_addr = pa;
-	m->queue = PQ_NONE;
-	m->segind = vm_phys_paddr_to_segind(pa);
-	vmd = vm_phys_domain(m);
-	vmd->vmd_page_count++;
-	vmd->vmd_segs |= 1UL << m->segind;
-	KASSERT(m->order == VM_NFREEORDER,
-	    ("vm_phys_add_page: page %p has unexpected order %d",
-	    m, m->order));
-	m->pool = VM_FREEPOOL_DEFAULT;
-	pmap_page_init(m);
-	mtx_lock(&vm_page_queue_free_mtx);
-	vm_phys_freecnt_adj(m, 1);
-	vm_phys_free_pages(m, 0);
-	mtx_unlock(&vm_page_queue_free_mtx);
-}
-
-/*
  * Allocate a contiguous, power of two-sized set of physical pages
  * from the free lists.
  *
@@ -1065,24 +1035,6 @@ vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
 }
 
 /*
- * Find the segment containing the given physical address.
- */
-static int
-vm_phys_paddr_to_segind(vm_paddr_t pa)
-{
-	struct vm_phys_seg *seg;
-	int segind;
-
-	for (segind = 0; segind < vm_phys_nsegs; segind++) {
-		seg = &vm_phys_segs[segind];
-		if (pa >= seg->start && pa < seg->end)
-			return (segind);
-	}
-	panic("vm_phys_paddr_to_segind: paddr %#jx is not in any segment",
-	    (uintmax_t)pa);
-}
-
-/*
  * Free a contiguous, power of two-sized set of physical pages.
  *
  * The free page queues must be locked.
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index 2ce5ba3cfe40..631f15e20dd5 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -69,7 +69,6 @@ extern int vm_phys_nsegs;
 /*
  * The following functions are only to be used by the virtual memory system.
  */
-void vm_phys_add_page(vm_paddr_t pa);
 void vm_phys_add_seg(vm_paddr_t start, vm_paddr_t end);
 vm_page_t vm_phys_alloc_contig(u_long npages, vm_paddr_t low, vm_paddr_t high,
     u_long alignment, vm_paddr_t boundary);
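
The heart of the patch is the template trick in vm_page_init_template()/vm_page_init_page(): the fields of struct vm_page that are identical for every page are filled in once, then stamped over each page with bcopy() before the few per-page fields (phys_addr, segind) are patched. The userspace sketch below illustrates the same pattern in isolation; the struct layout, constants, and names (struct page, page_init_template(), page_init_page()) are simplified stand-ins invented for illustration, not the kernel's definitions.

/*
 * Illustrative sketch of the template-initialization pattern used by the
 * patch above.  Compiles standalone; nothing here is kernel code.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define	NPAGES		8
#define	PAGE_SIZE	4096

/* Simplified stand-in for struct vm_page. */
struct page {
	uint64_t phys_addr;	/* per-page: physical address */
	int8_t	 segind;	/* per-page: owning segment index */
	uint8_t	 queue;		/* invariant at init: no paging queue */
	uint8_t	 order;		/* invariant at init: not in the buddy lists */
	uint8_t	 pool;		/* invariant at init: default free pool */
};

/* Mirrors vm_page_init_template(): set only the invariant fields. */
static void
page_init_template(struct page *tmpl)
{

	memset(tmpl, 0, sizeof(*tmpl));
	tmpl->queue = 0xff;	/* stand-in for PQ_NONE */
	tmpl->order = 13;	/* stand-in for VM_NFREEORDER */
	tmpl->pool = 0;		/* stand-in for VM_FREEPOOL_DEFAULT */
}

/* Mirrors vm_page_init_page(): copy the template, patch per-page fields. */
static void
page_init_page(struct page *m, const struct page *tmpl, uint64_t pa,
    int8_t segind)
{

	memcpy(m, tmpl, sizeof(*m));
	m->phys_addr = pa;
	m->segind = segind;
}

int
main(void)
{
	struct page template, pages[NPAGES];
	uint64_t pa;
	int i;

	/* One template setup, then a cheap copy-and-patch per page. */
	page_init_template(&template);
	for (i = 0, pa = 0; i < NPAGES; i++, pa += PAGE_SIZE)
		page_init_page(&pages[i], &template, pa, 0);

	printf("page 3: pa=%#jx order=%d\n",
	    (uintmax_t)pages[3].phys_addr, pages[3].order);
	return (0);
}

The plausible payoff, consistent with the patch's rewrite of the startup loop, is that a fixed-size structure copy of a small template compiles down to a few wide stores, whereas per-field assignment (as in the removed vm_phys_add_page()) touches each member individually and, in the old code, also paid for a vm_phys_paddr_to_segind() segment lookup on every page. Freeing each segment's pages in one vm_phys_free_contig() call likewise replaces a per-page lock/free/unlock cycle with one locked batch operation.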