diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index c9ff9bc..1845271 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -2306,7 +2306,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
 		m++;
-		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+		KASSERT((m->flags & PG_UNMANAGED) == 0,
 		    ("pmap_pv_demote_pde: page %p is not managed", m));
 		va += PAGE_SIZE;
 		pmap_insert_entry(pmap, va, m);
@@ -2833,8 +2833,6 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
 	free = NULL;
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
@@ -3180,8 +3178,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%lx)",
 	    va));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
-	    (m->oflags & VPO_BUSY) != 0,
+	KASSERT((m->flags & PG_UNMANAGED) != 0 || (m->oflags & VPO_BUSY) != 0,
 	    ("pmap_enter: page %p is not busy", m));
 
 	mpte = NULL;
@@ -3262,7 +3259,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->flags & PG_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
@@ -3375,7 +3372,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 	}
 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 	    PG_PS | PG_V;
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->flags & PG_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
 
 		/*
@@ -3542,7 +3539,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	if ((m->flags & PG_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
 		if (mpte != NULL) {
 			free = NULL;
@@ -3567,7 +3564,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+	if ((m->flags & PG_UNMANAGED) != 0)
 		pte_store(pte, pa | PG_V | PG_U);
 	else
 		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -3944,7 +3941,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -3985,7 +3982,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (count);
 	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
@@ -4027,7 +4024,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
 	boolean_t rv;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	if ((m->flags & PG_UNMANAGED) != 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -4104,7 +4101,8 @@ pmap_remove_pages(pmap_t pmap)
 				    m, (uintmax_t)m->phys_addr,
 				    (uintmax_t)tpte));
 
-				KASSERT(m < &vm_page_array[vm_page_array_size],
+				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+				    m < &vm_page_array[vm_page_array_size],
 				    ("pmap_remove_pages: bad tpte %#jx",
 				    (uintmax_t)tpte));
 
@@ -4185,7 +4183,7 @@ pmap_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
@@ -4266,7 +4264,7 @@ pmap_is_referenced(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	rv = pmap_is_referenced_pvh(&m->md) ||
@@ -4314,7 +4312,7 @@ pmap_remove_write(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
@@ -4383,7 +4381,7 @@ pmap_ts_referenced(vm_page_t m)
 	vm_offset_t va;
 	int rtval = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	vm_page_lock_queues();
@@ -4457,7 +4455,7 @@ pmap_clear_modify(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4534,7 +4532,7 @@ pmap_clear_reference(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
diff --git a/sys/i386/i386/pmap.c b/sys/i386/i386/pmap.c
index d10bbe5..b9906cd 100644
--- a/sys/i386/i386/pmap.c
+++ b/sys/i386/i386/pmap.c
@@ -2374,7 +2374,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm_paddr_t pa)
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
 		m++;
-		KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+		KASSERT((m->flags & PG_UNMANAGED) == 0,
 		    ("pmap_pv_demote_pde: page %p is not managed", m));
 		va += PAGE_SIZE;
 		pmap_insert_entry(pmap, va, m);
@@ -2901,8 +2901,6 @@ pmap_remove_all(vm_page_t m)
 	vm_offset_t va;
 	vm_page_t free;
 
-	KASSERT((m->flags & PG_FICTITIOUS) == 0,
-	    ("pmap_remove_all: page %p is fictitious", m));
 	free = NULL;
 	vm_page_lock_queues();
 	sched_pin();
@@ -3273,7 +3271,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	KASSERT(va < UPT_MIN_ADDRESS || va >= UPT_MAX_ADDRESS,
 	    ("pmap_enter: invalid to pmap_enter page table pages (va: 0x%x)",
 	    va));
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0 ||
+	KASSERT((m->flags & PG_UNMANAGED) != 0 ||
 	    (m->oflags & VPO_BUSY) != 0,
 	    ("pmap_enter: page %p is not busy", m));
 
@@ -3362,7 +3360,7 @@ pmap_enter(pmap_t pmap, vm_offset_t va, vm_prot_t access, vm_page_t m,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->flags & PG_UNMANAGED) == 0) {
 		KASSERT(va < kmi.clean_sva || va >= kmi.clean_eva,
 		    ("pmap_enter: managed mapping within the clean submap"));
 		if (pv == NULL)
@@ -3472,7 +3470,7 @@ pmap_enter_pde(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot)
 	}
 	newpde = VM_PAGE_TO_PHYS(m) | pmap_cache_bits(m->md.pat_mode, 1) |
 	    PG_PS | PG_V;
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0) {
+	if ((m->flags & PG_UNMANAGED) == 0) {
 		newpde |= PG_MANAGED;
 
 		/*
@@ -3641,7 +3639,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/*
 	 * Enter on the PV list if part of our managed memory.
 	 */
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0 &&
+	if ((m->flags & PG_UNMANAGED) == 0 &&
 	    !pmap_try_insert_pv_entry(pmap, va, m)) {
 		if (mpte != NULL) {
 			free = NULL;
@@ -3669,7 +3667,7 @@ pmap_enter_quick_locked(pmap_t pmap, vm_offset_t va, vm_page_t m,
 	/*
 	 * Now validate mapping with RO protection
 	 */
-	if (m->flags & (PG_FICTITIOUS|PG_UNMANAGED))
+	if ((m->flags & PG_UNMANAGED) != 0)
 		pte_store(pte, pa | PG_V | PG_U);
 	else
 		pte_store(pte, pa | PG_V | PG_U | PG_MANAGED);
@@ -4070,7 +4068,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	int loops = 0;
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_page_exists_quick: page %p is not managed", m));
 	rv = FALSE;
 	vm_page_lock_queues();
@@ -4111,7 +4109,7 @@ pmap_page_wired_mappings(vm_page_t m)
 	int count;
 
 	count = 0;
-	if ((m->flags & PG_FICTITIOUS) != 0)
+	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
 		return (count);
 	vm_page_lock_queues();
 	count = pmap_pvh_wired_mappings(&m->md, count);
@@ -4155,7 +4153,7 @@ pmap_page_is_mapped(vm_page_t m)
 {
 	boolean_t rv;
 
-	if ((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) != 0)
+	if ((m->flags & PG_UNMANAGED) != 0)
 		return (FALSE);
 	vm_page_lock_queues();
 	rv = !TAILQ_EMPTY(&m->md.pv_list) ||
@@ -4232,7 +4230,8 @@ pmap_remove_pages(pmap_t pmap)
 				    m, (uintmax_t)m->phys_addr,
 				    (uintmax_t)tpte));
 
-				KASSERT(m < &vm_page_array[vm_page_array_size],
+				KASSERT((m->flags & PG_FICTITIOUS) != 0 ||
+				    m < &vm_page_array[vm_page_array_size],
 				    ("pmap_remove_pages: bad tpte %#jx",
 				    (uintmax_t)tpte));
 
@@ -4315,7 +4314,7 @@ pmap_is_modified(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_is_modified: page %p is not managed", m));
 
 	/*
@@ -4398,7 +4397,7 @@ pmap_is_referenced(vm_page_t m)
 {
 	boolean_t rv;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_is_referenced: page %p is not managed", m));
 	vm_page_lock_queues();
 	rv = pmap_is_referenced_pvh(&m->md) ||
@@ -4448,7 +4447,7 @@ pmap_remove_write(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_remove_write: page %p is not managed", m));
 
 	/*
@@ -4524,7 +4523,7 @@ pmap_ts_referenced(vm_page_t m)
 	vm_offset_t va;
 	int rtval = 0;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_ts_referenced: page %p is not managed", m));
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 	vm_page_lock_queues();
@@ -4600,7 +4599,7 @@ pmap_clear_modify(vm_page_t m)
 	pt_entry_t oldpte, *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_clear_modify: page %p is not managed", m));
 	VM_OBJECT_LOCK_ASSERT(m->object, MA_OWNED);
 	KASSERT((m->oflags & VPO_BUSY) == 0,
@@ -4689,7 +4688,7 @@ pmap_clear_reference(vm_page_t m)
 	pt_entry_t *pte;
 	vm_offset_t va;
 
-	KASSERT((m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0,
+	KASSERT((m->flags & PG_UNMANAGED) == 0,
 	    ("pmap_clear_reference: page %p is not managed", m));
 	vm_page_lock_queues();
 	sched_pin();
diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c
index d417a84..eee8583 100644
--- a/sys/vm/vm_fault.c
+++ b/sys/vm/vm_fault.c
@@ -1035,7 +1035,7 @@ vm_fault_prefault(pmap_t pmap, vm_offset_t addra, vm_map_entry_t entry)
 			break;
 		}
 		if (m->valid == VM_PAGE_BITS_ALL &&
-		    (m->flags & PG_FICTITIOUS) == 0)
+		    (m->flags & (PG_FICTITIOUS | PG_UNMANAGED)) == 0)
 			pmap_enter_quick(pmap, addr, m, entry->protection);
 		VM_OBJECT_UNLOCK(lobject);
 	}
diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c
index 5df2f31..f97751c 100644
--- a/sys/vm/vm_object.c
+++ b/sys/vm/vm_object.c
@@ -1785,8 +1785,8 @@ again:
 		vm_page_lock(p);
 		if ((wirings = p->wire_count) != 0 &&
 		    (wirings = pmap_page_wired_mappings(p)) != p->wire_count) {
-			/* Fictitious pages do not have managed mappings. */
-			if ((p->flags & PG_FICTITIOUS) == 0)
+			/* Unmanaged pages do not have managed mappings. */
+			if ((p->flags & PG_UNMANAGED) == 0)
 				pmap_remove_all(p);
 			/* Account for removal of managed, wired mappings. */
 			p->wire_count -= wirings;
@@ -1799,7 +1799,8 @@ again:
 		}
 		if (vm_page_sleep_if_busy(p, TRUE, "vmopar"))
 			goto again;
-		KASSERT((p->flags & PG_FICTITIOUS) == 0,
+		KASSERT((p->flags & (PG_FICTITIOUS | PG_UNMANAGED)) !=
+		    (PG_FICTITIOUS | PG_UNMANAGED),
 		    ("vm_object_page_remove: page %p is fictitious", p));
 		if (clean_only && p->valid) {
 			pmap_remove_write(p);
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index e2758ec..14f0213 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -143,7 +143,7 @@ struct vpglocks vm_page_queue_free_lock;
 struct vpglocks pa_lock[PA_LOCK_COUNT];
 
 vm_page_t vm_page_array = 0;
-int vm_page_array_size = 0;
+long vm_page_array_size = 0;
 long first_page = 0;
 int vm_page_zero_count = 0;
 
@@ -483,7 +483,7 @@ vm_page_flag_set(vm_page_t m, unsigned short bits)
 	 * VPO_BUSY. Currently, this flag is only set by pmap_enter().
 	 */
 	KASSERT((bits & PG_WRITEABLE) == 0 ||
-	    ((m->flags & (PG_UNMANAGED | PG_FICTITIOUS)) == 0 &&
+	    ((m->flags & PG_UNMANAGED) == 0 &&
 	    (m->oflags & VPO_BUSY) != 0), ("PG_WRITEABLE and !VPO_BUSY"));
 	m->flags |= bits;
 }
@@ -618,6 +618,30 @@ vm_page_unhold_pages(vm_page_t *ma, int count)
 		mtx_unlock(mtx);
 }
 
+vm_page_t
+PHYS_TO_VM_PAGE(vm_paddr_t pa)
+{
+	vm_page_t m;
+
+#ifdef VM_PHYSSEG_SPARSE
+	m = vm_phys_paddr_to_vm_page(pa);
+	if (m == NULL)
+		m = vm_phys_fictitious_to_vm_page(pa);
+	return (m);
+#elif defined(VM_PHYSSEG_DENSE)
+	long pi;
+
+	pi = atop(pa);
+	if (pi >= first_page && pi < vm_page_array_size) {
+		m = &vm_page_array[pi - first_page];
+		return (m);
+	}
+	return (vm_phys_fictitious_to_vm_page(pa));
+#else
+#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
+#endif
+}
+
 /*
  * vm_page_getfake:
  *
@@ -631,15 +655,25 @@ vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr)
 	vm_page_t m;
 
 	m = uma_zalloc(fakepg_zone, M_WAITOK | M_ZERO);
+	vm_page_initfake(m, paddr, memattr);
+	return (m);
+}
+
+void
+vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr)
+{
+
+	if ((m->flags & PG_FICTITIOUS) != 0)
+		return;
+
 	m->phys_addr = paddr;
 	m->queue = PQ_NONE;
 	/* Fictitious pages don't use "segind". */
-	m->flags = PG_FICTITIOUS;
+	m->flags = PG_FICTITIOUS | PG_UNMANAGED;
 	/* Fictitious pages don't use "order" or "pool". */
 	m->oflags = VPO_BUSY;
 	m->wire_count = 1;
 	pmap_page_set_memattr(m, memattr);
-	return (m);
 }
 
 /*
@@ -651,9 +685,14 @@ void
 vm_page_putfake(vm_page_t m)
 {
 
-	KASSERT((m->flags & PG_FICTITIOUS) != 0,
-	    ("vm_page_putfake: bad page %p", m));
-	uma_zfree(fakepg_zone, m);
+	if ((m->flags & PG_UNMANAGED) == 0) {
+		pmap_remove_all(m);
+		vm_page_lock(m);
+		vm_page_remove(m);
+		vm_page_unlock(m);
+	} else {
+		uma_zfree(fakepg_zone, m);
+	}
 }
 
 /*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index c34d2f0..5409ee6 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -266,7 +266,7 @@ struct vnode;
 extern int vm_page_zero_count;
 
 extern vm_page_t vm_page_array;		/* First resident page in table */
-extern int vm_page_array_size;		/* number of vm_page_t's */
+extern long vm_page_array_size;		/* number of vm_page_t's */
 extern long first_page;			/* first physical page number */
 
 #define VM_PAGE_IS_FREE(m)	(((m)->flags & PG_FREE) != 0)
@@ -275,19 +275,7 @@ extern long first_page;			/* first physical page number */
 
 vm_page_t vm_phys_paddr_to_vm_page(vm_paddr_t pa);
 
-static __inline vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
-
-static __inline vm_page_t
-PHYS_TO_VM_PAGE(vm_paddr_t pa)
-{
-#ifdef VM_PHYSSEG_SPARSE
-	return (vm_phys_paddr_to_vm_page(pa));
-#elif defined(VM_PHYSSEG_DENSE)
-	return (&vm_page_array[atop(pa) - first_page]);
-#else
-#error "Either VM_PHYSSEG_DENSE or VM_PHYSSEG_SPARSE must be defined."
-#endif
-}
+vm_page_t PHYS_TO_VM_PAGE(vm_paddr_t pa);
 
 extern struct vpglocks vm_page_queue_lock;
 
@@ -353,6 +341,7 @@ void vm_page_dontneed(vm_page_t);
 void vm_page_deactivate (vm_page_t);
 vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr);
+void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr);
 void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 93fee70..cfbb7d2 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -82,6 +82,15 @@ static struct vm_phys_seg vm_phys_segs[VM_PHYSSEG_MAX];
 
 static int vm_phys_nsegs;
 
+#define VM_PHYS_FICTITIOUS_NSEGS	8
+static struct vm_phys_fictitious_seg {
+	vm_paddr_t	start;
+	vm_paddr_t	end;
+	vm_page_t	first_page;
+} vm_phys_fictitious_segs[VM_PHYS_FICTITIOUS_NSEGS];
+static struct mtx vm_phys_fictitious_reg_mtx;
+MALLOC_DEFINE(M_FICT_PAGES, "", "");
+
 static struct vm_freelist
     vm_phys_free_queues[VM_RAW_NFREELIST][VM_NFREEPOOL][VM_NFREEORDER];
 static struct vm_freelist
@@ -361,6 +370,8 @@ vm_phys_init(void)
 	for (flind = 0; flind < vm_nfreelists; flind++)
 		vm_phys_lookup_lists[0][flind] = &vm_phys_free_queues[flind];
 #endif
+
+	mtx_init(&vm_phys_fictitious_reg_mtx, "vmfctr", NULL, MTX_DEF);
 }
 
 /*
@@ -525,6 +536,109 @@ vm_phys_paddr_to_vm_page(vm_paddr_t pa)
 	return (NULL);
 }
 
+vm_page_t
+vm_phys_fictitious_to_vm_page(vm_paddr_t pa)
+{
+	struct vm_phys_fictitious_seg *seg;
+	int segind;
+
+	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
+		seg = &vm_phys_fictitious_segs[segind];
+		if (pa >= seg->start && pa < seg->end)
+			return (&seg->first_page[atop(pa - seg->start)]);
+	}
+	return (NULL);
+}
+
+int
+vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
+    vm_memattr_t memattr)
+{
+	struct vm_phys_fictitious_seg *seg;
+	vm_page_t fp;
+	long i, page_count;
+	int segind;
+#ifdef VM_PHYSSEG_DENSE
+	long pi;
+	boolean_t malloced;
+#endif
+
+	page_count = (end - start) / PAGE_SIZE;
+
+#ifdef VM_PHYSSEG_DENSE
+	pi = atop(start);
+	if (pi >= first_page && atop(end) < vm_page_array_size) {
+		fp = &vm_page_array[pi - first_page];
+		malloced = FALSE;
+	} else
+#endif
+	{
+		fp = malloc(page_count * sizeof(struct vm_page), M_FICT_PAGES,
+		    M_WAITOK | M_ZERO);
+		malloced = TRUE;
+	}
+	for (i = 0; i < page_count; i++) {
+		vm_page_initfake(&fp[i], start + PAGE_SIZE * i, memattr);
+		pmap_page_init(&fp[i]);
+		/*
+		 * Page queue lock is not taken. Pages are not yet
+		 * visible.
+		 */
+		fp[i].flags &= ~PG_UNMANAGED;
+		fp[i].oflags &= ~VPO_BUSY;
+	}
+	mtx_lock(&vm_phys_fictitious_reg_mtx);
+	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
+		seg = &vm_phys_fictitious_segs[segind];
+		if (seg->start == 0 && seg->end == 0) {
+			seg->start = start;
+			seg->end = end;
+			seg->first_page = fp;
+			mtx_unlock(&vm_phys_fictitious_reg_mtx);
+			return (0);
+		}
+	}
+	mtx_unlock(&vm_phys_fictitious_reg_mtx);
+#ifdef VM_PHYSSEG_DENSE
+	if (malloced)
+#endif
+		free(fp, M_FICT_PAGES);
+	return (EBUSY);
+}
+
+void
+vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end)
+{
+	struct vm_phys_fictitious_seg *seg;
+	vm_page_t fp;
+	int segind;
+#ifdef VM_PHYSSEG_DENSE
+	long pi;
+#endif
+
+#ifdef VM_PHYSSEG_DENSE
+	pi = atop(start);
+#endif
+
+	mtx_lock(&vm_phys_fictitious_reg_mtx);
+	for (segind = 0; segind < VM_PHYS_FICTITIOUS_NSEGS; segind++) {
+		seg = &vm_phys_fictitious_segs[segind];
+		if (seg->start == start && seg->end == end) {
+			seg->start = seg->end = 0;
+			fp = seg->first_page;
+			seg->first_page = NULL;
+			mtx_unlock(&vm_phys_fictitious_reg_mtx);
+#ifdef VM_PHYSSEG_DENSE
+			if (pi < first_page || atop(end) >= vm_page_array_size)
+#endif
+				free(fp, M_FICT_PAGES);
+			return;
+		}
+	}
+	mtx_unlock(&vm_phys_fictitious_reg_mtx);
+	KASSERT(0, ("Unregistering not registered fictitious range"));
+}
+
 /*
  * Find the segment containing the given physical address.
  */
diff --git a/sys/vm/vm_phys.h b/sys/vm/vm_phys.h
index a5b9e93..fd01b81 100644
--- a/sys/vm/vm_phys.h
+++ b/sys/vm/vm_phys.h
@@ -56,6 +56,10 @@ vm_page_t vm_phys_alloc_contig(unsigned long npages,
 vm_page_t vm_phys_alloc_freelist_pages(int flind, int pool, int order);
 vm_page_t vm_phys_alloc_pages(int pool, int order);
 vm_paddr_t vm_phys_bootstrap_alloc(vm_size_t size, unsigned long alignment);
+int vm_phys_fictitious_reg_range(vm_paddr_t start, vm_paddr_t end,
+    vm_memattr_t memattr);
+void vm_phys_fictitious_unreg_range(vm_paddr_t start, vm_paddr_t end);
+vm_page_t vm_phys_fictitious_to_vm_page(vm_paddr_t pa);
 void vm_phys_free_pages(vm_page_t m, int order);
 void vm_phys_init(void);
 void vm_phys_set_pool(int pool, vm_page_t m, int order);
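
Usage note for reviewers (not part of the patch): a minimal sketch of how a
KMS/GEM-style driver might consume the new registration interface.  The
device aperture variables and the drv_*() function names are hypothetical;
only vm_phys_fictitious_reg_range(), vm_phys_fictitious_unreg_range(), and
the new out-of-line PHYS_TO_VM_PAGE() come from the patch above.

#include <sys/param.h>
#include <sys/systm.h>
#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>

/* Hypothetical aperture; a real driver would read these from its BAR. */
static vm_paddr_t drv_aper_base;
static vm_paddr_t drv_aper_size;

static int
drv_register_aperture(void)
{
	int error;

	/*
	 * Register the page-aligned [start, end) range so that pmap can
	 * find vm_page_t's for device pages; returns EBUSY when the
	 * fictitious segment table is full.
	 */
	error = vm_phys_fictitious_reg_range(drv_aper_base,
	    drv_aper_base + drv_aper_size, VM_MEMATTR_WRITE_COMBINING);
	if (error != 0)
		return (error);

	/* PHYS_TO_VM_PAGE() now resolves addresses inside the aperture. */
	KASSERT(PHYS_TO_VM_PAGE(drv_aper_base) != NULL,
	    ("aperture page lookup failed"));
	return (0);
}

static void
drv_unregister_aperture(void)
{

	vm_phys_fictitious_unreg_range(drv_aper_base,
	    drv_aper_base + drv_aper_size);
}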