Index: sys/arm/arm/pmap.c
===================================================================
--- sys/arm/arm/pmap.c	(revision 249335)
+++ sys/arm/arm/pmap.c	(working copy)
@@ -410,7 +410,6 @@
 static uma_zone_t l2table_zone;
 static vm_offset_t pmap_kernel_l2dtable_kva;
 static vm_offset_t pmap_kernel_l2ptp_kva;
 static vm_paddr_t pmap_kernel_l2ptp_phys;
-static struct vm_object pvzone_obj;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 
 /*
@@ -1981,7 +1980,7 @@ pmap_init(void)
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR,
 	    UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+	uma_zone_reserve_kva(pvzone, pv_entry_max);
 }
Index: sys/conf/files
===================================================================
--- sys/conf/files	(revision 249335)
+++ sys/conf/files	(working copy)
@@ -2242,6 +2242,7 @@
 vm/vm_page.c			standard
 vm/vm_pageout.c			standard
 vm/vm_pager.c			standard
 vm/vm_phys.c			standard
+vm/vm_radix.c			standard
 vm/vm_reserv.c			standard
 vm/vm_unix.c			standard
 vm/vm_zeroidle.c		standard
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h	(revision 249335)
+++ sys/vm/uma_int.h	(working copy)
@@ -211,8 +211,8 @@ struct uma_keg {
 	uma_alloc	uk_allocf;	/* Allocation function */
 	uma_free	uk_freef;	/* Free routine */
 
-	struct vm_object	*uk_obj;	/* Zone specific object */
-	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
+	u_long		uk_offset;	/* Next free offset from base KVA */
+	vm_offset_t	uk_kva;		/* Zone base KVA */
 	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */
 
 	u_int16_t	uk_pgoff;	/* Offset to uma_slab struct */
Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c	(revision 249335)
+++ sys/vm/vm_map.c	(working copy)
@@ -120,7 +120,6 @@
 static uma_zone_t mapentzone;
 static uma_zone_t kmapentzone;
 static uma_zone_t mapzone;
 static uma_zone_t vmspace_zone;
-static struct vm_object kmapentobj;
 static int vmspace_zinit(void *mem, int size, int flags);
 static void vmspace_zfini(void *mem, int size);
 static int vm_map_zinit(void *mem, int ize, int flags);
@@ -293,7 +292,7 @@ vmspace_alloc(min, max)
 void
 vm_init2(void)
 {
-	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
+	uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count,
 	    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
 	    maxproc * 2 + maxfiles);
 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
@@ -1544,13 +1543,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr,
 	start = 0;
 	p_start = NULL;
 
-	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
-		if (p->pindex < pindex) {
-			p = vm_page_splay(pindex, object->root);
-			if ((object->root = p)->pindex < pindex)
-				p = TAILQ_NEXT(p, listq);
-		}
-	}
+	p = vm_page_find_least(object, pindex);
 	/*
 	 * Assert: the variable p is either (1) the page with the
 	 * least pindex greater than or equal to the parameter pindex
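Note: the arm/pmap.c and vm_map.c hunks above make the same caller-side change: the static struct vm_object that used to back the zone disappears, and the zone instead pre-reserves kernel virtual address space for its maximum item count at boot. A minimal sketch of the resulting idiom (the zone name, item type, and limit below are hypothetical, not taken from this patch):

	static uma_zone_t foozone;

	static void
	foo_zone_init(void)
	{

		foozone = uma_zcreate("FOO", sizeof(struct foo), NULL, NULL,
		    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
		if (uma_zone_reserve_kva(foozone, foo_maxitems) == 0)
			panic("foo_zone_init: KVA reservation failed");
	}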
Index: sys/vm/vm_phys.c
===================================================================
--- sys/vm/vm_phys.c	(revision 249335)
+++ sys/vm/vm_phys.c	(working copy)
@@ -706,7 +706,7 @@ done:
 		m_object = m->object;
 		vm_page_cache_remove(m);
 		if (m_object->type == OBJT_VNODE &&
-		    m_object->cache == NULL) {
+		    vm_object_cache_is_empty(m_object)) {
 			/*
 			 * Enqueue the vnode for deferred vdrop().
 			 *
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c	(revision 249335)
+++ sys/vm/swap_pager.c	(working copy)
@@ -190,7 +190,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
 static uma_zone_t	swap_zone;
-static struct vm_object	swap_zone_obj;
 
 /*
  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
@@ -394,7 +393,7 @@ swap_pager_swap_init(void)
 	if (swap_zone == NULL)
 		panic("failed to create swap_zone.");
 	do {
-		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
+		if (uma_zone_reserve_kva(swap_zone, n))
 			break;
 		/*
 		 * if the allocation failed, try a zone two thirds the
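Note: the object->cache == NULL tests rewritten above and below rely on a small helper that this patch adds to vm_object.h (quoted here for convenience; the definition appears in the vm_object.h hunk further down):

	static __inline boolean_t
	vm_object_cache_is_empty(vm_object_t object)
	{

		return (vm_radix_is_empty(&object->cache));
	}

Emptiness becomes a predicate on the embedded trie rather than a NULL root-pointer comparison, so callers stay independent of the cache representation.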
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c	(revision 249335)
+++ sys/vm/vm_object.c	(working copy)
@@ -92,6 +92,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <vm/vm_radix.h>
 #include
 #include
 
@@ -171,14 +172,15 @@ vm_object_zdtor(void *mem, int size, void *arg)
 
 	object = (vm_object_t)mem;
 	KASSERT(TAILQ_EMPTY(&object->memq),
-	    ("object %p has resident pages",
-	    object));
+	    ("object %p has resident pages in its memq", object));
+	KASSERT(vm_radix_is_empty(&object->rtree),
+	    ("object %p has resident pages in its trie", object));
 #if VM_NRESERVLEVEL > 0
 	KASSERT(LIST_EMPTY(&object->rvq),
 	    ("object %p has reservations",
 	    object));
 #endif
-	KASSERT(object->cache == NULL,
+	KASSERT(vm_object_cache_is_empty(object),
 	    ("object %p has cached pages",
 	    object));
 	KASSERT(object->paging_in_progress == 0,
@@ -200,23 +202,24 @@ vm_object_zinit(void *mem, int size, int flags)
 
 	object = (vm_object_t)mem;
 	bzero(&object->mtx, sizeof(object->mtx));
-	VM_OBJECT_LOCK_INIT(object, "standard object");
+	mtx_init(&object->mtx, "vm object", NULL, MTX_DEF | MTX_DUPOK);
 
 	/* These are true for any object that has been freed */
+	object->rtree.rt_root = 0;
 	object->paging_in_progress = 0;
 	object->resident_page_count = 0;
 	object->shadow_count = 0;
+	object->cache.rt_root = 0;
 	return (0);
 }
 
-void
+static void
 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
 {
 
 	TAILQ_INIT(&object->memq);
 	LIST_INIT(&object->shadow_head);
 
-	object->root = NULL;
 	object->type = type;
 	object->size = size;
 	object->generation = 1;
@@ -232,7 +235,6 @@ _vm_object_allocate(objtype_t type, vm_pindex_t si
 #if VM_NRESERVLEVEL > 0
 	LIST_INIT(&object->rvq);
 #endif
-	object->cache = NULL;
 
 	mtx_lock(&vm_object_list_mtx);
 	TAILQ_INSERT_TAIL(&vm_object_list, object, object_list);
@@ -250,7 +252,8 @@ vm_object_init(void)
 	TAILQ_INIT(&vm_object_list);
 	mtx_init(&vm_object_list_mtx, "vm object_list", NULL, MTX_DEF);
 
-	VM_OBJECT_LOCK_INIT(&kernel_object_store, "kernel object");
+	mtx_init(&kernel_object_store.mtx, "vm object", "kernel object",
+	    MTX_DEF);
 	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), kernel_object);
 #if VM_NRESERVLEVEL > 0
@@ -258,7 +261,7 @@ vm_object_init(void)
 	kernel_object->pg_color = (u_short)atop(VM_MIN_KERNEL_ADDRESS);
 #endif
 
-	VM_OBJECT_LOCK_INIT(&kmem_object_store, "kmem object");
+	mtx_init(&kmem_object_store.mtx, "vm object", "kmem object", MTX_DEF);
 	_vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(VM_MAX_KERNEL_ADDRESS -
 	    VM_MIN_KERNEL_ADDRESS), kmem_object);
 #if VM_NRESERVLEVEL > 0
@@ -278,6 +281,8 @@ vm_object_init(void)
 	    NULL,
 #endif
 	    vm_object_zinit, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM|UMA_ZONE_NOFREE);
+
+	vm_radix_init();
 }
 
 void
@@ -731,7 +736,7 @@ vm_object_terminate(vm_object_t object)
 	if (__predict_false(!LIST_EMPTY(&object->rvq)))
 		vm_reserv_break_all(object);
 #endif
-	if (__predict_false(object->cache != NULL))
+	if (__predict_false(!vm_object_cache_is_empty(object)))
 		vm_page_cache_free(object, 0, 0);
 
 	/*
@@ -1396,14 +1401,8 @@ vm_object_split(vm_map_entry_t entry)
 	}
 	new_object->flags |= orig_object->flags & OBJ_NEEDGIANT;
 retry:
-	if ((m = TAILQ_FIRST(&orig_object->memq)) != NULL) {
-		if (m->pindex < offidxstart) {
-			m = vm_page_splay(offidxstart, orig_object->root);
-			if ((orig_object->root = m)->pindex < offidxstart)
-				m = TAILQ_NEXT(m, listq);
-		}
-	}
 	vm_page_lock_queues();
+	m = vm_page_find_least(orig_object, offidxstart);
 	for (; m != NULL && (idx = m->pindex - offidxstart) < size;
 	    m = m_next) {
 		m_next = TAILQ_NEXT(m, listq);
@@ -1439,7 +1438,7 @@ retry:
 	/*
 	 * Transfer any cached pages from orig_object to new_object.
 	 */
-	if (__predict_false(orig_object->cache != NULL))
+	if (__predict_false(!vm_object_cache_is_empty(orig_object)))
 		vm_page_cache_transfer(orig_object, offidxstart,
 		    new_object);
 	}
@@ -1770,7 +1769,8 @@ vm_object_collapse(vm_object_t object)
 			/*
 			 * Free any cached pages from backing_object.
 			 */
-			if (__predict_false(backing_object->cache != NULL))
+			if (__predict_false(
+			    !vm_object_cache_is_empty(backing_object)))
 				vm_page_cache_free(backing_object, 0, 0);
 		}
 		/*
@@ -1917,13 +1917,8 @@ vm_object_page_remove(vm_object_t object, vm_pinde
 	vm_object_pip_add(object, 1);
again:
 	vm_page_lock_queues();
-	if ((p = TAILQ_FIRST(&object->memq)) != NULL) {
-		if (p->pindex < start) {
-			p = vm_page_splay(start, object->root);
-			if ((object->root = p)->pindex < start)
-				p = TAILQ_NEXT(p, listq);
-		}
-	}
+	p = vm_page_find_least(object, start);
+
 	/*
 	 * Assert: the variable p is either (1) the page with the
 	 * least pindex greater than or equal to the parameter pindex
@@ -1957,7 +1952,7 @@ again:
 	vm_page_unlock_queues();
 	vm_object_pip_wakeup(object);
skipmemq:
-	if (__predict_false(object->cache != NULL))
+	if (__predict_false(!vm_object_cache_is_empty(object)))
 		vm_page_cache_free(object, start, end);
 }
Index: sys/vm/vm_reserv.c
===================================================================
--- sys/vm/vm_reserv.c	(revision 249335)
+++ sys/vm/vm_reserv.c	(working copy)
@@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <vm/vm_radix.h>
 #include
 
 /*
@@ -311,10 +312,9 @@ vm_reserv_alloc_page(vm_object_t object, vm_pindex
 	/*
 	 * Look for an existing reservation.
 	 */
-	msucc = NULL;
-	mpred = object->root;
-	while (mpred != NULL) {
-		KASSERT(mpred->pindex != pindex,
+	mpred = vm_radix_lookup_le(&object->rtree, pindex);
+	if (mpred != NULL) {
+		KASSERT(mpred->pindex < pindex,
 		    ("vm_reserv_alloc_page: pindex already allocated"));
 		rv = vm_reserv_from_page(mpred);
 		if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) {
@@ -324,30 +324,22 @@
 				return (NULL);
 			vm_reserv_populate(rv);
 			return (m);
-		} else if (mpred->pindex < pindex) {
-			if (msucc != NULL ||
-			    (msucc = TAILQ_NEXT(mpred, listq)) == NULL)
-				break;
-			KASSERT(msucc->pindex != pindex,
-			    ("vm_reserv_alloc_page: pindex already allocated"));
-			rv = vm_reserv_from_page(msucc);
-			if (rv->object == object &&
-			    vm_reserv_has_pindex(rv, pindex)) {
-				m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
-				/* Handle vm_page_rename(m, new_object, ...). */
-				if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
-					return (NULL);
-				vm_reserv_populate(rv);
-				return (m);
-			} else if (pindex < msucc->pindex)
-				break;
-		} else if (msucc == NULL) {
-			msucc = mpred;
-			mpred = TAILQ_PREV(msucc, pglist, listq);
-			continue;
 		}
-		msucc = NULL;
-		mpred = object->root = vm_page_splay(pindex, object->root);
+		msucc = TAILQ_NEXT(mpred, listq);
+	} else
+		msucc = TAILQ_FIRST(&object->memq);
+	if (msucc != NULL) {
+		KASSERT(msucc->pindex > pindex,
+		    ("vm_reserv_alloc_page: pindex already allocated"));
+		rv = vm_reserv_from_page(msucc);
+		if (rv->object == object && vm_reserv_has_pindex(rv, pindex)) {
+			m = &rv->pages[VM_RESERV_INDEX(object, pindex)];
+			/* Handle vm_page_rename(m, new_object, ...). */
+			if ((m->flags & (PG_CACHED | PG_FREE)) == 0)
+				return (NULL);
+			vm_reserv_populate(rv);
+			return (m);
+		}
 	}
 
 	/*
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h	(revision 249335)
+++ sys/vm/vm_object.h	(working copy)
@@ -71,6 +71,8 @@
 #include
 #include
 
+#include <vm/_vm_radix.h>
+
 /*
 * Types defined:
 *
@@ -87,7 +89,7 @@ struct vm_object {
 	LIST_HEAD(, vm_object) shadow_head; /* objects that this is a shadow for */
 	LIST_ENTRY(vm_object) shadow_list; /* chain of shadow objects */
 	TAILQ_HEAD(, vm_page) memq;	/* list of resident pages */
-	vm_page_t root;			/* root of the resident page splay tree */
+	struct vm_radix rtree;		/* root of the resident page radix trie */
 	vm_pindex_t size;		/* Object size */
 	int generation;			/* generation ID */
 	int ref_count;			/* How many refs?? */
@@ -102,7 +104,7 @@ struct vm_object {
 	vm_ooffset_t backing_object_offset;/* Offset in backing object */
 	TAILQ_ENTRY(vm_object) pager_object_list; /* list of all objects of this pager type */
 	LIST_HEAD(, vm_reserv) rvq;	/* list of reservations */
-	vm_page_t cache;		/* root of the cache page splay tree */
+	struct vm_radix cache;		/* root of the cache page radix trie */
 	void *handle;
 	union {
 		/*
@@ -182,9 +184,6 @@ extern struct vm_object kmem_object_store;
 #define	VM_OBJECT_LOCK(object)		mtx_lock(&(object)->mtx)
 #define	VM_OBJECT_LOCK_ASSERT(object, type) \
 					mtx_assert(&(object)->mtx, (type))
-#define	VM_OBJECT_LOCK_INIT(object, type) \
-					mtx_init(&(object)->mtx, "vm object", \
-					    (type), MTX_DEF | MTX_DUPOK)
 #define	VM_OBJECT_LOCKED(object)	mtx_owned(&(object)->mtx)
 #define	VM_OBJECT_MTX(object)		(&(object)->mtx)
 #define	VM_OBJECT_TRYLOCK(object)	mtx_trylock(&(object)->mtx)
@@ -207,8 +206,14 @@ void vm_object_pip_wakeup(vm_object_t object);
 void vm_object_pip_wakeupn(vm_object_t object, short i);
 void vm_object_pip_wait(vm_object_t object, char *waitid);
 
+static __inline boolean_t
+vm_object_cache_is_empty(vm_object_t object)
+{
+
+	return (vm_radix_is_empty(&object->cache));
+}
+
 vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
-void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
 boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t);
 void vm_object_collapse (vm_object_t);
 void vm_object_deallocate (vm_object_t);
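Note: the vm_reserv.c conversion above and the vm_page_insert() conversion below share one pattern worth calling out: a single vm_radix_lookup_le() walk finds the closest page at or below a pindex, and the object's ordered memq list then supplies the successor for free. A standalone sketch of the pattern (helper name and out-parameters are illustrative only):

	static void
	find_neighbors(vm_object_t object, vm_pindex_t pindex,
	    vm_page_t *mpredp, vm_page_t *msuccp)
	{
		vm_page_t mpred;

		VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
		mpred = vm_radix_lookup_le(&object->rtree, pindex);
		if (mpred != NULL)
			*msuccp = TAILQ_NEXT(mpred, listq);
		else
			*msuccp = TAILQ_FIRST(&object->memq);
		*mpredp = mpred;
	}

This replaces the splay-based search, which had to rotate the looked-up node to the root and therefore modified the tree even on reads.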
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 249335)
+++ sys/vm/vm_page.c	(working copy)
@@ -122,6 +122,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <vm/vm_radix.h>
 #include
 #include
 #include
 
@@ -563,63 +564,6 @@ vm_page_dirty(vm_page_t m)
 }
 
 /*
- *	vm_page_splay:
- *
- *	Implements Sleator and Tarjan's top-down splay algorithm.  Returns
- *	the vm_page containing the given pindex.  If, however, that
- *	pindex is not found in the vm_object, returns a vm_page that is
- *	adjacent to the pindex, coming before or after it.
- */
-vm_page_t
-vm_page_splay(vm_pindex_t pindex, vm_page_t root)
-{
-	struct vm_page dummy;
-	vm_page_t lefttreemax, righttreemin, y;
-
-	if (root == NULL)
-		return (root);
-	lefttreemax = righttreemin = &dummy;
-	for (;; root = y) {
-		if (pindex < root->pindex) {
-			if ((y = root->left) == NULL)
-				break;
-			if (pindex < y->pindex) {
-				/* Rotate right. */
-				root->left = y->right;
-				y->right = root;
-				root = y;
-				if ((y = root->left) == NULL)
-					break;
-			}
-			/* Link into the new root's right tree. */
-			righttreemin->left = root;
-			righttreemin = root;
-		} else if (pindex > root->pindex) {
-			if ((y = root->right) == NULL)
-				break;
-			if (pindex > y->pindex) {
-				/* Rotate left. */
-				root->right = y->left;
-				y->left = root;
-				root = y;
-				if ((y = root->right) == NULL)
-					break;
-			}
-			/* Link into the new root's left tree. */
-			lefttreemax->right = root;
-			lefttreemax = root;
-		} else
-			break;
-	}
-	/* Assemble the new root. */
-	lefttreemax->right = root->left;
-	righttreemin->left = root->right;
-	root->left = dummy.right;
-	root->right = dummy.left;
-	return (root);
-}
-
-/*
 *	vm_page_insert:		[ internal use only ]
 *
 *	Inserts the given mem entry into the object and object list.
@@ -635,7 +579,7 @@ vm_page_dirty(vm_page_t m)
 void
 vm_page_insert(vm_page_t m, vm_object_t object, vm_pindex_t pindex)
 {
-	vm_page_t root;
+	vm_page_t neighbor;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
 	if (m->object != NULL)
@@ -650,28 +594,19 @@ vm_page_insert(vm_page_t m, vm_object_t object, vm
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
-	root = object->root;
-	if (root == NULL) {
-		m->left = NULL;
-		m->right = NULL;
+	if (object->resident_page_count == 0) {
 		TAILQ_INSERT_TAIL(&object->memq, m, listq);
 	} else {
-		root = vm_page_splay(pindex, root);
-		if (pindex < root->pindex) {
-			m->left = root->left;
-			m->right = root;
-			root->left = NULL;
-			TAILQ_INSERT_BEFORE(root, m, listq);
-		} else if (pindex == root->pindex)
-			panic("vm_page_insert: offset already allocated");
-		else {
-			m->right = root->right;
-			m->left = root;
-			root->right = NULL;
-			TAILQ_INSERT_AFTER(&object->memq, root, m, listq);
-		}
+		neighbor = vm_radix_lookup_le(&object->rtree, pindex);
+		if (neighbor != NULL) {
+			KASSERT(pindex > neighbor->pindex,
+			    ("vm_page_insert: offset %ju less than %ju",
+			    (uintmax_t)pindex, (uintmax_t)neighbor->pindex));
+			TAILQ_INSERT_AFTER(&object->memq, neighbor, m, listq);
+		} else
+			TAILQ_INSERT_HEAD(&object->memq, m, listq);
 	}
-	object->root = m;
+	vm_radix_insert(&object->rtree, m);
 	object->generation++;
 
 	/*
@@ -708,7 +643,6 @@ void
 vm_page_remove(vm_page_t m)
 {
 	vm_object_t object;
-	vm_page_t root;
 
 	if ((object = m->object) == NULL)
 		return;
@@ -722,15 +656,7 @@ vm_page_remove(vm_page_t m)
 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
-	if (m != object->root)
-		vm_page_splay(m->pindex, object->root);
-	if (m->left == NULL)
-		root = m->right;
-	else {
-		root = vm_page_splay(m->pindex, m->left);
-		root->right = m->right;
-	}
-	object->root = root;
+	vm_radix_remove(&object->rtree, m->pindex);
 	TAILQ_REMOVE(&object->memq, m, listq);
 
 	/*
@@ -760,14 +686,28 @@ vm_page_remove(vm_page_t m)
 vm_page_t
 vm_page_lookup(vm_object_t object, vm_pindex_t pindex)
 {
+
+	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+	return (vm_radix_lookup(&object->rtree, pindex));
+}
+
+/*
+ *	vm_page_find_least:
+ *
+ *	Returns the page associated with the object with least pindex
+ *	greater than or equal to the parameter pindex, or NULL.
+ *
+ *	The object must be locked.
+ *	The routine may not block.
+ */
+vm_page_t
+vm_page_find_least(vm_object_t object, vm_pindex_t pindex)
+{
 	vm_page_t m;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
-	if ((m = object->root) != NULL && m->pindex != pindex) {
-		m = vm_page_splay(pindex, m);
-		if ((object->root = m)->pindex != pindex)
-			m = NULL;
-	}
+	if ((m = TAILQ_FIRST(&object->memq)) != NULL && m->pindex < pindex)
+		m = vm_radix_lookup_ge(&object->rtree, pindex);
 	return (m);
 }
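Note: vm_page_find_least() above is the primitive that lets vm_map_pmap_enter(), vm_object_split(), and vm_object_page_remove() drop their open-coded splay searches. A usage sketch (assumed illustration, not part of the patch) for visiting every resident page with pindex in [start, end):

	vm_page_t p;

	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
	for (p = vm_page_find_least(object, start);
	    p != NULL && p->pindex < end; p = TAILQ_NEXT(p, listq)) {
		/* operate on p */
	}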
@@ -848,45 +788,18 @@ vm_page_rename(vm_page_t m, vm_object_t new_object
 void
 vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end)
 {
-	vm_page_t m, m_next;
+	vm_page_t m;
 	boolean_t empty;
 
 	mtx_lock(&vm_page_queue_free_mtx);
-	if (__predict_false(object->cache == NULL)) {
+	if (__predict_false(vm_radix_is_empty(&object->cache))) {
 		mtx_unlock(&vm_page_queue_free_mtx);
 		return;
 	}
-	m = object->cache = vm_page_splay(start, object->cache);
-	if (m->pindex < start) {
-		if (m->right == NULL)
-			m = NULL;
-		else {
-			m_next = vm_page_splay(start, m->right);
-			m_next->left = m;
-			m->right = NULL;
-			m = object->cache = m_next;
-		}
-	}
-
-	/*
-	 * At this point, "m" is either (1) a reference to the page
-	 * with the least pindex that is greater than or equal to
-	 * "start" or (2) NULL.
-	 */
-	for (; m != NULL && (m->pindex < end || end == 0); m = m_next) {
-		/*
-		 * Find "m"'s successor and remove "m" from the
-		 * object's cache.
-		 */
-		if (m->right == NULL) {
-			object->cache = m->left;
-			m_next = NULL;
-		} else {
-			m_next = vm_page_splay(start, m->right);
-			m_next->left = m->left;
-			object->cache = m_next;
-		}
-		/* Convert "m" to a free page. */
+	while ((m = vm_radix_lookup_ge(&object->cache, start)) != NULL) {
+		if (end != 0 && m->pindex >= end)
+			break;
+		vm_radix_remove(&object->cache, m->pindex);
 		m->object = NULL;
 		m->valid = 0;
 		/* Clear PG_CACHED and set PG_FREE. */
@@ -896,7 +809,7 @@ vm_page_cache_free(vm_object_t object, vm_pindex_t
 		cnt.v_cache_count--;
 		cnt.v_free_count++;
 	}
-	empty = object->cache == NULL;
+	empty = vm_radix_is_empty(&object->cache);
 	mtx_unlock(&vm_page_queue_free_mtx);
 	if (object->type == OBJT_VNODE && empty)
 		vdrop(object->handle);
@@ -911,15 +824,9 @@ vm_page_cache_free(vm_object_t object, vm_pindex_t
 static inline vm_page_t
 vm_page_cache_lookup(vm_object_t object, vm_pindex_t pindex)
 {
-	vm_page_t m;
 
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
-	if ((m = object->cache) != NULL && m->pindex != pindex) {
-		m = vm_page_splay(pindex, m);
-		if ((object->cache = m)->pindex != pindex)
-			m = NULL;
-	}
-	return (m);
+	return (vm_radix_lookup(&object->cache, pindex));
 }
 
 /*
@@ -931,28 +838,11 @@ vm_page_cache_lookup(vm_object_t object, vm_pindex
 void
 vm_page_cache_remove(vm_page_t m)
 {
-	vm_object_t object;
-	vm_page_t root;
 
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	KASSERT((m->flags & PG_CACHED) != 0,
 	    ("vm_page_cache_remove: page %p is not cached", m));
-	object = m->object;
-	if (m != object->cache) {
-		root = vm_page_splay(m->pindex, object->cache);
-		KASSERT(root == m,
-		    ("vm_page_cache_remove: page %p is not cached in object %p",
-		    m, object));
-	}
-	if (m->left == NULL)
-		root = m->right;
-	else if (m->right == NULL)
-		root = m->left;
-	else {
-		root = vm_page_splay(m->pindex, m->left);
-		root->right = m->right;
-	}
-	object->cache = root;
+	vm_radix_remove(&m->object->cache, m->pindex);
 	m->object = NULL;
 	cnt.v_cache_count--;
 }
@@ -972,7 +862,7 @@ void
 vm_page_cache_transfer(vm_object_t orig_object, vm_pindex_t offidxstart,
     vm_object_t new_object)
 {
-	vm_page_t m, m_next;
+	vm_page_t m;
 
 	/*
	 * Insertion into an object's collection of cached pages
@@ -980,53 +870,24 @@ vm_page_cache_transfer(vm_object_t orig_object, vm
 	 * not.
 	 */
 	VM_OBJECT_LOCK_ASSERT(new_object, MA_OWNED);
-	KASSERT(new_object->cache == NULL,
+	KASSERT(vm_radix_is_empty(&new_object->cache),
 	    ("vm_page_cache_transfer: object %p has cached pages",
 	    new_object));
 	mtx_lock(&vm_page_queue_free_mtx);
-	if ((m = orig_object->cache) != NULL) {
+	while ((m = vm_radix_lookup_ge(&orig_object->cache,
+	    offidxstart)) != NULL) {
 		/*
 		 * Transfer all of the pages with offset greater than or
 		 * equal to 'offidxstart' from the original object's
 		 * cache to the new object's cache.
 		 */
-		m = vm_page_splay(offidxstart, m);
-		if (m->pindex < offidxstart) {
-			orig_object->cache = m;
-			new_object->cache = m->right;
-			m->right = NULL;
-		} else {
-			orig_object->cache = m->left;
-			new_object->cache = m;
-			m->left = NULL;
-		}
-		while ((m = new_object->cache) != NULL) {
-			if ((m->pindex - offidxstart) >= new_object->size) {
-				/*
-				 * Return all of the cached pages with
-				 * offset greater than or equal to the
-				 * new object's size to the original
-				 * object's cache.
-				 */
-				new_object->cache = m->left;
-				m->left = orig_object->cache;
-				orig_object->cache = m;
-				break;
-			}
-			m_next = vm_page_splay(m->pindex, m->right);
-			/* Update the page's object and offset. */
-			m->object = new_object;
-			m->pindex -= offidxstart;
-			if (m_next == NULL)
-				break;
-			m->right = NULL;
-			m_next->left = m;
-			new_object->cache = m_next;
-		}
-		KASSERT(new_object->cache == NULL ||
-		    new_object->type == OBJT_SWAP,
-		    ("vm_page_cache_transfer: object %p's type is incompatible"
-		    " with cached pages", new_object));
+		if ((m->pindex - offidxstart) >= new_object->size)
+			break;
+		vm_radix_remove(&orig_object->cache, m->pindex);
+		/* Update the page's object and offset. */
+		m->object = new_object;
+		m->pindex -= offidxstart;
+		vm_radix_insert(&new_object->cache, m);
 	}
 	mtx_unlock(&vm_page_queue_free_mtx);
 }
@@ -1160,7 +1021,8 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pind
 		m->valid = 0;
 		m_object = m->object;
 		vm_page_cache_remove(m);
-		if (m_object->type == OBJT_VNODE && m_object->cache == NULL)
+		if (m_object->type == OBJT_VNODE &&
+		    vm_object_cache_is_empty(m_object))
 			vp = m_object->handle;
 	} else {
 		KASSERT(VM_PAGE_IS_FREE(m),
@@ -1664,7 +1526,7 @@ void
 vm_page_cache(vm_page_t m)
 {
 	vm_object_t object;
-	vm_page_t root;
+	boolean_t cache_was_empty;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	object = m->object;
@@ -1700,15 +1562,7 @@ vm_page_cache(vm_page_t m)
 	 * Remove the page from the object's collection of resident
 	 * pages.
 	 */
-	if (m != object->root)
-		vm_page_splay(m->pindex, object->root);
-	if (m->left == NULL)
-		root = m->right;
-	else {
-		root = vm_page_splay(m->pindex, m->left);
-		root->right = m->right;
-	}
-	object->root = root;
+	vm_radix_remove(&object->rtree, m->pindex);
 	TAILQ_REMOVE(&object->memq, m, listq);
 	object->resident_page_count--;
 	object->generation++;
@@ -1727,25 +1581,8 @@ vm_page_cache(vm_page_t m)
 	mtx_lock(&vm_page_queue_free_mtx);
 	m->flags |= PG_CACHED;
 	cnt.v_cache_count++;
-	root = object->cache;
-	if (root == NULL) {
-		m->left = NULL;
-		m->right = NULL;
-	} else {
-		root = vm_page_splay(m->pindex, root);
-		if (m->pindex < root->pindex) {
-			m->left = root->left;
-			m->right = root;
-			root->left = NULL;
-		} else if (__predict_false(m->pindex == root->pindex))
-			panic("vm_page_cache: offset already cached");
-		else {
-			m->right = root->right;
-			m->left = root;
-			root->right = NULL;
-		}
-	}
-	object->cache = m;
+	cache_was_empty = vm_radix_is_empty(&object->cache);
+	vm_radix_insert(&object->cache, m);
 #if VM_NRESERVLEVEL > 0
 	if (!vm_reserv_free_page(m)) {
 #else
@@ -1763,9 +1600,9 @@ vm_page_cache(vm_page_t m)
 	 * the object's only resident page.
 	 */
 	if (object->type == OBJT_VNODE) {
-		if (root == NULL && object->resident_page_count != 0)
+		if (cache_was_empty && object->resident_page_count != 0)
 			vhold(object->handle);
-		else if (root != NULL && object->resident_page_count == 0)
+		else if (!cache_was_empty && object->resident_page_count == 0)
 			vdrop(object->handle);
 	}
 }
Index: sys/vm/vm_page.h
===================================================================
--- sys/vm/vm_page.h	(revision 249335)
+++ sys/vm/vm_page.h	(working copy)
@@ -103,8 +103,6 @@ TAILQ_HEAD(pglist, vm_page);
 struct vm_page {
 	TAILQ_ENTRY(vm_page) pageq;	/* queue info for FIFO queue or free list (P) */
 	TAILQ_ENTRY(vm_page) listq;	/* pages in same object (O) */
-	struct vm_page *left;		/* splay tree link (O) */
-	struct vm_page *right;		/* splay tree link (O) */
 	vm_object_t object;		/* which object am I in (O,P)*/
 	vm_pindex_t pindex;		/* offset into object (O,P) */
 
@@ -323,6 +321,7 @@ int vm_page_try_to_cache (vm_page_t);
 int vm_page_try_to_free (vm_page_t);
 void vm_page_dontneed (register vm_page_t);
 void vm_page_deactivate (vm_page_t);
+vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t);
 void vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t);
 vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t);
 vm_page_t vm_page_next(vm_page_t m);
@@ -331,7 +330,6 @@ void vm_page_remove (vm_page_t);
 void vm_page_rename (vm_page_t, vm_object_t, vm_pindex_t);
 void vm_page_requeue(vm_page_t m);
 void vm_page_sleep(vm_page_t m, const char *msg);
-vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
 void vm_page_unwire (vm_page_t, int);
 void vm_page_wire (vm_page_t);
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h	(revision 249335)
+++ sys/vm/uma.h	(working copy)
@@ -400,24 +400,24 @@ void uma_reclaim(void);
 void uma_set_align(int align);
 
 /*
- * Switches the backing object of a zone
+ * Reserves the maximum KVA space required by the zone and configures the zone
+ * to use a VM_ALLOC_NOOBJ-based backend allocator.
 *
 * Arguments:
- *	zone	The zone to update
- *	obj	The obj to use for future allocations
- *	size	The size of the object to allocate
+ *	zone	The zone to update.
+ *	nitems	The upper limit on the number of items that can be allocated.
 *
 * Returns:
- *	0	if kva space can not be allocated
+ *	0	if KVA space can not be allocated
 *	1	if successful
 *
 * Discussion:
- *	A NULL object can be used and uma will allocate one for you.  Setting
- *	the size will limit the amount of memory allocated to this zone.
+ *	When the machine supports a direct map and the zone's items are smaller
+ *	than a page, the zone will use the direct map instead of allocating KVA
+ *	space.
 *
 */
-struct vm_object;
-int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
+int uma_zone_reserve_kva(uma_zone_t zone, int nitems);
 
 /*
 * Sets a high limit on the number of items allowed in a zone
@@ -476,7 +476,7 @@ void uma_zone_set_zinit(uma_zone_t zone, uma_init
 void uma_zone_set_zfini(uma_zone_t zone, uma_fini zfini);
 
 /*
- * Replaces the standard page_alloc or obj_alloc functions for this zone
+ * Replaces the standard backend allocator for this zone.
 *
 * Arguments:
 *	zone	The zone whose backend allocator is being changed.
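Note: the "Discussion" text above refers to machines with a direct map (UMA_MD_SMALL_ALLOC). The decision it describes is implemented in the uma_core.c hunk below; a condensed sketch of the policy (hypothetical helper name, not part of the patch):

	static int
	keg_needs_kva_reservation(uma_keg_t keg)
	{

	#ifdef UMA_MD_SMALL_ALLOC
		/* Single-page slabs can come straight from the direct map. */
		return (keg->uk_ppera > 1);
	#else
		return (1);
	#endif
	}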
Index: sys/vm/vnode_pager.c
===================================================================
--- sys/vm/vnode_pager.c	(revision 249335)
+++ sys/vm/vnode_pager.c	(working copy)
@@ -422,7 +422,7 @@ vnode_pager_setsize(vp, nsize)
 				m->dirty = VM_PAGE_BITS_ALL;
 				vm_page_unlock_queues();
 			} else if ((nsize & PAGE_MASK) &&
-			    __predict_false(object->cache != NULL)) {
+			    __predict_false(!vm_object_cache_is_empty(object))) {
 				vm_page_cache_free(object, OFF_TO_IDX(nsize),
 				    nobjsize);
 			}
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c	(revision 249335)
+++ sys/vm/uma_core.c	(working copy)
@@ -78,6 +78,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include
 #include
 #include
 #include
 
@@ -208,7 +209,7 @@ enum zfreeskip { SKIP_NONE, SKIP_DTOR, SKIP_FINI }
 
 /* Prototypes.. */
 
-static void *obj_alloc(uma_zone_t, int, u_int8_t *, int);
+static void *noobj_alloc(uma_zone_t, int, u_int8_t *, int);
 static void *page_alloc(uma_zone_t, int, u_int8_t *, int);
 static void *startup_alloc(uma_zone_t, int, u_int8_t *, int);
 static void page_free(void *, int, u_int8_t);
@@ -967,50 +968,51 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *p
 *	NULL if M_NOWAIT is set.
 */
 static void *
-obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
+noobj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
-	vm_object_t object;
+	TAILQ_HEAD(, vm_page) alloctail;
+	u_long npages;
 	vm_offset_t retkva, zkva;
-	vm_page_t p;
-	int pages, startpages;
+	vm_page_t p, p_next;
 
-	object = zone->uz_keg->uk_obj;
-	retkva = 0;
+	TAILQ_INIT(&alloctail);
 
-	/*
-	 * This looks a little weird since we're getting one page at a time.
-	 */
-	VM_OBJECT_LOCK(object);
-	p = TAILQ_LAST(&object->memq, pglist);
-	pages = p != NULL ? p->pindex + 1 : 0;
-	startpages = pages;
-	zkva = zone->uz_keg->uk_kva + pages * PAGE_SIZE;
-	for (; bytes > 0; bytes -= PAGE_SIZE) {
-		p = vm_page_alloc(object, pages,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
-		if (p == NULL) {
-			if (pages != startpages)
-				pmap_qremove(retkva, pages - startpages);
-			while (pages != startpages) {
-				pages--;
-				p = TAILQ_LAST(&object->memq, pglist);
-				vm_page_lock_queues();
-				vm_page_unwire(p, 0);
-				vm_page_free(p);
-				vm_page_unlock_queues();
-			}
-			retkva = 0;
-			goto done;
+	npages = howmany(bytes, PAGE_SIZE);
+	while (npages > 0) {
+		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
+		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+		if (p != NULL) {
+			/*
+			 * Since the page does not belong to an object, its
+			 * listq is unused.
+			 */
+			TAILQ_INSERT_TAIL(&alloctail, p, listq);
+			npages--;
+			continue;
 		}
+		if (wait & M_WAITOK) {
+			VM_WAIT;
+			continue;
+		}
+
+		/*
+		 * Page allocation failed, free intermediate pages and
+		 * exit.
+		 */
+		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
+			vm_page_unwire(p, 0);
+			vm_page_free(p);
+		}
+		return (NULL);
+	}
+	*flags = UMA_SLAB_PRIV;
+	zkva = zone->uz_keg->uk_kva +
+	    atomic_fetchadd_long(&zone->uz_keg->uk_offset, round_page(bytes));
+	retkva = zkva;
+	TAILQ_FOREACH(p, &alloctail, listq) {
 		pmap_qenter(zkva, &p, 1);
-		if (retkva == 0)
-			retkva = zkva;
 		zkva += PAGE_SIZE;
-		pages += 1;
 	}
-done:
-	VM_OBJECT_UNLOCK(object);
-	*flags = UMA_SLAB_PRIV;
 	return ((void *)retkva);
 }
 
@@ -2625,7 +2627,7 @@ uma_zone_set_allocf(uma_zone_t zone, uma_alloc all
 
 /* See uma.h */
 int
-uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
+uma_zone_reserve_kva(uma_zone_t zone, int count)
 {
 	uma_keg_t keg;
 	vm_offset_t kva;
@@ -2637,23 +2639,25 @@ int
 	if (pages * keg->uk_ipers < count)
 		pages++;
 
-	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
-
-	if (kva == 0)
-		return (0);
-	if (obj == NULL) {
-		obj = vm_object_allocate(OBJT_DEFAULT,
-		    pages);
-	} else {
-		VM_OBJECT_LOCK_INIT(obj, "uma object");
-		_vm_object_allocate(OBJT_DEFAULT,
-		    pages, obj);
-	}
+#ifdef UMA_MD_SMALL_ALLOC
+	if (keg->uk_ppera > 1) {
+#else
+	if (1) {
+#endif
+		kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+		if (kva == 0)
+			return (0);
+	} else
+		kva = 0;
 	ZONE_LOCK(zone);
 	keg->uk_kva = kva;
-	keg->uk_obj = obj;
+	keg->uk_offset = 0;
 	keg->uk_maxpages = pages;
-	keg->uk_allocf = obj_alloc;
+#ifdef UMA_MD_SMALL_ALLOC
+	keg->uk_allocf = (keg->uk_ppera > 1) ? noobj_alloc : uma_small_alloc;
+#else
+	keg->uk_allocf = noobj_alloc;
+#endif
 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
 	ZONE_UNLOCK(zone);
 	return (1);
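Note: noobj_alloc() above carves slab KVA out of the range pre-reserved by uma_zone_reserve_kva() without taking a lock: uk_offset only ever grows, so a single fetch-and-add both claims the range exclusively and advances the high-water mark. Isolated sketch of that step (hypothetical helper, logic taken from the hunk above):

	static vm_offset_t
	keg_carve_kva(uma_keg_t keg, int bytes)
	{

		/*
		 * atomic_fetchadd_long() returns the pre-increment offset,
		 * i.e. the start of the claimed
		 * [offset, offset + round_page(bytes)) run.
		 */
		return (keg->uk_kva +
		    atomic_fetchadd_long(&keg->uk_offset, round_page(bytes)));
	}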
Index: sys/i386/include/pmap.h
===================================================================
--- sys/i386/include/pmap.h	(revision 249335)
+++ sys/i386/include/pmap.h	(working copy)
@@ -158,6 +158,8 @@
 #include
 #include
 
+#include <vm/_vm_radix.h>
+
 #ifdef PAE
 
 typedef uint64_t pdpt_entry_t;
@@ -377,7 +379,7 @@ struct pmap {
 	pdpt_entry_t		*pm_pdpt;	/* KVA of page director pointer table */
 #endif
-	vm_page_t		pm_root;	/* spare page table pages */
+	struct vm_radix		pm_root;	/* spare page table pages */
 };
 
 typedef struct pmap	*pmap_t;
@@ -404,7 +406,7 @@ extern struct pmap	kernel_pmap_store;
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_list;
+	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
Index: sys/i386/i386/pmap.c
===================================================================
--- sys/i386/i386/pmap.c	(revision 249335)
+++ sys/i386/i386/pmap.c	(working copy)
@@ -137,6 +137,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <vm/vm_radix.h>
 #include
 #include
 
@@ -391,7 +392,7 @@ pmap_bootstrap(vm_paddr_t firstaddr)
 #ifdef PAE
 	kernel_pmap->pm_pdpt = (pdpt_entry_t *) (KERNBASE + (u_int)IdlePDPT);
 #endif
-	kernel_pmap->pm_root = NULL;
+	kernel_pmap->pm_root.rt_root = 0;
 	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 	LIST_INIT(&allpmaps);
@@ -1482,7 +1483,8 @@ pmap_free_zero_pages(vm_page_t free)
 
 	while (free != NULL) {
 		m = free;
-		free = m->right;
+		free = (void *)m->object;
+		m->object = NULL;
 		/* Preserve the page's PG_ZERO setting. */
 		vm_page_free_toq(m);
 	}
@@ -1501,7 +1503,7 @@ pmap_add_delayed_free_list(vm_page_t m, vm_page_t
 		m->flags |= PG_ZERO;
 	else
 		m->flags &= ~PG_ZERO;
-	m->right = *free;
+	m->object = (void *)*free;
 	*free = m;
 }
 
@@ -1511,31 +1513,12 @@ pmap_add_delayed_free_list(vm_page_t m, vm_page_t
 * for mapping a distinct range of virtual addresses.  The pmap's collection is
 * ordered by this virtual address range.
 */
-static void
+static __inline void
 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 {
-	vm_page_t root;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	root = pmap->pm_root;
-	if (root == NULL) {
-		mpte->left = NULL;
-		mpte->right = NULL;
-	} else {
-		root = vm_page_splay(mpte->pindex, root);
-		if (mpte->pindex < root->pindex) {
-			mpte->left = root->left;
-			mpte->right = root;
-			root->left = NULL;
-		} else if (mpte->pindex == root->pindex)
-			panic("pmap_insert_pt_page: pindex already inserted");
-		else {
-			mpte->right = root->right;
-			mpte->left = root;
-			root->right = NULL;
-		}
-	}
-	pmap->pm_root = mpte;
+	vm_radix_insert(&pmap->pm_root, mpte);
 }
 
 /*
@@ -1543,19 +1526,12 @@ pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 * specified pmap's collection of idle page table pages.  Returns NULL if there
 * is no page table page corresponding to the specified virtual address.
 */
-static vm_page_t
+static __inline vm_page_t
 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 {
-	vm_page_t mpte;
-	vm_pindex_t pindex = va >> PDRSHIFT;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
-		mpte = vm_page_splay(pindex, mpte);
-		if ((pmap->pm_root = mpte)->pindex != pindex)
-			mpte = NULL;
-	}
-	return (mpte);
+	return (vm_radix_lookup(&pmap->pm_root, va >> PDRSHIFT));
 }
 
 /*
@@ -1563,21 +1539,12 @@ pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 * of idle page table pages.  The specified page table page must be a member of
 * the pmap's collection.
 */
-static void
+static __inline void
 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 {
-	vm_page_t root;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if (mpte != pmap->pm_root)
-		vm_page_splay(mpte->pindex, pmap->pm_root);
-	if (mpte->left == NULL)
-		root = mpte->right;
-	else {
-		root = vm_page_splay(mpte->pindex, mpte->left);
-		root->right = mpte->right;
-	}
-	pmap->pm_root = root;
+	vm_radix_remove(&pmap->pm_root, mpte->pindex);
 }
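Note: with the splay links gone from struct vm_page, the pmap_free_zero_pages()/pmap_add_delayed_free_list() hunks above thread the delayed-free list of page table pages through the now-unused m->object field instead of m->right. Sketch of the idiom (helper names hypothetical; the casts are exactly those used in the patch):

	static void
	delayed_free_push(vm_page_t *head, vm_page_t m)
	{

		m->object = (void *)*head;	/* link pointer, not an object */
		*head = m;
	}

	static vm_page_t
	delayed_free_pop(vm_page_t *head)
	{
		vm_page_t m;

		m = *head;
		*head = (void *)m->object;
		m->object = NULL;
		return (m);
	}

The pages on this list belong to no object, so overloading the field is safe until vm_page_free_toq() sees a clean page.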
 
/*
@@ -1663,7 +1630,7 @@ pmap_pinit0(pmap_t pmap)
 #ifdef PAE
 	pmap->pm_pdpt = (pdpt_entry_t *)(KERNBASE + (vm_offset_t)IdlePDPT);
 #endif
-	pmap->pm_root = NULL;
+	pmap->pm_root.rt_root = 0;
 	pmap->pm_active = 0;
 	PCPU_SET(curpmap, pmap);
 	TAILQ_INIT(&pmap->pm_pvchunk);
@@ -1704,9 +1671,9 @@ pmap_pinit(pmap_t pmap)
 		KASSERT(pmap_kextract((vm_offset_t)pmap->pm_pdpt) < (4ULL<<30),
 		    ("pmap_pinit: pdpt above 4g"));
 #endif
-		pmap->pm_root = NULL;
+		pmap->pm_root.rt_root = 0;
 	}
-	KASSERT(pmap->pm_root == NULL,
+	KASSERT(vm_radix_is_empty(&pmap->pm_root),
 	    ("pmap_pinit: pmap has reserved page table page(s)"));
 
 	/*
@@ -1963,7 +1930,7 @@ pmap_release(pmap_t pmap)
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
 	    pmap->pm_stats.resident_count));
-	KASSERT(pmap->pm_root == NULL,
+	KASSERT(vm_radix_is_empty(&pmap->pm_root),
 	    ("pmap_release: pmap has reserved page table page(s)"));
 
 	pmap_lazyfix(pmap);
@@ -2153,7 +2120,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues
 	TAILQ_FOREACH(m, &vpq->pl, pageq) {
 		if (m->hold_count || m->busy)
 			continue;
-		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
+		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_next, next_pv) {
 			va = pv->pv_va;
 			pmap = PV_PMAP(pv);
 			/* Avoid deadlock and lock recursion. */
@@ -2177,7 +2144,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues
 			pmap_unuse_pt(pmap, va, &free);
 			pmap_invalidate_page(pmap, va);
 			pmap_free_zero_pages(free);
-			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 			if (TAILQ_EMPTY(&m->md.pv_list)) {
 				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 				if (TAILQ_EMPTY(&pvh->pv_list))
@@ -2330,9 +2297,9 @@ pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 	pv_entry_t pv;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
-			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
 			break;
 		}
 	}
@@ -2360,7 +2327,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm
 	pv = pmap_pvh_remove(pvh, pmap, va);
 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 	m = PHYS_TO_VM_PAGE(pa);
-	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
@@ -2396,7 +2363,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, v
 	pv = pmap_pvh_remove(&m->md, pmap, va);
 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 	pvh = pa_to_pvh(pa);
-	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
+	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
 	/* Free the remaining NPTEPG - 1 pv entries. */
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
@@ -2443,7 +2410,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv = get_pv_entry(pmap, FALSE);
 	pv->pv_va = va;
-	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 }
 
 /*
@@ -2459,7 +2426,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t
 	if (pv_entry_count < pv_entry_high_water &&
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
-		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 		return (TRUE);
 	} else
 		return (FALSE);
@@ -2479,7 +2446,7 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
 		pvh = pa_to_pvh(pa);
-		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
+		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
 		return (TRUE);
 	} else
 		return (FALSE);
@@ -2928,7 +2895,7 @@ pmap_remove_all(vm_page_t m)
 		pmap_unuse_pt(pmap, pv->pv_va, &free);
 		pmap_invalidate_page(pmap, pv->pv_va);
 		pmap_free_zero_pages(free);
-		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
@@ -4063,7 +4030,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 		return FALSE;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		if (PV_PMAP(pv) == pmap) {
 			return TRUE;
 		}
@@ -4073,7 +4040,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	}
 	if (loops < 16) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
 			if (PV_PMAP(pv) == pmap)
 				return (TRUE);
 			loops++;
@@ -4196,7 +4163,7 @@ pmap_remove_pages(pmap_t pmap)
 				if ((tpte & PG_PS) != 0) {
 					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
-					TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
 					if (TAILQ_EMPTY(&pvh->pv_list)) {
 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 							if (TAILQ_EMPTY(&mt->md.pv_list))
@@ -4214,7 +4181,7 @@ pmap_remove_pages(pmap_t pmap)
 					}
 				} else {
 					pmap->pm_stats.resident_count--;
-					TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 					if (TAILQ_EMPTY(&m->md.pv_list)) {
 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 						if (TAILQ_EMPTY(&pvh->pv_list))
@@ -4276,7 +4243,7 @@ pmap_is_modified_pvh(struct md_page *pvh)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	rv = FALSE;
 	sched_pin();
-	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pte = pmap_pte_quick(pmap, pv->pv_va);
@@ -4332,7 +4299,7 @@ pmap_remove_write(vm_page_t m)
 		return;
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4341,7 +4308,7 @@ pmap_remove_write(vm_page_t m)
 			(void)pmap_demote_pde(pmap, pde, va);
 		PMAP_UNLOCK(pmap);
 	}
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, pv->pv_va);
@@ -4397,7 +4364,7 @@ pmap_ts_referenced(vm_page_t m)
 	sched_pin();
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, pvn) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4430,9 +4397,9 @@ pmap_ts_referenced(vm_page_t m)
 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pvf = pv;
 		do {
-			pvn = TAILQ_NEXT(pv, pv_list);
-			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+			pvn = TAILQ_NEXT(pv, pv_next);
+			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 			pmap = PV_PMAP(pv);
 			PMAP_LOCK(pmap);
 			pde = pmap_pde(pmap, pv->pv_va);
@@ -4472,7 +4439,7 @@ pmap_clear_modify(vm_page_t m)
 		return;
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4508,7 +4475,7 @@ pmap_clear_modify(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, pv->pv_va);
@@ -4549,7 +4516,7 @@ pmap_clear_reference(vm_page_t m)
 		return;
 	sched_pin();
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4571,7 +4538,7 @@ pmap_clear_reference(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, pv->pv_va);
@@ -5098,7 +5065,7 @@ pmap_pvdump(vm_paddr_t pa)
 	printf("pa %x", pa);
 	m = PHYS_TO_VM_PAGE(pa);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		printf(" -> pmap %p, va %x", (void *)pmap, pv->pv_va);
 		pads(pmap);
Index: sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c
===================================================================
--- sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(revision 249335)
+++ sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c	(working copy)
@@ -373,7 +373,7 @@ again:
 			VM_OBJECT_LOCK(obj);
 			vm_page_wakeup(m);
 		} else {
-			if (__predict_false(obj->cache != NULL)) {
+			if (__predict_false(!vm_object_cache_is_empty(obj))) {
 				vm_page_cache_free(obj, OFF_TO_IDX(start),
 				    OFF_TO_IDX(start) + 1);
 			}
Index: sys/amd64/include/pmap.h
===================================================================
--- sys/amd64/include/pmap.h	(revision 249335)
+++ sys/amd64/include/pmap.h	(working copy)
@@ -149,6 +149,8 @@
 #include
 #include
 
+#include <vm/_vm_radix.h>
+
 typedef u_int64_t pd_entry_t;
 typedef u_int64_t pt_entry_t;
 typedef u_int64_t pdp_entry_t;
@@ -249,7 +251,7 @@ struct pmap {
 	u_int			pm_active;	/* active on cpus */
 	/* spare u_int here due to padding */
 	struct pmap_statistics	pm_stats;	/* pmap statistics */
-	vm_page_t		pm_root;	/* spare page table pages */
+	struct vm_radix		pm_root;	/* spare page table pages */
 };
 
 typedef struct pmap	*pmap_t;
@@ -276,7 +278,7 @@ extern struct pmap	kernel_pmap_store;
 */
typedef struct pv_entry {
	vm_offset_t	pv_va;		/* virtual address for mapping */
-	TAILQ_ENTRY(pv_entry)	pv_list;
+	TAILQ_ENTRY(pv_entry)	pv_next;
} *pv_entry_t;

/*
Index: sys/amd64/conf/GENERIC
===================================================================
--- sys/amd64/conf/GENERIC	(revision 249335)
+++ sys/amd64/conf/GENERIC	(working copy)
@@ -68,6 +68,12 @@
 options 	AUDIT			# Security event auditing
 #options 	KDTRACE_HOOKS		# Kernel DTrace hooks
 options 	INCLUDE_CONFIG_FILE	# Include this file in kernel
 
+options 	KDB
+options 	DDB
+options 	GDB
+options 	INVARIANT_SUPPORT
+options 	INVARIANTS
+
 # Make an SMP-capable kernel by default
 options 	SMP			# Symmetric MultiProcessor Kernel
Index: sys/amd64/amd64/pmap.c
===================================================================
--- sys/amd64/amd64/pmap.c	(revision 249335)
+++ sys/amd64/amd64/pmap.c	(working copy)
@@ -136,6 +136,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <vm/vm_radix.h>
 #include
 #include
 
@@ -533,7 +534,7 @@ pmap_bootstrap(vm_paddr_t *firstaddr)
 	 */
 	PMAP_LOCK_INIT(kernel_pmap);
 	kernel_pmap->pm_pml4 = (pdp_entry_t *)PHYS_TO_DMAP(KPML4phys);
-	kernel_pmap->pm_root = NULL;
+	kernel_pmap->pm_root.rt_root = 0;
 	kernel_pmap->pm_active = -1;	/* don't allow deactivation */
 	TAILQ_INIT(&kernel_pmap->pm_pvchunk);
 
@@ -1317,7 +1318,8 @@ pmap_free_zero_pages(vm_page_t free)
 
 	while (free != NULL) {
 		m = free;
-		free = m->right;
+		free = (void *)m->object;
+		m->object = NULL;
 		/* Preserve the page's PG_ZERO setting. */
 		vm_page_free_toq(m);
 	}
@@ -1336,7 +1338,7 @@ pmap_add_delayed_free_list(vm_page_t m, vm_page_t
 		m->flags |= PG_ZERO;
 	else
 		m->flags &= ~PG_ZERO;
-	m->right = *free;
+	m->object = (void *)*free;
 	*free = m;
 }
@@ -1346,31 +1348,12 @@ pmap_add_delayed_free_list(vm_page_t m, vm_page_t
 * for mapping a distinct range of virtual addresses.  The pmap's collection is
 * ordered by this virtual address range.
 */
-static void
+static __inline void
 pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 {
-	vm_page_t root;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	root = pmap->pm_root;
-	if (root == NULL) {
-		mpte->left = NULL;
-		mpte->right = NULL;
-	} else {
-		root = vm_page_splay(mpte->pindex, root);
-		if (mpte->pindex < root->pindex) {
-			mpte->left = root->left;
-			mpte->right = root;
-			root->left = NULL;
-		} else if (mpte->pindex == root->pindex)
-			panic("pmap_insert_pt_page: pindex already inserted");
-		else {
-			mpte->right = root->right;
-			mpte->left = root;
-			root->right = NULL;
-		}
-	}
-	pmap->pm_root = mpte;
+	vm_radix_insert(&pmap->pm_root, mpte);
 }
 
 /*
@@ -1378,19 +1361,12 @@ pmap_insert_pt_page(pmap_t pmap, vm_page_t mpte)
 * specified pmap's collection of idle page table pages.  Returns NULL if there
 * is no page table page corresponding to the specified virtual address.
 */
-static vm_page_t
+static __inline vm_page_t
 pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 {
-	vm_page_t mpte;
-	vm_pindex_t pindex = pmap_pde_pindex(va);
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if ((mpte = pmap->pm_root) != NULL && mpte->pindex != pindex) {
-		mpte = vm_page_splay(pindex, mpte);
-		if ((pmap->pm_root = mpte)->pindex != pindex)
-			mpte = NULL;
-	}
-	return (mpte);
+	return (vm_radix_lookup(&pmap->pm_root, pmap_pde_pindex(va)));
 }
 
 /*
@@ -1398,25 +1374,12 @@ pmap_lookup_pt_page(pmap_t pmap, vm_offset_t va)
 * of idle page table pages.  The specified page table page must be a member of
 * the pmap's collection.
 */
-static void
+static __inline void
 pmap_remove_pt_page(pmap_t pmap, vm_page_t mpte)
 {
-	vm_page_t root;
 
 	PMAP_LOCK_ASSERT(pmap, MA_OWNED);
-	if (mpte != pmap->pm_root) {
-		root = vm_page_splay(mpte->pindex, pmap->pm_root);
-		KASSERT(mpte == root,
-		    ("pmap_remove_pt_page: mpte %p is missing from pmap %p",
-		    mpte, pmap));
-	}
-	if (mpte->left == NULL)
-		root = mpte->right;
-	else {
-		root = vm_page_splay(mpte->pindex, mpte->left);
-		root->right = mpte->right;
-	}
-	pmap->pm_root = root;
+	vm_radix_remove(&pmap->pm_root, mpte->pindex);
 }
 
 /*
@@ -1512,7 +1475,7 @@ pmap_pinit0(pmap_t pmap)
 
 	PMAP_LOCK_INIT(pmap);
 	pmap->pm_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(KPML4phys);
-	pmap->pm_root = NULL;
+	pmap->pm_root.rt_root = 0;
 	pmap->pm_active = 0;
 	TAILQ_INIT(&pmap->pm_pvchunk);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -1549,7 +1512,7 @@ pmap_pinit(pmap_t pmap)
 	/* install self-referential address mapping entry(s) */
 	pmap->pm_pml4[PML4PML4I] = VM_PAGE_TO_PHYS(pml4pg) | PG_V | PG_RW |
 	    PG_A | PG_M;
-	pmap->pm_root = NULL;
+	pmap->pm_root.rt_root = 0;
 	pmap->pm_active = 0;
 	TAILQ_INIT(&pmap->pm_pvchunk);
 	bzero(&pmap->pm_stats, sizeof pmap->pm_stats);
@@ -1796,7 +1759,7 @@ pmap_release(pmap_t pmap)
 	KASSERT(pmap->pm_stats.resident_count == 0,
 	    ("pmap_release: pmap resident count %ld != 0",
 	    pmap->pm_stats.resident_count));
-	KASSERT(pmap->pm_root == NULL,
+	KASSERT(vm_radix_is_empty(&pmap->pm_root),
 	    ("pmap_release: pmap has reserved page table page(s)"));
 
 	m = PHYS_TO_VM_PAGE(pmap->pm_pml4[PML4PML4I] & PG_FRAME);
@@ -1991,7 +1954,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues
 	TAILQ_FOREACH(m, &vpq->pl, pageq) {
 		if (m->hold_count || m->busy)
 			continue;
-		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_list, next_pv) {
+		TAILQ_FOREACH_SAFE(pv, &m->md.pv_list, pv_next, next_pv) {
 			va = pv->pv_va;
 			pmap = PV_PMAP(pv);
 			/* Avoid deadlock and lock recursion. */
@@ -2015,7 +1978,7 @@ pmap_collect(pmap_t locked_pmap, struct vpgqueues
 			pmap_unuse_pt(pmap, va, *pde, &free);
 			pmap_invalidate_page(pmap, va);
 			pmap_free_zero_pages(free);
-			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 			if (TAILQ_EMPTY(&m->md.pv_list)) {
 				pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 				if (TAILQ_EMPTY(&pvh->pv_list))
@@ -2167,9 +2130,9 @@ pmap_pvh_remove(struct md_page *pvh, pmap_t pmap,
 	pv_entry_t pv;
 
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
 		if (pmap == PV_PMAP(pv) && va == pv->pv_va) {
-			TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+			TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
 			break;
 		}
 	}
@@ -2202,7 +2165,7 @@ pmap_pv_demote_pde(pmap_t pmap, vm_offset_t va, vm
 	pv = pmap_pvh_remove(pvh, pmap, va);
 	KASSERT(pv != NULL, ("pmap_pv_demote_pde: pv not found"));
 	m = PHYS_TO_VM_PAGE(pa);
-	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 	/* Instantiate the remaining NPTEPG - 1 pv entries. */
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
@@ -2243,7 +2206,7 @@ pmap_pv_promote_pde(pmap_t pmap, vm_offset_t va, v
 	pv = pmap_pvh_remove(&m->md, pmap, va);
 	KASSERT(pv != NULL, ("pmap_pv_promote_pde: pv not found"));
 	pvh = pa_to_pvh(pa);
-	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
+	TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
 	/* Free the remaining NPTEPG - 1 pv entries. */
 	va_last = va + NBPDR - PAGE_SIZE;
 	do {
@@ -2295,7 +2258,7 @@ pmap_insert_entry(pmap_t pmap, vm_offset_t va, vm_
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pv = get_pv_entry(pmap, FALSE);
 	pv->pv_va = va;
-	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+	TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 }
 
 /*
@@ -2311,7 +2274,7 @@ pmap_try_insert_pv_entry(pmap_t pmap, vm_offset_t
 	if (pv_entry_count < pv_entry_high_water &&
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
-		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+		TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 		return (TRUE);
 	} else
 		return (FALSE);
@@ -2331,7 +2294,7 @@ pmap_pv_insert_pde(pmap_t pmap, vm_offset_t va, vm
 	    (pv = get_pv_entry(pmap, TRUE)) != NULL) {
 		pv->pv_va = va;
 		pvh = pa_to_pvh(pa);
-		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_list);
+		TAILQ_INSERT_TAIL(&pvh->pv_list, pv, pv_next);
 		return (TRUE);
 	} else
 		return (FALSE);
@@ -2768,7 +2731,7 @@ pmap_remove_all(vm_page_t m)
 		pmap_unuse_pt(pmap, pv->pv_va, *pde, &free);
 		pmap_invalidate_page(pmap, pv->pv_va);
 		pmap_free_zero_pages(free);
-		TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+		TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 		free_pv_entry(pmap, pv);
 		PMAP_UNLOCK(pmap);
 	}
@@ -3852,7 +3815,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 		return FALSE;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		if (PV_PMAP(pv) == pmap) {
 			return TRUE;
 		}
@@ -3862,7 +3825,7 @@ pmap_page_exists_quick(pmap_t pmap, vm_page_t m)
 	}
 	if (loops < 16) {
 		pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-		TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+		TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
 			if (PV_PMAP(pv) == pmap)
 				return (TRUE);
 			loops++;
@@ -3985,7 +3948,7 @@ pmap_remove_pages(pmap_t pmap)
 				if ((tpte & PG_PS) != 0) {
 					pmap->pm_stats.resident_count -= NBPDR / PAGE_SIZE;
 					pvh = pa_to_pvh(tpte & PG_PS_FRAME);
-					TAILQ_REMOVE(&pvh->pv_list, pv, pv_list);
+					TAILQ_REMOVE(&pvh->pv_list, pv, pv_next);
 					if (TAILQ_EMPTY(&pvh->pv_list)) {
 						for (mt = m; mt < &m[NBPDR / PAGE_SIZE]; mt++)
 							if (TAILQ_EMPTY(&mt->md.pv_list))
@@ -4003,7 +3966,7 @@ pmap_remove_pages(pmap_t pmap)
 					}
 				} else {
 					pmap->pm_stats.resident_count--;
-					TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
+					TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
 					if (TAILQ_EMPTY(&m->md.pv_list)) {
 						pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
 						if (TAILQ_EMPTY(&pvh->pv_list))
@@ -4062,7 +4025,7 @@ pmap_is_modified_pvh(struct md_page *pvh)
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	rv = FALSE;
-	TAILQ_FOREACH(pv, &pvh->pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &pvh->pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pte = pmap_pte(pmap, pv->pv_va);
@@ -4116,7 +4079,7 @@ pmap_remove_write(vm_page_t m)
 		return;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4125,7 +4088,7 @@ pmap_remove_write(vm_page_t m)
 			(void)pmap_demote_pde(pmap, pde, va);
 		PMAP_UNLOCK(pmap);
 	}
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, pv->pv_va);
@@ -4174,7 +4137,7 @@ pmap_ts_referenced(vm_page_t m)
 		return (rtval);
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, pvn) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, pvn) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4207,9 +4170,9 @@ pmap_ts_referenced(vm_page_t m)
 	if ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) {
 		pvf = pv;
 		do {
-			pvn = TAILQ_NEXT(pv, pv_list);
-			TAILQ_REMOVE(&m->md.pv_list, pv, pv_list);
-			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_list);
+			pvn = TAILQ_NEXT(pv, pv_next);
+			TAILQ_REMOVE(&m->md.pv_list, pv, pv_next);
+			TAILQ_INSERT_TAIL(&m->md.pv_list, pv, pv_next);
 			pmap = PV_PMAP(pv);
 			PMAP_LOCK(pmap);
 			pde = pmap_pde(pmap, pv->pv_va);
@@ -4246,7 +4209,7 @@ pmap_clear_modify(vm_page_t m)
 		return;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4277,7 +4240,7 @@ pmap_clear_modify(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, pv->pv_va);
@@ -4311,7 +4274,7 @@ pmap_clear_reference(vm_page_t m)
 		return;
 	mtx_assert(&vm_page_queue_mtx, MA_OWNED);
 	pvh = pa_to_pvh(VM_PAGE_TO_PHYS(m));
-	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_list, next_pv) {
+	TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
 		va = pv->pv_va;
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
@@ -4333,7 +4296,7 @@ pmap_clear_reference(vm_page_t m)
 		}
 		PMAP_UNLOCK(pmap);
 	}
-	TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) {
+	TAILQ_FOREACH(pv, &m->md.pv_list, pv_next) {
 		pmap = PV_PMAP(pv);
 		PMAP_LOCK(pmap);
 		pde = pmap_pde(pmap, pv->pv_va);