Index: sys/arm/arm/pmap-v6.c
===================================================================
--- sys/arm/arm/pmap-v6.c	(revision 246940)
+++ sys/arm/arm/pmap-v6.c	(working copy)
@@ -390,7 +390,6 @@ static uma_zone_t l2table_zone;
 static vm_offset_t pmap_kernel_l2dtable_kva;
 static vm_offset_t pmap_kernel_l2ptp_kva;
 static vm_paddr_t pmap_kernel_l2ptp_phys;
-static struct vm_object pvzone_obj;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 static struct rwlock pvh_global_lock;

@@ -1162,7 +1161,7 @@ pmap_init(void)
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
-	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+	uma_zone_set_obj(pvzone, pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);

 	/*
Index: sys/arm/arm/pmap.c
===================================================================
--- sys/arm/arm/pmap.c	(revision 246940)
+++ sys/arm/arm/pmap.c	(working copy)
@@ -395,7 +395,6 @@ static uma_zone_t l2table_zone;
 static vm_offset_t pmap_kernel_l2dtable_kva;
 static vm_offset_t pmap_kernel_l2ptp_kva;
 static vm_paddr_t pmap_kernel_l2ptp_phys;
-static struct vm_object pvzone_obj;
 static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0;
 static struct rwlock pvh_global_lock;

@@ -1826,7 +1825,7 @@ pmap_init(void)
 	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
 	TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc);
 	pv_entry_max = shpgperproc * maxproc + cnt.v_page_count;
-	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+	uma_zone_set_obj(pvzone, pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);

 	/*
Index: sys/powerpc/booke/pmap.c
===================================================================
--- sys/powerpc/booke/pmap.c	(revision 246940)
+++ sys/powerpc/booke/pmap.c	(working copy)
@@ -217,7 +217,6 @@ static struct rwlock_padalign pvh_global_lock;

 /* Data for the pv entry allocation mechanism */
 static uma_zone_t pvzone;
-static struct vm_object pvzone_obj;
 static int pv_entry_count = 0, pv_entry_max = 0, pv_entry_high_water = 0;

 #define PV_ENTRY_ZONE_MIN	2048	/* min pv entries in uma zone */
@@ -1343,7 +1342,7 @@ mmu_booke_init(mmu_t mmu)
 	TUNABLE_INT_FETCH("vm.pmap.pv_entries", &pv_entry_max);
 	pv_entry_high_water = 9 * (pv_entry_max / 10);

-	uma_zone_set_obj(pvzone, &pvzone_obj, pv_entry_max);
+	uma_zone_set_obj(pvzone, pv_entry_max);

 	/* Pre-fill pvzone with initial number of pv entries. */
 	uma_prealloc(pvzone, PV_ENTRY_ZONE_MIN);
Index: sys/vm/swap_pager.c
===================================================================
--- sys/vm/swap_pager.c	(revision 246940)
+++ sys/vm/swap_pager.c	(working copy)
@@ -343,7 +343,6 @@ SYSCTL_INT(_vm, OID_AUTO, swap_async_max,
 static struct mtx sw_alloc_mtx;	/* protect list manipulation */
 static struct pagerlst	swap_pager_object_list[NOBJLISTS];
 static uma_zone_t	swap_zone;
-static struct vm_object	swap_zone_obj;

 /*
  * pagerops for OBJT_SWAP - "swap pager".  Some ops are also global procedure
@@ -554,7 +553,7 @@ swap_pager_swap_init(void)
 	if (swap_zone == NULL)
 		panic("failed to create swap_zone.");
 	do {
-		if (uma_zone_set_obj(swap_zone, &swap_zone_obj, n))
+		if (uma_zone_set_obj(swap_zone, n))
 			break;
 		/*
 		 * if the allocation failed, try a zone two thirds the
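Note: the four files above are the caller side of the interface change: each
consumer drops its caller-owned struct vm_object together with the middle
argument.  A minimal before/after sketch of the conversion pattern (the zone
and limit names are illustrative only, not from the patch):

	/* Before: the caller had to donate a backing VM object. */
	static struct vm_object examplezone_obj;
	...
	uma_zone_set_obj(examplezone, &examplezone_obj, example_max);

	/* After: UMA backs the zone with unmanaged pages itself. */
	uma_zone_set_obj(examplezone, example_max);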
Index: sys/vm/uma.h
===================================================================
--- sys/vm/uma.h	(revision 246940)
+++ sys/vm/uma.h	(working copy)
@@ -432,11 +432,10 @@ void uma_reclaim(void);
 void uma_set_align(int align);

 /*
- * Switches the backing object of a zone
+ * Switches the backing store of a zone to VM_ALLOC_NOOBJ pages.
  *
  * Arguments:
  *	zone  The zone to update.
- *	obj   The VM object to use for future allocations.
  *	size  The size of the object to allocate.
  *
  * Returns:
@@ -444,12 +443,11 @@ void uma_set_align(int align);
  *	1 if successful
  *
  * Discussion:
- *	A NULL object can be used and uma will allocate one for you.  Setting
- *	the size will limit the amount of memory allocated to this zone.
+ *	Setting the size will limit the amount of memory allocated to
+ *	this zone.
  *
  */
-struct vm_object;
-int uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int size);
+int uma_zone_set_obj(uma_zone_t zone, int size);

 /*
  * Sets a high limit on the number of items allowed in a zone
Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c	(revision 246940)
+++ sys/vm/uma_core.c	(working copy)
@@ -79,6 +79,7 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -1032,48 +1033,56 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *p
 static void *
 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
-	vm_object_t object;
+	TAILQ_HEAD(, vm_page) alloctail;
+	u_long npages;
 	vm_offset_t retkva, zkva;
-	vm_page_t p;
-	int pages, startpages;
+	vm_page_t p, p_next;
 	uma_keg_t keg;

+	TAILQ_INIT(&alloctail);
 	keg = zone_first_keg(zone);
-	object = keg->uk_obj;
-	retkva = 0;

 	/*
 	 * This looks a little weird since we're getting one page at a time.
 	 */
-	VM_OBJECT_LOCK(object);
-	p = TAILQ_LAST(&object->memq, pglist);
-	pages = p != NULL ? p->pindex + 1 : 0;
-	startpages = pages;
-	zkva = keg->uk_kva + pages * PAGE_SIZE;
-	for (; bytes > 0; bytes -= PAGE_SIZE) {
-		p = vm_page_alloc(object, pages,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
-		if (p == NULL) {
-			if (pages != startpages)
-				pmap_qremove(retkva, pages - startpages);
-			while (pages != startpages) {
-				pages--;
-				p = TAILQ_LAST(&object->memq, pglist);
-				vm_page_unwire(p, 0);
-				vm_page_free(p);
-			}
-			retkva = 0;
-			goto done;
+	npages = howmany(bytes, PAGE_SIZE);
+	while (npages > 0) {
+		p = vm_page_alloc(NULL, 0, VM_ALLOC_INTERRUPT |
+		    VM_ALLOC_WIRED | VM_ALLOC_NOOBJ);
+		if (p != NULL) {
+
+			/*
+			 * As VM_ALLOC_NOOBJ pages belong to no object,
+			 * their unused listq field can link them here.
+			 */
+			TAILQ_INSERT_TAIL(&alloctail, p, listq);
+			npages--;
+			continue;
 		}
+		if (wait & M_WAITOK) {
+			VM_WAIT;
+			continue;
+		}
+
+		/*
+		 * Page allocation failed, free intermediate pages and
+		 * exit.
+		 */
+		TAILQ_FOREACH_SAFE(p, &alloctail, listq, p_next) {
+			vm_page_unwire(p, 0);
+			vm_page_free(p);
+		}
+		return (NULL);
+	}
+	*flags = UMA_SLAB_PRIV;
+	zkva = keg->uk_kva +
+	    atomic_fetchadd_long(&keg->uk_offset, howmany(bytes, PAGE_SIZE)) *
+	    PAGE_SIZE;
+	retkva = zkva;
+	TAILQ_FOREACH(p, &alloctail, listq) {
 		pmap_qenter(zkva, &p, 1);
-		if (retkva == 0)
-			retkva = zkva;
 		zkva += PAGE_SIZE;
-		pages += 1;
 	}
-done:
-	VM_OBJECT_UNLOCK(object);
-	*flags = UMA_SLAB_PRIV;

 	return ((void *)retkva);
 }
@@ -3012,7 +3021,7 @@ uma_zone_set_allocf(uma_zone_t zone, uma_alloc all

 /* See uma.h */
 int
-uma_zone_set_obj(uma_zone_t zone, struct vm_object *obj, int count)
+uma_zone_set_obj(uma_zone_t zone, int count)
 {
 	uma_keg_t keg;
 	vm_offset_t kva;
@@ -3024,21 +3033,25 @@ int
 	if (pages * keg->uk_ipers < count)
 		pages++;

-	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
-
-	if (kva == 0)
-		return (0);
-	if (obj == NULL)
-		obj = vm_object_allocate(OBJT_PHYS, pages);
-	else {
-		VM_OBJECT_LOCK_INIT(obj, "uma object");
-		_vm_object_allocate(OBJT_PHYS, pages, obj);
-	}
+#ifdef UMA_MD_SMALL_ALLOC
+	if (keg->uk_ppera > 1) {
+#else
+	if (1) {
+#endif
+		kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+		if (kva == 0)
+			return (0);
+	} else
+		kva = 0;
 	ZONE_LOCK(zone);
 	keg->uk_kva = kva;
-	keg->uk_obj = obj;
+	keg->uk_offset = 0;
 	keg->uk_maxpages = pages;
+#ifdef UMA_MD_SMALL_ALLOC
+	keg->uk_allocf = (keg->uk_ppera > 1) ? obj_alloc : uma_small_alloc;
+#else
 	keg->uk_allocf = obj_alloc;
+#endif
 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
 	ZONE_UNLOCK(zone);
 	return (1);
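Note: with the backing object gone, obj_alloc() now carves its piece of the
keg's kva window by atomically advancing uk_offset, so concurrent allocations
need no lock at all.  A sketch of the equivalent computation, unrolled (the
local variable name is mine):

	u_long first;	/* page index of this allocation's first page */

	first = atomic_fetchadd_long(&keg->uk_offset,
	    howmany(bytes, PAGE_SIZE));
	retkva = keg->uk_kva + first * PAGE_SIZE;

atomic_fetchadd_long() returns the old value, so each caller gets a disjoint
run of pages; uk_offset counts pages rather than bytes, as the uma_int.h hunk
below documents.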
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h	(revision 246940)
+++ sys/vm/uma_int.h	(working copy)
@@ -221,7 +221,7 @@ struct uma_keg {
 	uma_alloc	uk_allocf;	/* Allocation function */
 	uma_free	uk_freef;	/* Free routine */

-	struct vm_object	*uk_obj;	/* Zone specific object */
+	u_long		uk_offset;	/* Zone specific next page index */
 	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
 	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */

Index: sys/vm/vm_map.c
===================================================================
--- sys/vm/vm_map.c	(revision 246940)
+++ sys/vm/vm_map.c	(working copy)
@@ -125,7 +125,6 @@ static uma_zone_t mapentzone;
 static uma_zone_t kmapentzone;
 static uma_zone_t mapzone;
 static uma_zone_t vmspace_zone;
-static struct vm_object kmapentobj;
 static int vmspace_zinit(void *mem, int size, int flags);
 static void vmspace_zfini(void *mem, int size);
 static int vm_map_zinit(void *mem, int ize, int flags);
@@ -303,7 +302,7 @@ vmspace_alloc(min, max)
 void
 vm_init2(void)
 {
-	uma_zone_set_obj(kmapentzone, &kmapentobj, lmin(cnt.v_page_count,
+	uma_zone_set_obj(kmapentzone, lmin(cnt.v_page_count,
 	    (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 +
 	    maxproc * 2 + maxfiles);
 	vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL,
Index: sys/vm/vm_object.c
===================================================================
--- sys/vm/vm_object.c	(revision 246940)
+++ sys/vm/vm_object.c	(working copy)
@@ -97,6 +97,9 @@ __FBSDID("$FreeBSD$");
 #include 
 #include 

+#define VM_OBJECT_LOCK_INIT(object, type) \
+	mtx_init(&(object)->mtx, "vm object", (type), MTX_DEF | MTX_DUPOK)
+
 static int old_msync;
 SYSCTL_INT(_vm, OID_AUTO, old_msync, CTLFLAG_RW, &old_msync, 0,
     "Use old (insecure) msync behavior");
@@ -204,7 +207,7 @@ vm_object_zinit(void *mem, int size, int flags)
 	return (0);
 }

-void
+static void
 _vm_object_allocate(objtype_t type, vm_pindex_t size, vm_object_t object)
 {
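Note: VM_OBJECT_LOCK_INIT() moves into vm_object.c because the obj_alloc()
rewrite removed its only user outside this file; for the same reason
_vm_object_allocate() loses its last external caller and can become static.
Outside code keeps using the public allocator; a minimal sketch of the
remaining interface (illustrative use, not part of this patch):

	vm_object_t obj;

	/* Allocates and initializes the object in one call. */
	obj = vm_object_allocate(OBJT_PHYS, npages);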
Index: sys/vm/vm_object.h
===================================================================
--- sys/vm/vm_object.h	(revision 246940)
+++ sys/vm/vm_object.h	(working copy)
@@ -208,9 +208,6 @@ extern struct vm_object kmem_object_store;
 #define	VM_OBJECT_LOCK(object)	mtx_lock(&(object)->mtx)
 #define	VM_OBJECT_LOCK_ASSERT(object, type) \
 	mtx_assert(&(object)->mtx, (type))
-#define	VM_OBJECT_LOCK_INIT(object, type) \
-	mtx_init(&(object)->mtx, "vm object", \
-	    (type), MTX_DEF | MTX_DUPOK)
 #define	VM_OBJECT_LOCKED(object) mtx_owned(&(object)->mtx)
 #define	VM_OBJECT_MTX(object)	(&(object)->mtx)
 #define	VM_OBJECT_TRYLOCK(object)	mtx_trylock(&(object)->mtx)
@@ -241,7 +238,6 @@ vm_object_cache_is_empty(vm_object_t object)
 }

 vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
-void _vm_object_allocate (objtype_t, vm_pindex_t, vm_object_t);
 boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t,
    vm_size_t, boolean_t);
 void vm_object_collapse (vm_object_t);
Index: sys/vm/vm_radix.c
===================================================================
--- sys/vm/vm_radix.c	(revision 246940)
+++ sys/vm/vm_radix.c	(working copy)
@@ -63,7 +63,7 @@
 #endif

 #ifndef VM_RADIX_BOOT_CACHE
-#define VM_RADIX_BOOT_CACHE	1500
+#define VM_RADIX_BOOT_CACHE	250
 #endif

 /*
@@ -373,7 +373,6 @@ vm_radix_node_zone_dtor(void *mem, int size __unus
 static void
 vm_radix_init(void *arg __unused)
 {
-	int nitems;

 	vm_radix_node_zone = uma_zcreate("RADIX NODE",
 	    sizeof(struct vm_radix_node), NULL,
@@ -383,10 +382,9 @@ vm_radix_init(void *arg __unused)
 	    NULL,
 #endif
 	    NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	nitems = uma_zone_set_max(vm_radix_node_zone, cnt.v_page_count);
-	if (nitems < cnt.v_page_count)
-		panic("%s: unexpected requested number of items", __func__);
-	uma_prealloc(vm_radix_node_zone, nitems);
+	if (!uma_zone_set_obj(vm_radix_node_zone, cnt.v_page_count))
+		panic("%s: unable to create new zone", __func__);
+	uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
 	boot_cache_cnt = VM_RADIX_BOOT_CACHE + 1;
 }
 SYSINIT(vm_radix_init, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_init, NULL);
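Note: vm_radix_init() becomes the first consumer of the reworked interface:
rather than capping the zone with uma_zone_set_max() and panicking on a short
count, it reserves a kva window sized for cnt.v_page_count nodes and pre-fills
the zone.  The resulting usage pattern, as a minimal sketch (the zone name and
item count here are illustrative only):

	zone = uma_zcreate("EXAMPLE", sizeof(struct example_item), NULL, NULL,
	    NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE);
	if (!uma_zone_set_obj(zone, nitems))	/* reserve backing, cap zone */
		panic("example: uma_zone_set_obj failed");
	uma_prealloc(zone, nitems);		/* warm the zone up front */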