Index: sys/vm/uma_core.c
===================================================================
--- sys/vm/uma_core.c	(revision 246940)
+++ sys/vm/uma_core.c	(working copy)
@@ -1032,38 +1032,45 @@ page_alloc(uma_zone_t zone, int bytes, u_int8_t *p
 static void *
 obj_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
 {
+	u_long pages, startpages;
 	vm_object_t object;
 	vm_offset_t retkva, zkva;
 	vm_page_t p;
-	int pages, startpages;
 	uma_keg_t keg;
+	int nbytes, pflags;
 
 	keg = zone_first_keg(zone);
 	object = keg->uk_obj;
-	retkva = 0;
+	pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
+	if (object == NULL)
+		pflags |= VM_ALLOC_NOOBJ;
 
 	/*
 	 * This looks a little weird since we're getting one page at a time.
 	 */
-	VM_OBJECT_LOCK(object);
-	p = TAILQ_LAST(&object->memq, pglist);
-	pages = p != NULL ? p->pindex + 1 : 0;
+retry:
+	retkva = 0;
+	nbytes = howmany(bytes, PAGE_SIZE);
+	if (object != NULL)
+		VM_OBJECT_LOCK(object);
+	pages = keg->uk_offset;
 	startpages = pages;
 	zkva = keg->uk_kva + pages * PAGE_SIZE;
-	for (; bytes > 0; bytes -= PAGE_SIZE) {
-		p = vm_page_alloc(object, pages,
-		    VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED);
+	for (; nbytes > 0; nbytes--) {
+		p = vm_page_alloc(object, pages, pflags);
 		if (p == NULL) {
-			if (pages != startpages)
-				pmap_qremove(retkva, pages - startpages);
 			while (pages != startpages) {
-				pages--;
-				p = TAILQ_LAST(&object->memq, pglist);
+				zkva -= PAGE_SIZE;
+				p = PHYS_TO_VM_PAGE(pmap_kextract(zkva));
+				pmap_qremove(zkva, 1);
 				vm_page_unwire(p, 0);
 				vm_page_free(p);
+				pages--;
 			}
+			if (retkva != 0 && retkva != zkva)
+				panic("UMA: Invalid kva scanning");
 			retkva = 0;
-			goto done;
+			break;
 		}
 		pmap_qenter(zkva, &p, 1);
 		if (retkva == 0)
@@ -1071,8 +1078,20 @@ obj_alloc(uma_zone_t zone, int bytes, u_int8_t *fl
 		zkva += PAGE_SIZE;
 		pages += 1;
 	}
-done:
-	VM_OBJECT_UNLOCK(object);
+	if (object != NULL) {
+		keg->uk_offset = pages;
+		VM_OBJECT_UNLOCK(object);
+	} else if (!atomic_cmpset_long(&keg->uk_offset, startpages, pages)) {
+		while (pages != startpages) {
+			zkva -= PAGE_SIZE;
+			p = PHYS_TO_VM_PAGE(pmap_kextract(zkva));
+			pmap_qremove(zkva, 1);
+			vm_page_unwire(p, 0);
+			vm_page_free(p);
+			pages--;
+		}
+		goto retry;
+	}
 
 	*flags = UMA_SLAB_PRIV;
 	return ((void *)retkva);
@@ -3024,21 +3043,33 @@ uma_zone_set_obj(uma_zone_t zone, struct vm_object
 	if (pages * keg->uk_ipers < count)
 		pages++;
 
+#ifdef UMA_MD_SMALL_ALLOC
+	if (keg->uk_ppera > 1 || obj != NULL) {
+		kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
+		if (kva == 0)
+			return (0);
+	} else
+		kva = 0;
+#else
 	kva = kmem_alloc_nofault(kernel_map, pages * UMA_SLAB_SIZE);
-
 	if (kva == 0)
 		return (0);
-	if (obj == NULL)
-		obj = vm_object_allocate(OBJT_PHYS, pages);
-	else {
+#endif
+	if (obj != NULL) {
 		VM_OBJECT_LOCK_INIT(obj, "uma object");
 		_vm_object_allocate(OBJT_PHYS, pages, obj);
 	}
 	ZONE_LOCK(zone);
 	keg->uk_kva = kva;
 	keg->uk_obj = obj;
+	keg->uk_offset = 0;
 	keg->uk_maxpages = pages;
+#ifdef UMA_MD_SMALL_ALLOC
+	keg->uk_allocf = (keg->uk_ppera <= 1 && obj == NULL) ?
+	    uma_small_alloc : obj_alloc;
+#else
 	keg->uk_allocf = obj_alloc;
+#endif
 	keg->uk_flags |= UMA_ZONE_NOFREE | UMA_ZFLAG_PRIVALLOC;
 	ZONE_UNLOCK(zone);
 	return (1);
Index: sys/vm/uma_int.h
===================================================================
--- sys/vm/uma_int.h	(revision 246940)
+++ sys/vm/uma_int.h	(working copy)
@@ -222,6 +222,7 @@ struct uma_keg {
 	uma_free	uk_freef;	/* Free routine */
 
 	struct vm_object	*uk_obj;	/* Zone specific object */
+	u_long		uk_offset;	/* Zone specific next page index */
 	vm_offset_t	uk_kva;		/* Base kva for zones with objs */
 	uma_zone_t	uk_slabzone;	/* Slab zone backing us, if OFFPAGE */
 
Index: sys/vm/vm_radix.c
===================================================================
--- sys/vm/vm_radix.c	(revision 246940)
+++ sys/vm/vm_radix.c	(working copy)
@@ -63,7 +63,7 @@
 #endif
 
 #ifndef VM_RADIX_BOOT_CACHE
-#define VM_RADIX_BOOT_CACHE 1500
+#define VM_RADIX_BOOT_CACHE 250
 #endif
 
 /*
@@ -373,7 +373,6 @@ vm_radix_node_zone_dtor(void *mem, int size __unus
 static void
 vm_radix_init(void *arg __unused)
 {
-	int nitems;
 
 	vm_radix_node_zone = uma_zcreate("RADIX NODE",
 	    sizeof(struct vm_radix_node), NULL,
@@ -383,10 +382,9 @@ vm_radix_init(void *arg __unused)
 	    NULL,
 #endif
 	    NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE);
-	nitems = uma_zone_set_max(vm_radix_node_zone, cnt.v_page_count);
-	if (nitems < cnt.v_page_count)
-		panic("%s: unexpected requested number of items", __func__);
-	uma_prealloc(vm_radix_node_zone, nitems);
+	if (!uma_zone_set_obj(vm_radix_node_zone, NULL, cnt.v_page_count))
+		panic("%s: unable to create new zone", __func__);
+	uma_prealloc(vm_radix_node_zone, cnt.v_page_count);
 	boot_cache_cnt = VM_RADIX_BOOT_CACHE + 1;
 }
 SYSINIT(vm_radix_init, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_init, NULL);
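
The interesting part of the obj_alloc() change is the object == NULL path, where
the next-page index keg->uk_offset is claimed without a lock: the allocator reads
the offset, allocates and maps its pages, and only then tries to publish the new
offset with atomic_cmpset_long(), undoing all of the work and retrying if another
thread won the race (the object != NULL path is serialized by the object lock
instead). The following is a minimal userland sketch of that reserve/publish/
rollback pattern, not the kernel code itself: C11 atomics stand in for the
kernel's atomic_cmpset_long(), and the page allocation and mapping are only
indicated by comments.

/*
 * Sketch of the lockless offset-reservation scheme used by the patched
 * obj_alloc() when no VM object backs the keg.  Assumes C11 <stdatomic.h>;
 * reserve_pages() is a hypothetical stand-in for the allocation loop.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong uk_offset;		/* next free page index */

/* Reserve npages page slots; returns the first reserved index. */
static unsigned long
reserve_pages(unsigned long npages)
{
	unsigned long start, end;

	for (;;) {
		/* Optimistically read the current offset. */
		start = atomic_load(&uk_offset);
		end = start + npages;
		/*
		 * ... allocate and map pages [start, end) here ...
		 */
		if (atomic_compare_exchange_strong(&uk_offset, &start, end))
			return (start);	/* published: range is ours */
		/*
		 * Lost the race: another thread moved uk_offset while we
		 * worked.  Undo everything and retry, mirroring the
		 * unwire/free/goto retry path in the patch.
		 *
		 * ... unmap and free pages [start, end) here ...
		 */
	}
}

int
main(void)
{
	printf("first: %lu\n", reserve_pages(4));	/* prints 0 */
	printf("second: %lu\n", reserve_pages(2));	/* prints 4 */
	return (0);
}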