Index: sys/vm/vm_radix.c
===================================================================
--- sys/vm/vm_radix.c	(revision 253502)
+++ sys/vm/vm_radix.c	(working copy)
@@ -103,8 +103,7 @@ struct vm_radix_node {
 static uma_zone_t vm_radix_node_zone;
 
 /*
- * Allocate a radix node. Pre-allocation should ensure that the request
- * will always be satisfied.
+ * Allocate a radix node.
  */
 static __inline struct vm_radix_node *
 vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel)
@@ -112,21 +111,8 @@ vm_radix_node_get(vm_pindex_t owner, uint16_t coun
 	struct vm_radix_node *rnode;
 
 	rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT);
-
-	/*
-	 * The required number of nodes should already be pre-allocated
-	 * by vm_radix_prealloc(). However, UMA can hold a few nodes
-	 * in per-CPU buckets, which will not be accessible by the
-	 * current CPU. Thus, the allocation could return NULL when
-	 * the pre-allocated pool is close to exhaustion. Anyway,
-	 * in practice this should never occur because a new node
-	 * is not always required for insert. Thus, the pre-allocated
-	 * pool should have some extra pages that prevent this from
-	 * becoming a problem.
-	 */
 	if (rnode == NULL)
-		panic("%s: uma_zalloc() returned NULL for a new node",
-		    __func__);
+		panic("%s: uma_zalloc() failed for a new node", __func__);
 	rnode->rn_owner = owner;
 	rnode->rn_count = count;
 	rnode->rn_clev = clevel;
@@ -308,31 +294,33 @@ vm_radix_node_zone_init(void *mem, int size __unus
 	return (0);
 }
 
+#ifndef UMA_MD_SMALL_ALLOC
 /*
- * Pre-allocate intermediate nodes from the UMA slab zone.
+ * Reserve the KVA necessary to satisfy the node allocation.
+ * This is mandatory on architectures that do not support a direct
+ * mapping, as they would otherwise need to carve into the kernel maps
+ * for every node allocation, resulting in deadlocks for consumers
+ * already working with kernel maps.
  */
 static void
-vm_radix_prealloc(void *arg __unused)
+vm_radix_reserve_kva(void *arg __unused)
 {
-	int nodes;
 
 	/*
 	 * Calculate the number of reserved nodes, discounting the pages that
 	 * are needed to store them.
 	 */
-	nodes = ((vm_paddr_t)cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
-	    sizeof(struct vm_radix_node));
-	if (!uma_zone_reserve_kva(vm_radix_node_zone, nodes))
+	if (!uma_zone_reserve_kva(vm_radix_node_zone,
+	    ((vm_paddr_t)cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
+	    sizeof(struct vm_radix_node))))
 		panic("%s: unable to create new zone", __func__);
-	uma_prealloc(vm_radix_node_zone, nodes);
 }
-SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc,
-    NULL);
+SYSINIT(vm_radix_reserve_kva, SI_SUB_KMEM, SI_ORDER_SECOND,
+    vm_radix_reserve_kva, NULL);
+#endif
 
 /*
  * Initialize the UMA slab zone.
- * Until vm_radix_prealloc() is called, the zone will be served by the
- * UMA boot-time pre-allocated pool of pages.
  */
 void
 vm_radix_init(void)
@@ -345,8 +333,7 @@ vm_radix_init(void)
 #else
 		NULL,
 #endif
-	    vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM |
-	    UMA_ZONE_NOFREE);
+	    vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM);
 }
 
 /*
Index: sys/vm/vm_page.c
===================================================================
--- sys/vm/vm_page.c	(revision 253502)
+++ sys/vm/vm_page.c	(working copy)
@@ -316,7 +316,8 @@ vm_page_startup(vm_offset_t vaddr)
 	/*
	 * Initialize the page and queue locks.
 	 */
-	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF);
+	mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF |
+	    MTX_RECURSE);
 	for (i = 0; i < PA_LOCK_COUNT; i++)
 		mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF);
 	for (i = 0; i < PQ_COUNT; i++)