Index: sys/vm/vm_radix.c =================================================================== --- sys/vm/vm_radix.c (revision 250745) +++ sys/vm/vm_radix.c (working copy) @@ -103,30 +103,21 @@ struct vm_radix_node { static uma_zone_t vm_radix_node_zone; /* - * Allocate a radix node. Pre-allocation should ensure that the request - * will always be satisfied. + * Allocate a radix node. */ static __inline struct vm_radix_node * vm_radix_node_get(vm_pindex_t owner, uint16_t count, uint16_t clevel) { struct vm_radix_node *rnode; + u_int i; - rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT); - - /* - * The required number of nodes should already be pre-allocated - * by vm_radix_prealloc(). However, UMA can hold a few nodes - * in per-CPU buckets, which will not be accessible by the - * current CPU. Thus, the allocation could return NULL when - * the pre-allocated pool is close to exhaustion. Anyway, - * in practice this should never occur because a new node - * is not always required for insert. Thus, the pre-allocated - * pool should have some extra pages that prevent this from - * becoming a problem. - */ + rnode = uma_zalloc(vm_radix_node_zone, M_NOWAIT | M_ZERO); if (rnode == NULL) - panic("%s: uma_zalloc() returned NULL for a new node", - __func__); + panic("%s: uma_zalloc() returned NULL for a new node", __func__); + for (i = 0; i < VM_RADIX_COUNT; i++) + KASSERT(rnode->rn_child[i] == NULL, + ("vm_radix_node_get: rnode %p has child %d %p", rnode, + i, rnode->rn_child[i])); rnode->rn_owner = owner; rnode->rn_count = count; rnode->rn_clev = clevel; @@ -295,20 +286,8 @@ vm_radix_node_zone_dtor(void *mem, int size __unus } #endif +#if 0 /* - * Radix node zone initializer. - */ -static int -vm_radix_node_zone_init(void *mem, int size __unused, int flags __unused) -{ - struct vm_radix_node *rnode; - - rnode = mem; - memset(rnode->rn_child, 0, sizeof(rnode->rn_child)); - return (0); -} - -/* * Pre-allocate intermediate nodes from the UMA slab zone. 
*/ static void @@ -328,6 +307,7 @@ vm_radix_prealloc(void *arg __unused) } SYSINIT(vm_radix_prealloc, SI_SUB_KMEM, SI_ORDER_SECOND, vm_radix_prealloc, NULL); +#endif /* * Initialize the UMA slab zone. @@ -345,7 +325,7 @@ vm_radix_init(void) #else NULL, #endif - vm_radix_node_zone_init, NULL, VM_RADIX_PAD, UMA_ZONE_VM | + NULL, NULL, VM_RADIX_PAD, UMA_ZONE_VM | UMA_ZONE_NOFREE); } Index: sys/vm/vm_page.c =================================================================== --- sys/vm/vm_page.c (revision 250745) +++ sys/vm/vm_page.c (working copy) @@ -315,7 +315,8 @@ vm_page_startup(vm_offset_t vaddr) /* * Initialize the page and queue locks. */ - mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF); + mtx_init(&vm_page_queue_free_mtx, "vm page free queue", NULL, MTX_DEF | + MTX_RECURSE); for (i = 0; i < PA_LOCK_COUNT; i++) mtx_init(&pa_lock[i], "vm page", NULL, MTX_DEF); for (i = 0; i < PQ_COUNT; i++) Index: sys/amd64/conf/GENERIC =================================================================== --- sys/amd64/conf/GENERIC (revision 250745) +++ sys/amd64/conf/GENERIC (working copy) @@ -82,8 +82,8 @@ options GDB # Support remote GDB. options DEADLKRES # Enable the deadlock resolver options INVARIANTS # Enable calls of extra sanity checking options INVARIANT_SUPPORT # Extra sanity checks of internal structures, required by INVARIANTS -options WITNESS # Enable checks to detect deadlocks and cycles -options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed +#options WITNESS # Enable checks to detect deadlocks and cycles +#options WITNESS_SKIPSPIN # Don't run witness on spinlocks for speed options MALLOC_DEBUG_MAXZONES=8 # Separate malloc(9) zones # Make an SMP-capable kernel by default