diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index 558b4c7..3ab799e 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include
 #include
 #include
+#include <sys/sched.h>
 #include
 #include
 #include
@@ -179,9 +180,12 @@ struct uma_bucket_zone {
 	int	ubz_entries;
 };
 
-#define	BUCKET_MAX	128
+#define	BUCKET_SIZE_THRESHOLD	131072
+#define	BUCKET_MAX	128
 
 struct uma_bucket_zone bucket_zones[] = {
+	{ NULL, "4 Bucket", 4 },
+	{ NULL, "8 Bucket", 8 },
 	{ NULL, "16 Bucket", 16 },
 	{ NULL, "32 Bucket", 32 },
 	{ NULL, "64 Bucket", 64 },
@@ -189,7 +193,7 @@ struct uma_bucket_zone bucket_zones[] = {
 	{ NULL, NULL, 0}
 };
 
-#define	BUCKET_SHIFT	4
+#define	BUCKET_SHIFT	2
 #define	BUCKET_ZONES	((BUCKET_MAX >> BUCKET_SHIFT) + 1)
 
 /*
@@ -636,6 +640,45 @@ cache_drain(uma_zone_t zone)
 }
 
 /*
+ * Safely move the per-CPU cache buckets of a zone to the zone's
+ * full-bucket list; the buckets themselves are not drained here.
+ * This is an expensive call because it needs to bind to all CPUs
+ * one by one and enter a critical section on each of them in order
+ * to safely access their cache buckets.
+ * The zone lock must be held when calling this function.
+ */
+static void
+cache_drain_safe(uma_zone_t zone)
+{
+	uma_cache_t cache;
+	int cpu;
+
+	mtx_assert(&zone->uz_lock, MA_OWNED);
+
+	CPU_FOREACH(cpu) {
+		thread_lock(curthread);
+		sched_bind(curthread, cpu);
+		thread_unlock(curthread);
+		critical_enter();
+		cache = &zone->uz_cpu[cpu];
+		if (cache->uc_allocbucket) {
+			LIST_INSERT_HEAD(&zone->uz_full_bucket,
+			    cache->uc_allocbucket, ub_link);
+			cache->uc_allocbucket = NULL;
+		}
+		if (cache->uc_freebucket) {
+			LIST_INSERT_HEAD(&zone->uz_full_bucket,
+			    cache->uc_freebucket, ub_link);
+			cache->uc_freebucket = NULL;
+		}
+		critical_exit();
+	}
+	thread_lock(curthread);
+	sched_unbind(curthread);
+	thread_unlock(curthread);
+}
+
+/*
  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  */
 static void
@@ -769,6 +812,8 @@ zone_drain_wait(uma_zone_t zone, int waitok)
 		mtx_lock(&uma_mtx);
 	}
 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
+	if (vm_page_count_min() && !(zone->uz_flags & UMA_ZFLAG_INTERNAL))
+		cache_drain_safe(zone);
 	bucket_cache_drain(zone);
 	ZONE_UNLOCK(zone);
 	/*
@@ -1457,12 +1502,21 @@ zone_ctor(void *mem, int size, void *udata, int flags)
 		return (0);
 	}
 
+	zone->uz_count_max = BUCKET_SIZE_THRESHOLD / zone->uz_size;
+	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
+		zone->uz_count_max = BUCKET_MAX;
+	else if (zone->uz_count_max > BUCKET_MAX)
+		zone->uz_count_max = BUCKET_MAX;
+	else if (zone->uz_count_max < (1 << BUCKET_SHIFT))
+		zone->uz_count_max = 0;
+
 	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
 		zone->uz_count = BUCKET_MAX;
-	else if (keg->uk_ipers <= BUCKET_MAX)
+	else if (keg->uk_ipers < zone->uz_count_max)
 		zone->uz_count = keg->uk_ipers;
 	else
-		zone->uz_count = BUCKET_MAX;
+		zone->uz_count = zone->uz_count_max;
+
 	return (0);
 }
 
@@ -1955,6 +2009,9 @@ uma_zalloc_arg(uma_zone_t zone, void *udata, int flags)
 		    "uma_zalloc_arg: zone \"%s\"", zone->uz_name);
 	}
 
+	if (zone->uz_count_max == 0)
+		goto no_cache;
+
 	/*
 	 * If possible, allocate from the per-CPU cache.  There are two
 	 * requirements for safe access to the per-CPU cache: (1) the thread
@@ -2076,7 +2133,7 @@ zalloc_start:
 	critical_exit();
 
 	/* Bump up our uz_count so we get here less */
-	if (zone->uz_count < BUCKET_MAX)
+	if (zone->uz_count < zone->uz_count_max)
 		zone->uz_count++;
 
 	/*
@@ -2095,6 +2152,7 @@ zalloc_start:
 	printf("uma_zalloc_arg: Bucketzone returned NULL\n");
 #endif
 
+no_cache:
 	item = zone_alloc_item(zone, udata, flags);
 	return (item);
 }
diff --git a/sys/vm/uma_int.h b/sys/vm/uma_int.h
index 7713593..6d81e3d 100644
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -330,6 +330,7 @@ struct uma_zone {
 	u_int64_t	uz_sleeps;	/* Total number of alloc sleeps */
 	uint16_t	uz_fills;	/* Outstanding bucket fills */
 	uint16_t	uz_count;	/* Highest value ub_ptr can have */
+	uint16_t	uz_count_max;	/* Highest value uz_count can have */
 
 	/*
	 * This HAS to be the last item because we adjust the zone size
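The bucket sizing change above can be illustrated with a small stand-alone sketch. The following user-space program is not part of the patch; the count_max() helper and the sample item sizes are illustrative only, and the UMA_ZONE_MAXBUCKET special case is left out. It applies the same clamping rules that zone_ctor() now uses: a full bucket is limited to roughly BUCKET_SIZE_THRESHOLD bytes of items, the entry count is capped at BUCKET_MAX, and zones whose items are so large that fewer than 1 << BUCKET_SHIFT entries would fit get uz_count_max = 0, which sends allocations down the new no_cache path.

/*
 * Illustrative sketch only: mirrors the uz_count_max computation added
 * to zone_ctor() above, using the constants from the patch.
 */
#include <stdio.h>

#define	BUCKET_SIZE_THRESHOLD	131072
#define	BUCKET_MAX		128
#define	BUCKET_SHIFT		2

static int
count_max(int item_size)
{
	int count;

	count = BUCKET_SIZE_THRESHOLD / item_size;
	if (count > BUCKET_MAX)
		count = BUCKET_MAX;
	else if (count < (1 << BUCKET_SHIFT))
		count = 0;	/* too large to be worth caching per-CPU */
	return (count);
}

int
main(void)
{
	int sizes[] = { 64, 256, 4096, 16384, 65536 };
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("item size %6d -> uz_count_max %3d\n",
		    sizes[i], count_max(sizes[i]));
	return (0);
}

With these values, a 4096-byte zone would cache at most 32 items per bucket instead of 128, and a 65536-byte zone would bypass the per-CPU cache entirely.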