--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -71,6 +71,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/mutex.h>
 #include <sys/proc.h>
 #include <sys/sbuf.h>
+#include <sys/sched.h>
 #include <sys/smp.h>
 #include <sys/vmmeter.h>
 #include <vm/vm.h>
@@ -635,6 +636,37 @@ cache_drain(uma_zone_t zone)
 	ZONE_UNLOCK(zone);
 }
+
+#if 1
+static void
+cache_drain2(uma_zone_t zone)
+{
+	uma_cache_t cache;
+	int cpu;
+
+	CPU_FOREACH(cpu) {
+		thread_lock(curthread);
+		sched_bind(curthread, cpu);
+		thread_unlock(curthread);
+		critical_enter();
+		cache = &zone->uz_cpu[cpu];
+		if (cache->uc_allocbucket) {
+			LIST_INSERT_HEAD(&zone->uz_full_bucket,
+			    cache->uc_allocbucket, ub_link);
+			cache->uc_allocbucket = NULL;
+		}
+		if (cache->uc_freebucket) {
+			LIST_INSERT_HEAD(&zone->uz_full_bucket,
+			    cache->uc_freebucket, ub_link);
+			cache->uc_freebucket = NULL;
+		}
+		critical_exit();
+	}
+	thread_lock(curthread);
+	sched_unbind(curthread);
+	thread_unlock(curthread);
+}
+#endif
 
 /*
  * Drain the cached buckets from a zone.  Expects a locked zone on entry.
  */
@@ -769,6 +801,10 @@ zone_drain_wait(uma_zone_t zone, int waitok)
 		mtx_lock(&uma_mtx);
 	}
 	zone->uz_flags |= UMA_ZFLAG_DRAINING;
+#if 1
+	if (vm_page_count_min() && !(zone->uz_flags & UMA_ZFLAG_INTERNAL))
+		cache_drain2(zone);
+#endif
 	bucket_cache_drain(zone);
 	ZONE_UNLOCK(zone);
 	/*
@@ -779,6 +815,8 @@ zone_drain_wait(uma_zone_t zone, int waitok)
 	zone_foreach_keg(zone, &keg_drain);
 	ZONE_LOCK(zone);
 	zone->uz_flags &= ~UMA_ZFLAG_DRAINING;
+	if (zone->uz_count > zone->uz_count_min)
+		zone->uz_count--;
 	wakeup(zone);
 out:
 	ZONE_UNLOCK(zone);
@@ -1398,6 +1436,9 @@ zone_ctor(void *mem, int size, void *udata, int flags)
 	zone->uz_fails = 0;
 	zone->uz_sleeps = 0;
 	zone->uz_fills = zone->uz_count = 0;
+	zone->uz_count_step = 0;
+	zone->uz_count_need = 0;
+	zone->uz_count_min = 1;
 	zone->uz_flags = 0;
 	keg = arg->keg;
 
@@ -1458,11 +1499,16 @@
 	}
 
 	if (keg->uk_flags & UMA_ZONE_MAXBUCKET)
-		zone->uz_count = BUCKET_MAX;
+		zone->uz_count_min = BUCKET_MAX;
 	else if (keg->uk_ipers <= BUCKET_MAX)
-		zone->uz_count = keg->uk_ipers;
+		zone->uz_count_min = keg->uk_ipers;
 	else
-		zone->uz_count = BUCKET_MAX;
+		zone->uz_count_min = BUCKET_MAX;
+	zone->uz_count = zone->uz_count_min;
+	zone->uz_count_step = mp_ncpus * zone->uz_size / PAGE_SIZE;	/*XXX*/
+	if (zone->uz_count_step == 0)
+		zone->uz_count_step = 1;
+
 	return (0);
 }
 
@@ -2076,8 +2122,13 @@ zalloc_start:
 	critical_exit();
 
 	/* Bump up our uz_count so we get here less */
-	if (zone->uz_count < BUCKET_MAX)
-		zone->uz_count++;
+	if (zone->uz_count < BUCKET_MAX) {
+		zone->uz_count_need++;
+		if (zone->uz_count_need == zone->uz_count_step) {
+			zone->uz_count_need = 0;
+			zone->uz_count++;
+		}
+	}
 
 	/*
 	 * Now lets just fill a bucket and put it on the free list.  If that
--- a/sys/vm/uma_int.h
+++ b/sys/vm/uma_int.h
@@ -330,6 +330,9 @@ struct uma_zone {
 	u_int64_t	uz_sleeps;	/* Total number of alloc sleeps */
 	uint16_t	uz_fills;	/* Outstanding bucket fills */
 	uint16_t	uz_count;	/* Highest value ub_ptr can have */
+	uint16_t	uz_count_step;
+	uint16_t	uz_count_need;
+	uint16_t	uz_count_min;	/* Smallest value uz_count can have */
 
 	/*
 	 * This HAS to be the last item because we adjust the zone size