==== //SpectraBSD/stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c#16 (text) - //SpectraBSD/stable/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/arc.c#19 (text) ====

@@ -2484,6 +2484,8 @@
 		uint64_t to_free;
 
 		to_free = arc_c >> arc_shrink_shift;
+		DTRACE_PROBE4(arc__shrink, uint64_t, arc_c,
+		    uint64_t, arc_c_min, uint64_t, arc_p, uint64_t, to_free);
 		if (arc_c > arc_c_min + to_free)
 			atomic_add_64(&arc_c, -to_free);
 		else
@@ -2494,12 +2496,18 @@
 			arc_c = MAX(arc_size, arc_c_min);
 		if (arc_p > arc_c)
 			arc_p = (arc_c >> 1);
+
+		DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t, arc_p);
+
 		ASSERT(arc_c >= arc_c_min);
 		ASSERT((int64_t)arc_p >= 0);
 	}
 
-	if (arc_size > arc_c)
+	if (arc_size > arc_c) {
+		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
+		    uint64_t, arc_c);
 		arc_adjust();
+	}
 }
 
 static int needfree = 0;
@@ -2509,6 +2517,8 @@
 {
 
 #ifdef _KERNEL
+	DTRACE_PROBE2(arc__reclaim_needed,
+	    int, needfree, int, vm_paging_needed());
 	if (needfree)
 		return (1);
 
@@ -2545,6 +2555,7 @@
 	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
 		return (1);
+#endif	/* sun */
 
 #if defined(__i386)
 	/*
@@ -2558,14 +2569,10 @@
 	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
 	 * free)
 	 */
-	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
-	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
+	if (vmem_size(heap_arena, VMEM_FREE) <
+	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2))
 		return (1);
 #endif
-#else	/* !sun */
-	if (kmem_used() > (kmem_size() * 3) / 4)
-		return (1);
-#endif	/* sun */
 
 #else	/* !_KERNEL */
 	if (spa_get_random(100) == 0)
@@ -2627,11 +2634,11 @@
 	clock_t			growtime = 0;
 	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
 	callb_cpr_t		cpr;
+	int			last_needfree = 0;
 
 	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
 
-	mutex_enter(&arc_reclaim_thr_lock);
-	while (arc_thread_exit == 0) {
+	for (;;) {
 		if (arc_reclaim_needed()) {
 
 			if (arc_no_grow) {
@@ -2646,6 +2653,10 @@
 				membar_producer();
 			}
 
+			DTRACE_PROBE2(arc__reclaim_strategy,
+			    int, arc_no_grow,
+			    arc_reclaim_strategy_t, last_reclaim);
+
 			/* reset the growth delay for every reclaim */
 			growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
 
@@ -2670,18 +2681,27 @@
 		if (arc_eviction_list != NULL)
 			arc_do_user_evicts();
 
+		mutex_enter(&arc_reclaim_thr_lock);
+		if (arc_thread_exit != 0)
+			break;
+
+		needfree -= last_needfree;
+		VERIFY3S(needfree, >=, 0);
+		if (needfree == 0) {
+			/*
+			 * Block until needed, or one second,
+			 * whichever is shorter.
+			 */
 #ifdef _KERNEL
-		if (needfree) {
-			needfree = 0;
 			wakeup(&needfree);
+#endif
+			CALLB_CPR_SAFE_BEGIN(&cpr);
+			(void) cv_timedwait(&arc_reclaim_thr_cv,
+			    &arc_reclaim_thr_lock, hz);
+			CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
 		}
-#endif
-
-		/* block until needed, or one second, whichever is shorter */
-		CALLB_CPR_SAFE_BEGIN(&cpr);
-		(void) cv_timedwait(&arc_reclaim_thr_cv,
-		    &arc_reclaim_thr_lock, hz);
-		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
+		last_needfree = needfree;
+		mutex_exit(&arc_reclaim_thr_lock);
 	}
 
 	arc_thread_exit = 0;
@@ -4067,13 +4087,12 @@
 static eventhandler_tag arc_event_lowmem = NULL;
 
 static void
-arc_lowmem(void *arg __unused, int howto __unused)
+arc_lowmem(void *arg __unused, int tries)
 {
 
-	/* Serialize access via arc_lowmem_lock. */
-	mutex_enter(&arc_lowmem_lock);
+	DTRACE_PROBE1(arc__lowmem_signaled, int, needfree);
 	mutex_enter(&arc_reclaim_thr_lock);
-	needfree = 1;
+	needfree++;
 	cv_signal(&arc_reclaim_thr_cv);
 
 	/*
@@ -4081,12 +4100,13 @@
 	 * here from ARC itself and may hold ARC locks and thus risk a deadlock
 	 * with ARC reclaim thread.
 	 */
-	if (curproc == pageproc) {
+	if (tries > 1 && curproc == pageproc) {
 		while (needfree)
-			msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
+			msleep(&needfree, &arc_reclaim_thr_lock, 0,
+			    "zfs:lowmem", 0);
 	}
+	mutex_exit(&arc_reclaim_thr_lock);
 
-	mutex_exit(&arc_lowmem_lock);
 }
 #endif

==== //SpectraBSD/stable/sys/vm/vm_pageout.c#8 (text) ====

@@ -663,7 +663,7 @@
 	 * may acquire locks and/or sleep, so they can only be invoked
 	 * when "tries" is greater than zero.
 	 */
-	EVENTHANDLER_INVOKE(vm_lowmem, 0);
+	EVENTHANDLER_INVOKE(vm_lowmem, tries);
 
 	/*
 	 * We do this explicitly after the caches have been drained
@@ -925,7 +925,7 @@
 		/*
 		 * Decrease registered cache sizes.
 		 */
-		EVENTHANDLER_INVOKE(vm_lowmem, 0);
+		EVENTHANDLER_INVOKE(vm_lowmem, pass);
 
 		/*
 		 * We do this explicitly after the caches have been
 		 * drained above.
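
For illustration, here is a minimal sketch of what the revised vm_lowmem contract looks like from a consumer's side: handlers now receive the pagedaemon's tries/pass count instead of a hard-coded 0, so cheap trimming can run on every invocation while blocking work is deferred, mirroring the tries > 1 gate arc_lowmem now applies before sleeping on needfree. Every identifier prefixed with mycache_ is hypothetical; only the vm_lowmem event and EVENTHANDLER_REGISTER() from <sys/eventhandler.h> are real interfaces.

	#include <sys/param.h>
	#include <sys/eventhandler.h>

	/* Hypothetical cheap, non-sleeping trim. */
	static void mycache_trim_nowait(void);
	/* Hypothetical expensive flush that may sleep. */
	static void mycache_flush_wait(void);

	static eventhandler_tag mycache_lowmem_tag;

	static void
	mycache_lowmem(void *arg __unused, int tries)
	{

		/* Safe on every pass: reclaim what we can without blocking. */
		mycache_trim_nowait();

		/*
		 * Only sleep for a synchronous flush once the pagedaemon
		 * has made more than one pass, as arc_lowmem now does
		 * before blocking on needfree.
		 */
		if (tries > 1)
			mycache_flush_wait();
	}

	static void
	mycache_lowmem_init(void)
	{

		mycache_lowmem_tag = EVENTHANDLER_REGISTER(vm_lowmem,
		    mycache_lowmem, NULL, EVENTHANDLER_PRI_FIRST);
	}

Under this reading, the vm_pageout.c hunks above are what make such a consumer possible: by forwarding "tries" (and "pass") instead of 0, the pagedaemon lets handlers distinguish a first, opportunistic pass from a later, desperate one.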