From 7f66ba575724cec7faaa95becba3805dc09a01dd Mon Sep 17 00:00:00 2001
From: Mark Johnston
Date: Mon, 30 May 2016 12:22:27 -0700
Subject: [PATCH 2/4] Add a helper function for reading per-CPU vmmeter
 statistics.

Use it to read the pdwakeups value. In HEAD, this statistic is
maintained per-CPU.
---
 sys/sys/vmmeter.h   |  8 ++++++++
 sys/vm/vm_meter.c   | 34 +++++++++++++++++++++-------------
 sys/vm/vm_pageout.c | 10 ++++++----
 3 files changed, 35 insertions(+), 17 deletions(-)

diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h
index 9337aa3..eae9fce 100644
--- a/sys/sys/vmmeter.h
+++ b/sys/sys/vmmeter.h
@@ -197,6 +197,14 @@ vm_laundry_target(void)
 	    vm_paging_target());
 }
 
+/*
+ * Obtain the value of a per-CPU counter.
+ */
+#define	VM_METER_PCPU_CNT(member)					\
+	vm_meter_cnt(__offsetof(struct vmmeter, member))
+
+u_int	vm_meter_cnt(size_t);
+
 #endif
 
 /* systemwide totals computed every five seconds */
diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c
index bb462d3..4bb079c 100644
--- a/sys/vm/vm_meter.c
+++ b/sys/vm/vm_meter.c
@@ -224,29 +224,37 @@ vmtotal(SYSCTL_HANDLER_ARGS)
 }
 
 /*
- * vcnt() -	accumulate statistics from all cpus and the global cnt
- *		structure.
+ * vm_meter_cnt() - accumulate statistics from all cpus and the global cnt
+ *	structure.
  *
  *	The vmmeter structure is now per-cpu as well as global.  Those
  *	statistics which can be kept on a per-cpu basis (to avoid cache
  *	stalls between cpus) can be moved to the per-cpu vmmeter.  Remaining
  *	statistics, such as v_free_reserved, are left in the global
  *	structure.
- *
- * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req)
  */
-static int
-vcnt(SYSCTL_HANDLER_ARGS)
+u_int
+vm_meter_cnt(size_t offset)
 {
-	int count = *(int *)arg1;
-	int offset = (char *)arg1 - (char *)&vm_cnt;
+	struct pcpu *pcpu;
+	u_int count;
 	int i;
 
+	count = *(u_int *)((char *)&vm_cnt + offset);
 	CPU_FOREACH(i) {
-		struct pcpu *pcpu = pcpu_find(i);
-		count += *(int *)((char *)&pcpu->pc_cnt + offset);
+		pcpu = pcpu_find(i);
+		count += *(u_int *)((char *)&pcpu->pc_cnt + offset);
 	}
-	return (SYSCTL_OUT(req, &count, sizeof(int)));
+	return (count);
+}
+
+static int
+cnt_sysctl(SYSCTL_HANDLER_ARGS)
+{
+	u_int count;
+
+	count = vm_meter_cnt((char *)arg1 - (char *)&vm_cnt);
+	return (SYSCTL_OUT(req, &count, sizeof(count)));
 }
 
 SYSCTL_PROC(_vm, VM_TOTAL, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE,
@@ -261,8 +269,8 @@ SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats");
 
 #define	VM_STATS(parent, var, descr) \
     SYSCTL_PROC(parent, OID_AUTO, var, \
-	CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &vm_cnt.var, 0, vcnt, \
-	"IU", descr)
+	CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &vm_cnt.var, 0, \
+	cnt_sysctl, "IU", descr)
 
 #define	VM_STATS_VM(var, descr)		VM_STATS(_vm_stats_vm, var, descr)
 #define	VM_STATS_SYS(var, descr)	VM_STATS(_vm_stats_sys, var, descr)
diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c
index ecb1f9e..0229ea2 100644
--- a/sys/vm/vm_pageout.c
+++ b/sys/vm/vm_pageout.c
@@ -1110,7 +1110,8 @@ vm_pageout_laundry_worker(void *arg)
 {
 	struct vm_domain *domain;
 	uint64_t ninact, nlaundry;
-	int cycle, tcycle, domidx, gen, launder, laundered;
+	u_int wakeups, gen;
+	int cycle, tcycle, domidx, launder, laundered;
 	int shortfall, prev_shortfall, target;
 
 	domidx = (uintptr_t)arg;
@@ -1119,7 +1120,7 @@ vm_pageout_laundry_worker(void *arg)
 	vm_pageout_init_marker(&domain->vmd_laundry_marker, PQ_LAUNDRY);
 
 	cycle = tcycle = 0;
-	gen = -1;
+	gen = 0;
 	shortfall = prev_shortfall = 0;
 	target = 0;
 
@@ -1191,13 +1192,14 @@ vm_pageout_laundry_worker(void *arg)
 
 		ninact = vm_cnt.v_inactive_count;
 		nlaundry = vm_cnt.v_laundry_count;
+		wakeups = VM_METER_PCPU_CNT(v_pdwakeups);
 		if (ninact > 0 &&
-		    vm_cnt.v_pdwakeups != gen &&
+		    wakeups != gen &&
 		    vm_cnt.v_free_count < bkgrd_launder_thresh &&
 		    nlaundry * bkgrd_launder_ratio >= ninact) {
 			cycle = 0;
 			tcycle = VM_LAUNDER_INTERVAL;
-			gen = vm_cnt.v_pdwakeups;
+			gen = wakeups;
 			if (nlaundry >= ninact)
 				target = vm_cnt.v_free_target;
 			else
-- 
2.8.1
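
Note: below is a minimal userland sketch of the offset-based aggregation
pattern this patch introduces, for readers who want to see the technique
in isolation. Everything here (struct meter, NCPU, global_cnt, pcpu_cnt,
meter_cnt(), METER_PCPU_CNT()) is an illustrative stand-in for the
kernel's struct vmmeter, CPU_FOREACH(), vm_cnt/pc_cnt, vm_meter_cnt(),
and VM_METER_PCPU_CNT(); it is not the kernel code itself.

#include <stddef.h>
#include <stdio.h>

#define	NCPU	4		/* stand-in for the kernel's CPU iteration */

/* Simplified stand-in for struct vmmeter. */
struct meter {
	unsigned int	v_pdwakeups;
	unsigned int	v_free_count;
};

/* One global structure plus one copy per CPU, as with vm_cnt/pc_cnt. */
static struct meter global_cnt = { .v_pdwakeups = 1 };
static struct meter pcpu_cnt[NCPU] = {
	{ .v_pdwakeups = 2 }, { .v_pdwakeups = 3 },
	{ .v_pdwakeups = 5 }, { .v_pdwakeups = 7 },
};

/*
 * Sum one counter over the global structure and every per-CPU copy,
 * locating the field by its byte offset, as vm_meter_cnt() does.
 */
static unsigned int
meter_cnt(size_t offset)
{
	unsigned int count;
	int i;

	count = *(unsigned int *)((char *)&global_cnt + offset);
	for (i = 0; i < NCPU; i++)
		count += *(unsigned int *)((char *)&pcpu_cnt[i] + offset);
	return (count);
}

/* Offset-of wrapper, analogous to VM_METER_PCPU_CNT(). */
#define	METER_PCPU_CNT(member)	meter_cnt(offsetof(struct meter, member))

int
main(void)
{
	/* Expected output: v_pdwakeups = 18 (1 + 2 + 3 + 5 + 7). */
	printf("v_pdwakeups = %u\n", METER_PCPU_CNT(v_pdwakeups));
	return (0);
}

Passing a field offset rather than a pointer is what lets a single
accessor serve every u_int counter: callers in the kernel use the
VM_METER_PCPU_CNT() macro, while the sysctl path recovers the offset
from the field address it registered, via (char *)arg1 - (char *)&vm_cnt
in cnt_sysctl().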