diff --git a/sys/amd64/amd64/genassym.c b/sys/amd64/amd64/genassym.c index 3043bb5..09072c6 100644 --- a/sys/amd64/amd64/genassym.c +++ b/sys/amd64/amd64/genassym.c @@ -95,9 +95,6 @@ ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); ASSYM(TDP_CALLCHAIN, TDP_CALLCHAIN); ASSYM(TDP_KTHREAD, TDP_KTHREAD); -ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); -ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall)); -ASSYM(V_INTR, offsetof(struct vmmeter, v_intr)); ASSYM(KSTACK_PAGES, KSTACK_PAGES); ASSYM(PAGE_SIZE, PAGE_SIZE); ASSYM(NPTEPG, NPTEPG); diff --git a/sys/amd64/amd64/machdep.c b/sys/amd64/amd64/machdep.c index a1c61eb..caeccd6 100644 --- a/sys/amd64/amd64/machdep.c +++ b/sys/amd64/amd64/machdep.c @@ -267,7 +267,7 @@ cpu_startup(dummy) memsize = (uintmax_t)strtoul(sysenv, (char **)NULL, 10) << 10; freeenv(sysenv); } - if (memsize < ptoa((uintmax_t)cnt.v_free_count)) + if (memsize < ptoa((uintmax_t)vmmeter.v_free_count)) memsize = ptoa((uintmax_t)Maxmem); printf("real memory = %ju (%ju MB)\n", memsize, memsize >> 20); @@ -293,8 +293,8 @@ cpu_startup(dummy) vm_ksubmap_init(&kmi); printf("avail memory = %ju (%ju MB)\n", - ptoa((uintmax_t)cnt.v_free_count), - ptoa((uintmax_t)cnt.v_free_count) / 1048576); + ptoa((uintmax_t)vmmeter.v_free_count), + ptoa((uintmax_t)vmmeter.v_free_count) / 1048576); /* * Set up buffers, so they can be used to read disk labels. diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c index e1d373c..5032bb4 100644 --- a/sys/amd64/amd64/pmap.c +++ b/sys/amd64/amd64/pmap.c @@ -1622,7 +1622,7 @@ _pmap_unwire_ptp(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_page_t *free) * the page table page is globally performed before TLB shoot- * down is begun. */ - atomic_subtract_rel_int(&cnt.v_wire_count, 1); + atomic_subtract_rel_int(&vmmeter.v_wire_count, 1); /* * Put page on a list so that it is released after @@ -1773,7 +1773,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) if (_pmap_allocpte(pmap, NUPDE + NUPDPE + pml4index, lockp) == NULL) { --m->wire_count; - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } @@ -1806,7 +1806,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) if (_pmap_allocpte(pmap, NUPDE + pdpindex, lockp) == NULL) { --m->wire_count; - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); vm_page_free_zero(m); return (NULL); } @@ -1820,7 +1820,7 @@ _pmap_allocpte(pmap_t pmap, vm_pindex_t ptepindex, struct rwlock **lockp) if (_pmap_allocpte(pmap, NUPDE + pdpindex, lockp) == NULL) { --m->wire_count; - atomic_subtract_int(&cnt.v_wire_count, + atomic_subtract_int(&vmmeter.v_wire_count, 1); vm_page_free_zero(m); return (NULL); @@ -1947,7 +1947,7 @@ pmap_release(pmap_t pmap) pmap->pm_pml4[PML4PML4I] = 0; /* Recursive Mapping */ m->wire_count--; - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); vm_page_free_zero(m); PMAP_LOCK_DESTROY(pmap); } @@ -2236,7 +2236,7 @@ reclaim_pv_chunk(pmap_t locked_pmap, struct rwlock **lockp) free = (void *)m_pc->object; /* Recycle a freed page table page. 
*/ m_pc->wire_count = 1; - atomic_add_int(&cnt.v_wire_count, 1); + atomic_add_int(&vmmeter.v_wire_count, 1); } pmap_free_zero_pages(free); return (m_pc); @@ -2847,7 +2847,7 @@ pmap_remove_pde(pmap_t pmap, pd_entry_t *pdq, vm_offset_t sva, ("pmap_remove_pde: pte page wire count error")); mpte->wire_count = 0; pmap_add_delayed_free_list(mpte, free, FALSE); - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); } } return (pmap_unuse_pt(pmap, sva, *pmap_pdpe(pmap, sva), free)); @@ -4496,7 +4496,7 @@ pmap_remove_pages(pmap_t pmap) ("pmap_remove_pages: pte page wire count error")); mpte->wire_count = 0; pmap_add_delayed_free_list(mpte, &free, FALSE); - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); } } else { pmap_resident_count_dec(pmap, 1); diff --git a/sys/amd64/amd64/trap.c b/sys/amd64/amd64/trap.c index 3eaf3fd..a2d4b2e 100644 --- a/sys/amd64/amd64/trap.c +++ b/sys/amd64/amd64/trap.c @@ -202,7 +202,7 @@ trap(struct trapframe *frame) register_t addr = 0; ksiginfo_t ksi; - PCPU_INC(cnt.v_trap); + VMMETER_INC(v_trap); type = frame->tf_trapno; #ifdef SMP diff --git a/sys/amd64/amd64/uma_machdep.c b/sys/amd64/amd64/uma_machdep.c index c4ca677..eab79d2 100644 --- a/sys/amd64/amd64/uma_machdep.c +++ b/sys/amd64/amd64/uma_machdep.c @@ -32,6 +32,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -80,5 +81,5 @@ uma_small_free(void *mem, int size, u_int8_t flags) m = PHYS_TO_VM_PAGE(pa); m->wire_count--; vm_page_free(m); - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); } diff --git a/sys/amd64/include/counter.h b/sys/amd64/include/counter.h index b571e70..f3e85f0 100644 --- a/sys/amd64/include/counter.h +++ b/sys/amd64/include/counter.h @@ -31,7 +31,7 @@ #include -extern struct pcpu __pcpu[1]; +extern struct pcpu __pcpu[]; #define counter_enter() do {} while (0) #define counter_exit() do {} while (0) diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c index 4b8f4cc..60cb677 100644 --- a/sys/arm/arm/genassym.c +++ b/sys/arm/arm/genassym.c @@ -87,10 +87,6 @@ ASSYM(CF_L2CACHE_WBINV_ALL, offsetof(struct cpu_functions, cf_l2cache_wbinv_all) ASSYM(CF_TLB_FLUSHID_SE, offsetof(struct cpu_functions, cf_tlb_flushID_SE)); ASSYM(CF_ICACHE_SYNC, offsetof(struct cpu_functions, cf_icache_sync_all)); -ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); -ASSYM(V_SOFT, offsetof(struct vmmeter, v_soft)); -ASSYM(V_INTR, offsetof(struct vmmeter, v_intr)); - ASSYM(TD_PCB, offsetof(struct thread, td_pcb)); ASSYM(TD_FLAGS, offsetof(struct thread, td_flags)); ASSYM(TD_PROC, offsetof(struct thread, td_proc)); diff --git a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c index d949acb..6a42606 100644 --- a/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c +++ b/sys/cddl/contrib/opensolaris/uts/common/fs/zfs/zfs_vnops.c @@ -73,6 +73,7 @@ #include #include #include +#include #include #include @@ -5701,8 +5702,8 @@ zfs_getpages(struct vnode *vp, vm_page_t *m, int count, int reqpage) return (zfs_vm_pagerret_ok); } - PCPU_INC(cnt.v_vnodein); - PCPU_ADD(cnt.v_vnodepgsin, reqsize); + VMMETER_INC(v_vnodein); + VMMETER_ADD(v_vnodepgsin, reqsize); if (IDX_TO_OFF(mreq->pindex) >= object->un_pager.vnp.vnp_size) { for (i = reqstart; i < reqstart + reqsize; i++) { diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c index 28b683a..bf2e6a7 100644 --- 
a/sys/compat/linprocfs/linprocfs.c +++ b/sys/compat/linprocfs/linprocfs.c @@ -479,17 +479,17 @@ linprocfs_dostat(PFS_FILL_ARGS) } sbuf_printf(sb, "disk 0 0 0 0\n" - "page %u %u\n" - "swap %u %u\n" - "intr %u\n" - "ctxt %u\n" + "page %ju %ju\n" + "swap %ju %ju\n" + "intr %ju\n" + "ctxt %ju\n" "btime %lld\n", - cnt.v_vnodepgsin, - cnt.v_vnodepgsout, - cnt.v_swappgsin, - cnt.v_swappgsout, - cnt.v_intr, - cnt.v_swtch, + (uintmax_t)VMMETER_FETCH(v_vnodepgsin), + (uintmax_t)VMMETER_FETCH(v_vnodepgsout), + (uintmax_t)VMMETER_FETCH(v_swappgsin), + (uintmax_t)VMMETER_FETCH(v_swappgsout), + (uintmax_t)VMMETER_FETCH(v_intr), + (uintmax_t)VMMETER_FETCH(v_swtch), (long long)boottime.tv_sec); return (0); } diff --git a/sys/fs/fuse/fuse_vnops.c b/sys/fs/fuse/fuse_vnops.c index 69c511d..7d96cd5 100644 --- a/sys/fs/fuse/fuse_vnops.c +++ b/sys/fs/fuse/fuse_vnops.c @@ -82,6 +82,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include @@ -1783,8 +1784,8 @@ fuse_vnop_getpages(struct vop_getpages_args *ap) kva = (vm_offset_t)bp->b_data; pmap_qenter(kva, pages, npages); - PCPU_INC(cnt.v_vnodein); - PCPU_ADD(cnt.v_vnodepgsin, npages); + VMMETER_INC(v_vnodein); + VMMETER_ADD(v_vnodepgsin, npages); iov.iov_base = (caddr_t)kva; iov.iov_len = count; @@ -1952,8 +1953,8 @@ fuse_vnop_putpages(struct vop_putpages_args *ap) kva = (vm_offset_t)bp->b_data; pmap_qenter(kva, pages, npages); - PCPU_INC(cnt.v_vnodeout); - PCPU_ADD(cnt.v_vnodepgsout, count); + VMMETER_INC(v_vnodeout); + VMMETER_ADD(v_vnodepgsout, count); iov.iov_base = (caddr_t)kva; iov.iov_len = count; diff --git a/sys/fs/msdosfs/msdosfs_denode.c b/sys/fs/msdosfs/msdosfs_denode.c index 501604a..2120cfe 100644 --- a/sys/fs/msdosfs/msdosfs_denode.c +++ b/sys/fs/msdosfs/msdosfs_denode.c @@ -55,6 +55,7 @@ #include #include #include +#include #include #include diff --git a/sys/fs/msdosfs/msdosfs_vnops.c b/sys/fs/msdosfs/msdosfs_vnops.c index da9a5df..12ad579 100644 --- a/sys/fs/msdosfs/msdosfs_vnops.c +++ b/sys/fs/msdosfs/msdosfs_vnops.c @@ -63,6 +63,7 @@ #include #include #include +#include #include #include diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c index 47c2f35..663576c 100644 --- a/sys/fs/nfsclient/nfs_clbio.c +++ b/sys/fs/nfsclient/nfs_clbio.c @@ -157,8 +157,8 @@ ncl_getpages(struct vop_getpages_args *ap) kva = (vm_offset_t) bp->b_data; pmap_qenter(kva, pages, npages); - PCPU_INC(cnt.v_vnodein); - PCPU_ADD(cnt.v_vnodepgsin, npages); + VMMETER_INC(v_vnodein); + VMMETER_ADD(v_vnodepgsin, npages); iov.iov_base = (caddr_t) kva; iov.iov_len = count; @@ -308,8 +308,8 @@ ncl_putpages(struct vop_putpages_args *ap) kva = (vm_offset_t) bp->b_data; pmap_qenter(kva, pages, npages); - PCPU_INC(cnt.v_vnodeout); - PCPU_ADD(cnt.v_vnodepgsout, count); + VMMETER_INC(v_vnodeout); + VMMETER_ADD(v_vnodepgsout, count); iov.iov_base = (caddr_t) kva; iov.iov_len = count; diff --git a/sys/i386/i386/genassym.c b/sys/i386/i386/genassym.c index 2394c09..acc059f 100644 --- a/sys/i386/i386/genassym.c +++ b/sys/i386/i386/genassym.c @@ -98,9 +98,6 @@ ASSYM(MD_LDT, offsetof(struct mdproc, md_ldt)); ASSYM(TDF_ASTPENDING, TDF_ASTPENDING); ASSYM(TDF_NEEDRESCHED, TDF_NEEDRESCHED); -ASSYM(V_TRAP, offsetof(struct vmmeter, v_trap)); -ASSYM(V_SYSCALL, offsetof(struct vmmeter, v_syscall)); -ASSYM(V_INTR, offsetof(struct vmmeter, v_intr)); /* ASSYM(UPAGES, UPAGES);*/ ASSYM(KSTACK_PAGES, KSTACK_PAGES); ASSYM(PAGE_SIZE, PAGE_SIZE); diff --git a/sys/kern/init_main.c b/sys/kern/init_main.c index 5d5346e..a7a5085 100644 --- a/sys/kern/init_main.c +++ 
b/sys/kern/init_main.c @@ -549,7 +549,7 @@ proc0_init(void *dummy __unused) p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_cur = dflssiz; p->p_limit->pl_rlimit[RLIMIT_STACK].rlim_max = maxssiz; /* Cast to avoid overflow on i386/PAE. */ - pageablemem = ptoa((vm_paddr_t)cnt.v_free_count); + pageablemem = ptoa((vm_paddr_t)vmmeter.v_free_count); p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_cur = p->p_limit->pl_rlimit[RLIMIT_RSS].rlim_max = pageablemem; p->p_limit->pl_rlimit[RLIMIT_MEMLOCK].rlim_cur = pageablemem / 3; diff --git a/sys/kern/kern_fork.c b/sys/kern/kern_fork.c index 741665f..10e38af 100644 --- a/sys/kern/kern_fork.c +++ b/sys/kern/kern_fork.c @@ -635,20 +635,20 @@ do_fork(struct thread *td, int flags, struct proc *p2, struct thread *td2, vm_forkproc(td, p2, td2, vm2, flags); if (flags == (RFFDG | RFPROC)) { - PCPU_INC(cnt.v_forks); - PCPU_ADD(cnt.v_forkpages, p2->p_vmspace->vm_dsize + + VMMETER_INC(v_forks); + VMMETER_ADD(v_forkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (flags == (RFFDG | RFPROC | RFPPWAIT | RFMEM)) { - PCPU_INC(cnt.v_vforks); - PCPU_ADD(cnt.v_vforkpages, p2->p_vmspace->vm_dsize + + VMMETER_INC(v_vforks); + VMMETER_ADD(v_vforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else if (p1 == &proc0) { - PCPU_INC(cnt.v_kthreads); - PCPU_ADD(cnt.v_kthreadpages, p2->p_vmspace->vm_dsize + + VMMETER_INC(v_kthreads); + VMMETER_ADD(v_kthreadpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } else { - PCPU_INC(cnt.v_rforks); - PCPU_ADD(cnt.v_rforkpages, p2->p_vmspace->vm_dsize + + VMMETER_INC(v_rforks); + VMMETER_ADD(v_rforkpages, p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize); } diff --git a/sys/kern/kern_intr.c b/sys/kern/kern_intr.c index 63d8469..fbe36c5 100644 --- a/sys/kern/kern_intr.c +++ b/sys/kern/kern_intr.c @@ -1158,7 +1158,7 @@ swi_sched(void *cookie, int flags) atomic_store_rel_int(&ih->ih_need, 1); if (!(flags & SWI_DELAY)) { - PCPU_INC(cnt.v_soft); + VMMETER_INC(v_soft); #ifdef INTR_FILTER error = intr_event_schedule_thread(ie, ie->ie_thread); #else diff --git a/sys/kern/kern_malloc.c b/sys/kern/kern_malloc.c index 05fd5e5..e49c748 100644 --- a/sys/kern/kern_malloc.c +++ b/sys/kern/kern_malloc.c @@ -705,7 +705,7 @@ kmeminit(void *dummy) * so make sure that there is enough space. 
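The PCPU_INC()/PCPU_ADD() call sites being rewritten above go through the VMMETER_* wrappers that this patch adds to sys/sys/vmmeter.h; they are thin macros over counter(9). A minimal restatement of those macros plus a hypothetical call site mirroring the do_fork() accounting above (the function name here is mine, for illustration only):

        /* As defined later in this patch (sys/sys/vmmeter.h). */
        #define VMMETER_ADD(var, x)     counter_u64_add(vmmeter.var, x)
        #define VMMETER_INC(var)        VMMETER_ADD(var, 1)
        #define VMMETER_FETCH(var)      counter_u64_fetch(vmmeter.var)

        /* Hypothetical call site, mirroring do_fork() above. */
        static void
        fork_stats_example(struct proc *p2)
        {

                VMMETER_INC(v_forks);
                VMMETER_ADD(v_forkpages,
                    p2->p_vmspace->vm_dsize + p2->p_vmspace->vm_ssize);
        }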
*/ vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE; - mem_size = cnt.v_page_count; + mem_size = vmmeter.v_page_count; #if defined(VM_KMEM_SIZE_SCALE) vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; @@ -796,7 +796,7 @@ malloc_init(void *data) struct malloc_type_internal *mtip; struct malloc_type *mtp; - KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init")); + KASSERT(vmmeter.v_page_count != 0, ("malloc_register before vm_init")); mtp = data; if (mtp->ks_magic != M_MAGIC) diff --git a/sys/kern/kern_mib.c b/sys/kern/kern_mib.c index c84d4b2..bdc8c33 100644 --- a/sys/kern/kern_mib.c +++ b/sys/kern/kern_mib.c @@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include SYSCTL_NODE(, 0, sysctl, CTLFLAG_RW, 0, @@ -199,7 +200,7 @@ sysctl_hw_usermem(SYSCTL_HANDLER_ARGS) { u_long val; - val = ctob(physmem - cnt.v_wire_count); + val = ctob(physmem - vmmeter.v_wire_count); return (sysctl_handle_long(oidp, &val, 0, req)); } diff --git a/sys/kern/kern_synch.c b/sys/kern/kern_synch.c index 84b1c61..256018d 100644 --- a/sys/kern/kern_synch.c +++ b/sys/kern/kern_synch.c @@ -467,7 +467,7 @@ mi_switch(int flags, struct thread *newtd) td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); td->td_generation++; /* bump preempt-detect counter */ - PCPU_INC(cnt.v_swtch); + VMMETER_INC(v_swtch); PCPU_SET(switchticks, ticks); CTR4(KTR_PROC, "mi_switch: old thread %ld (td_sched %p, pid %ld, %s)", td->td_tid, td->td_sched, p->p_pid, td->td_name); diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c index 5da4866..a9d1f20 100644 --- a/sys/kern/kern_thread.c +++ b/sys/kern/kern_thread.c @@ -50,6 +50,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #ifdef HWPMC_HOOKS #include @@ -479,7 +480,7 @@ thread_exit(void) td->td_incruntime += runtime; PCPU_SET(switchtime, new_switchtime); PCPU_SET(switchticks, ticks); - PCPU_INC(cnt.v_swtch); + VMMETER_INC(v_swtch); /* Save our resource usage in our process. */ td->td_ru.ru_nvcsw++; diff --git a/sys/kern/subr_syscall.c b/sys/kern/subr_syscall.c index 3d6dc5a..1711875 100644 --- a/sys/kern/subr_syscall.c +++ b/sys/kern/subr_syscall.c @@ -58,7 +58,7 @@ syscallenter(struct thread *td, struct syscall_args *sa) struct proc *p; int error, traced; - PCPU_INC(cnt.v_syscall); + VMMETER_INC(v_syscall); p = td->td_proc; td->td_pticks = 0; diff --git a/sys/kern/subr_trap.c b/sys/kern/subr_trap.c index 19729a4..b8b05af 100644 --- a/sys/kern/subr_trap.c +++ b/sys/kern/subr_trap.c @@ -215,7 +215,7 @@ ast(struct trapframe *framep) td->td_flags &= ~(TDF_ASTPENDING | TDF_NEEDSIGCHK | TDF_NEEDSUSPCHK | TDF_NEEDRESCHED | TDF_ALRMPEND | TDF_PROFPEND | TDF_MACPEND); thread_unlock(td); - PCPU_INC(cnt.v_trap); + VMMETER_INC(v_trap); if (td->td_ucred != p->p_ucred) cred_update_thread(td); diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c index f6d5a85..d77575d 100644 --- a/sys/kern/vfs_bio.c +++ b/sys/kern/vfs_bio.c @@ -4270,7 +4270,7 @@ vm_hold_free_pages(struct buf *bp, int newbsize) (intmax_t)bp->b_blkno, (intmax_t)bp->b_lblkno); p->wire_count--; vm_page_free(p); - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); } bp->b_npages = newnpages; } diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c index e64f379..526fc02 100644 --- a/sys/kern/vfs_subr.c +++ b/sys/kern/vfs_subr.c @@ -330,8 +330,8 @@ vntblinit(void *dummy __unused) * size. The memory required by desiredvnodes vnodes and vm objects * may not exceed one seventh of the kernel's heap size. 
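Statistics that are read as instantaneous values rather than accumulated events (v_page_count, v_free_count, v_wire_count, and the tunable thresholds) stay plain u_int in the new struct, so call sites such as sysctl_hw_usermem() above keep reading the field directly. A sketch of the distinction, as an illustrative function rather than a real call site:

        static void
        report_example(void)
        {
                u_long usermem;
                uint64_t traps;

                /* Instantaneous state: plain u_int fields, read in place. */
                usermem = ctob(physmem - vmmeter.v_wire_count);

                /* Accumulated events: per-CPU counters, summed on demand. */
                traps = VMMETER_FETCH(v_trap);

                printf("usermem=%lu bytes, traps=%ju\n", usermem,
                    (uintmax_t)traps);
        }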
*/ - physvnodes = maxproc + cnt.v_page_count / 16 + 3 * min(98304 * 4, - cnt.v_page_count) / 16; + physvnodes = maxproc + vmmeter.v_page_count / 16 + 3 * min(98304 * 4, + vmmeter.v_page_count) / 16; virtvnodes = vm_kmem_size / (7 * (sizeof(struct vm_object) + sizeof(struct vnode))); desiredvnodes = min(physvnodes, virtvnodes); @@ -708,7 +708,7 @@ vlrureclaim(struct mount *mp) usevnodes = desiredvnodes; if (usevnodes <= 0) usevnodes = 1; - trigger = cnt.v_page_count * 2 / usevnodes; + trigger = vmmeter.v_page_count * 2 / usevnodes; done = 0; vn_start_write(NULL, &mp, V_WAIT); MNT_ILOCK(mp); diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c index 630a7ff..21daccb 100644 --- a/sys/nfsclient/nfs_bio.c +++ b/sys/nfsclient/nfs_bio.c @@ -151,8 +151,8 @@ nfs_getpages(struct vop_getpages_args *ap) kva = (vm_offset_t) bp->b_data; pmap_qenter(kva, pages, npages); - PCPU_INC(cnt.v_vnodein); - PCPU_ADD(cnt.v_vnodepgsin, npages); + VMMETER_INC(v_vnodein); + VMMETER_ADD(v_vnodepgsin, npages); iov.iov_base = (caddr_t) kva; iov.iov_len = count; @@ -302,8 +302,8 @@ nfs_putpages(struct vop_putpages_args *ap) kva = (vm_offset_t) bp->b_data; pmap_qenter(kva, pages, npages); - PCPU_INC(cnt.v_vnodeout); - PCPU_ADD(cnt.v_vnodepgsout, count); + VMMETER_INC(v_vnodeout); + VMMETER_ADD(v_vnodepgsout, count); iov.iov_base = (caddr_t) kva; iov.iov_len = count; diff --git a/sys/sys/pcpu.h b/sys/sys/pcpu.h index c73cc53..17da861 100644 --- a/sys/sys/pcpu.h +++ b/sys/sys/pcpu.h @@ -43,7 +43,6 @@ #include #include #include -#include #include #include @@ -158,7 +157,6 @@ struct pcpu { u_int pc_cpuid; /* This cpu number */ STAILQ_ENTRY(pcpu) pc_allcpu; struct lock_list_entry *pc_spinlocks; - struct vmmeter pc_cnt; /* VM stats counters */ long pc_cp_time[CPUSTATES]; /* statclock ticks */ struct device *pc_device; void *pc_netisr; /* netisr SWI cookie */ @@ -185,7 +183,7 @@ struct pcpu { * To minimize memory waste in per-cpu UMA zones, size of struct pcpu * should be denominator of PAGE_SIZE. */ -CTASSERT((PAGE_SIZE / sizeof(struct pcpu)) * sizeof(struct pcpu) == PAGE_SIZE); +// CTASSERT((PAGE_SIZE / sizeof(struct pcpu)) * sizeof(struct pcpu) == PAGE_SIZE); #endif #ifdef _KERNEL diff --git a/sys/sys/vmmeter.h b/sys/sys/vmmeter.h index 59b430d..e006f2e 100644 --- a/sys/sys/vmmeter.h +++ b/sys/sys/vmmeter.h @@ -33,6 +33,8 @@ #ifndef _SYS_VMMETER_H_ #define _SYS_VMMETER_H_ +#include + /* * This value is used by ps(1) to change sleep state flag from 'S' to * 'I' and by the sched process to set the alarm clock. @@ -45,78 +47,82 @@ * a - locked by atomic operations * c - constant after initialization * f - locked by vm_page_queue_free_mtx - * p - locked by being in the PCPU and atomicity respect to interrupts + * p - uses per-cpu counter_u64_t * q - changes are synchronized by the corresponding vm_pagequeue lock */ struct vmmeter { /* * General system activity. */ - u_int v_swtch; /* (p) context switches */ - u_int v_trap; /* (p) calls to trap */ - u_int v_syscall; /* (p) calls to syscall() */ - u_int v_intr; /* (p) device interrupts */ - u_int v_soft; /* (p) software interrupts */ + counter_u64_t v_swtch; /* (p) context switches */ + counter_u64_t v_trap; /* (p) calls to trap */ + counter_u64_t v_syscall; /* (p) calls to syscall() */ + counter_u64_t v_intr; /* (p) device interrupts */ + counter_u64_t v_soft; /* (p) software interrupts */ /* - * Virtual memory activity. + * Fork/vfork/rfork activity. 
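Dropping pc_cnt from struct pcpu above is what makes the genassym.c deletions earlier in this patch possible: assembly no longer bumps V_TRAP/V_SYSCALL/V_INTR offsets inside the per-CPU area, and it is presumably also why the PAGE_SIZE-divisibility CTASSERT is disabled. Roughly, the old and new increment paths compare as in this simplified sketch (the exact PCPU_INC expansion varies by architecture):

        static void
        trap_count_example(void)
        {

                /*
                 * Before this patch: PCPU_INC(cnt.v_trap) bumped a u_int
                 * inside the current CPU's struct pcpu (pc_cnt), and
                 * low-level assembly could do the same through the V_TRAP
                 * offset emitted by genassym.c.
                 *
                 * After: the per-CPU storage is hidden behind counter(9),
                 * so C code only sees the global handle and the assembly
                 * offsets go away.
                 */
                VMMETER_INC(v_trap);
        }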
*/ - u_int v_vm_faults; /* (p) address memory faults */ - u_int v_io_faults; /* (p) page faults requiring I/O */ - u_int v_cow_faults; /* (p) copy-on-writes faults */ - u_int v_cow_optim; /* (p) optimized copy-on-writes faults */ - u_int v_zfod; /* (p) pages zero filled on demand */ - u_int v_ozfod; /* (p) optimized zero fill pages */ - u_int v_swapin; /* (p) swap pager pageins */ - u_int v_swapout; /* (p) swap pager pageouts */ - u_int v_swappgsin; /* (p) swap pager pages paged in */ - u_int v_swappgsout; /* (p) swap pager pages paged out */ - u_int v_vnodein; /* (p) vnode pager pageins */ - u_int v_vnodeout; /* (p) vnode pager pageouts */ - u_int v_vnodepgsin; /* (p) vnode_pager pages paged in */ - u_int v_vnodepgsout; /* (p) vnode pager pages paged out */ - u_int v_intrans; /* (p) intransit blocking page faults */ - u_int v_reactivated; /* (f) pages reactivated from free list */ - u_int v_pdwakeups; /* (f) times daemon has awaken from sleep */ - u_int v_pdpages; /* (p) pages analyzed by daemon */ - - u_int v_tcached; /* (p) total pages cached */ - u_int v_dfree; /* (p) pages freed by daemon */ - u_int v_pfree; /* (p) pages freed by exiting processes */ - u_int v_tfree; /* (p) total pages freed */ + counter_u64_t v_forks; /* (p) fork() calls */ + counter_u64_t v_vforks; /* (p) vfork() calls */ + counter_u64_t v_rforks; /* (p) rfork() calls */ + counter_u64_t v_kthreads; /* (p) fork() calls by kernel */ + counter_u64_t v_forkpages; /* (p) VM pages affected by fork() */ + counter_u64_t v_vforkpages; /* (p) VM pages affected by vfork() */ + counter_u64_t v_rforkpages; /* (p) VM pages affected by rfork() */ + counter_u64_t v_kthreadpages; /* (p) VM pages affected by fork() by kernel */ /* - * Distribution of page usages. + * Virtual memory activity. */ - u_int v_page_size; /* (c) page size in bytes */ - u_int v_page_count; /* (c) total number of pages in system */ - u_int v_free_reserved; /* (c) pages reserved for deadlock */ - u_int v_free_target; /* (c) pages desired free */ - u_int v_free_min; /* (c) pages desired free */ - u_int v_free_count; /* (f) pages free */ - u_int v_wire_count; /* (a) pages wired down */ - u_int v_active_count; /* (q) pages active */ - u_int v_inactive_target; /* (c) pages desired inactive */ - u_int v_inactive_count; /* (q) pages inactive */ - u_int v_cache_count; /* (f) pages on cache queue */ - u_int v_cache_min; /* (c) min pages desired on cache queue */ - u_int v_cache_max; /* (c) max pages in cached obj */ - u_int v_pageout_free_min; /* (c) min pages reserved for kernel */ - u_int v_interrupt_free_min; /* (c) reserved pages for int code */ - u_int v_free_severe; /* (c) severe page depletion point */ + counter_u64_t v_vm_faults; /* (p) address memory faults */ + counter_u64_t v_io_faults; /* (p) page faults requiring I/O */ + counter_u64_t v_cow_faults; /* (p) copy-on-writes faults */ + counter_u64_t v_cow_optim; /* (p) optimized copy-on-writes faults */ + counter_u64_t v_zfod; /* (p) pages zero filled on demand */ + counter_u64_t v_ozfod; /* (p) optimized zero fill pages */ + counter_u64_t v_swapin; /* (p) swap pager pageins */ + counter_u64_t v_swapout; /* (p) swap pager pageouts */ + counter_u64_t v_swappgsin; /* (p) swap pager pages paged in */ + counter_u64_t v_swappgsout; /* (p) swap pager pages paged out */ + counter_u64_t v_vnodein; /* (p) vnode pager pageins */ + counter_u64_t v_vnodeout; /* (p) vnode pager pageouts */ + counter_u64_t v_vnodepgsin; /* (p) vnode_pager pages paged in */ + counter_u64_t v_vnodepgsout; /* (p) vnode pager pages paged out 
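The struct members above change type from u_int to counter_u64_t, which in counter(9) is a handle to per-CPU 64-bit storage: it must be allocated before the first add, updates are lock-free, and a fetch sums all CPUs. A stand-alone sketch of the API this patch relies on (names here are illustrative):

        #include <sys/param.h>
        #include <sys/systm.h>
        #include <sys/counter.h>
        #include <sys/malloc.h>

        static counter_u64_t example_counter;

        static void
        example_counter_init(void)
        {

                /* Allocate per-CPU storage; M_WAITOK may sleep. */
                example_counter = counter_u64_alloc(M_WAITOK);
        }

        static void
        example_counter_use(void)
        {

                counter_u64_add(example_counter, 1);    /* lock-free */
                printf("total: %ju\n",
                    (uintmax_t)counter_u64_fetch(example_counter));
        }

        static void
        example_counter_fini(void)
        {

                counter_u64_free(example_counter);
        }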
*/ + counter_u64_t v_intrans; /* (p) intransit blocking page faults */ + u_int v_reactivated; /* (f) pages reactivated from free list */ + u_int v_pdwakeups; /* (f) times daemon has awaken from sleep */ + counter_u64_t v_pdpages; /* (p) pages analyzed by daemon */ + + counter_u64_t v_tcached; /* (p) total pages cached */ + counter_u64_t v_dfree; /* (p) pages freed by daemon */ + counter_u64_t v_pfree; /* (p) pages freed by exiting processes */ + counter_u64_t v_tfree; /* (p) total pages freed */ /* - * Fork/vfork/rfork activity. + * Distribution of page usages. */ - u_int v_forks; /* (p) fork() calls */ - u_int v_vforks; /* (p) vfork() calls */ - u_int v_rforks; /* (p) rfork() calls */ - u_int v_kthreads; /* (p) fork() calls by kernel */ - u_int v_forkpages; /* (p) VM pages affected by fork() */ - u_int v_vforkpages; /* (p) VM pages affected by vfork() */ - u_int v_rforkpages; /* (p) VM pages affected by rfork() */ - u_int v_kthreadpages; /* (p) VM pages affected by fork() by kernel */ + u_int v_page_size; /* (c) page size in bytes */ + u_int v_page_count; /* (c) total number of pages in system */ + u_int v_free_reserved; /* (c) pages reserved for deadlock */ + u_int v_free_target; /* (c) pages desired free */ + u_int v_free_min; /* (c) pages desired free */ + u_int v_free_count; /* (f) pages free */ + u_int v_wire_count; /* (a) pages wired down */ + u_int v_active_count; /* (q) pages active */ + u_int v_inactive_target; /* (c) pages desired inactive */ + u_int v_inactive_count; /* (q) pages inactive */ + u_int v_cache_count; /* (f) pages on cache queue */ + u_int v_cache_min; /* (c) min pages desired on cache queue */ + u_int v_cache_max; /* (c) max pages in cached obj */ + u_int v_pageout_free_min; /* (c) min pages reserved for kernel */ + u_int v_interrupt_free_min; /* (c) reserved pages for int code */ + u_int v_free_severe; /* (c) severe page depletion point */ }; #ifdef _KERNEL -extern struct vmmeter cnt; +extern struct vmmeter vmmeter; + +#define VMMETER_ADD(var, x) counter_u64_add(vmmeter.var, x) +#define VMMETER_INC(var) VMMETER_ADD(var, 1) +#define VMMETER_FETCH(var) counter_u64_fetch(vmmeter.var) /* * Return TRUE if we are under our severe low-free-pages threshold @@ -129,7 +135,8 @@ static __inline int vm_page_count_severe(void) { - return (cnt.v_free_severe > (cnt.v_free_count + cnt.v_cache_count)); + return (vmmeter.v_free_severe > + (vmmeter.v_free_count + vmmeter.v_cache_count)); } /* @@ -146,7 +153,8 @@ static __inline int vm_page_count_min(void) { - return (cnt.v_free_min > (cnt.v_free_count + cnt.v_cache_count)); + return (vmmeter.v_free_min > + (vmmeter.v_free_count + vmmeter.v_cache_count)); } /* @@ -158,7 +166,8 @@ static __inline int vm_page_count_target(void) { - return (cnt.v_free_target > (cnt.v_free_count + cnt.v_cache_count)); + return (vmmeter.v_free_target > + (vmmeter.v_free_count + vmmeter.v_cache_count)); } /* @@ -170,10 +179,10 @@ static __inline int vm_paging_target(void) { - return ( - (cnt.v_free_target + cnt.v_cache_min) - - (cnt.v_free_count + cnt.v_cache_count) - ); + return ( + (vmmeter.v_free_target + vmmeter.v_cache_min) - + (vmmeter.v_free_count + vmmeter.v_cache_count) + ); } /* @@ -184,10 +193,10 @@ static __inline int vm_paging_needed(void) { - return ( - (cnt.v_free_reserved + cnt.v_cache_min) > - (cnt.v_free_count + cnt.v_cache_count) - ); + return ( + (vmmeter.v_free_reserved + vmmeter.v_cache_min) > + (vmmeter.v_free_count + vmmeter.v_cache_count) + ); } #endif diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c index 
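The inline helpers above (vm_page_count_severe() and friends) keep comparing the plain u_int page counts, which continue to be maintained under vm_page_queue_free_mtx or with atomics, while pure event tallies go through the counters. A side-by-side sketch of the three update disciplines; this is an illustration, not a real call site:

        static void
        vmmeter_update_idioms(void)
        {

                /* Event tally: lock-free per-CPU counter. */
                VMMETER_INC(v_tfree);

                /* (f) fields: modified under vm_page_queue_free_mtx. */
                mtx_lock(&vm_page_queue_free_mtx);
                vmmeter.v_free_count++;
                mtx_unlock(&vm_page_queue_free_mtx);

                /* (a) fields: plain u_int updated with atomics, as in pmap.c. */
                atomic_subtract_int(&vmmeter.v_wire_count, 1);
        }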
57f092c..d8cf1f8 100644 --- a/sys/ufs/ffs/ffs_vfsops.c +++ b/sys/ufs/ffs/ffs_vfsops.c @@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include diff --git a/sys/vm/swap_pager.c b/sys/vm/swap_pager.c index d5e942e..b402767 100644 --- a/sys/vm/swap_pager.c +++ b/sys/vm/swap_pager.c @@ -203,7 +203,8 @@ swap_reserve_by_cred(vm_ooffset_t incr, struct ucred *cred) mtx_lock(&sw_dev_mtx); r = swap_reserved + incr; if (overcommit & SWAP_RESERVE_ALLOW_NONWIRED) { - s = cnt.v_page_count - cnt.v_free_reserved - cnt.v_wire_count; + s = vmmeter.v_page_count - vmmeter.v_free_reserved - + vmmeter.v_wire_count; s *= PAGE_SIZE; } else s = 0; @@ -545,7 +546,7 @@ swap_pager_swap_init(void) * can hold 16 pages, so this is probably overkill. This reservation * is typically limited to around 32MB by default. */ - n = cnt.v_page_count / 2; + n = vmmeter.v_page_count / 2; if (maxswzone && n > maxswzone / sizeof(struct swblock)) n = maxswzone / sizeof(struct swblock); n2 = n; @@ -1188,8 +1189,8 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage) } bp->b_npages = j - i; - PCPU_INC(cnt.v_swapin); - PCPU_ADD(cnt.v_swappgsin, bp->b_npages); + VMMETER_INC(v_swapin); + VMMETER_ADD(v_swappgsin, bp->b_npages); /* * We still hold the lock on mreq, and our automatic completion routine @@ -1220,7 +1221,7 @@ swap_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage) VM_OBJECT_WLOCK(object); while ((mreq->oflags & VPO_SWAPINPROG) != 0) { mreq->oflags |= VPO_WANTED; - PCPU_INC(cnt.v_intrans); + VMMETER_INC(v_intrans); if (VM_OBJECT_SLEEP(object, mreq, PSWP, "swread", hz * 20)) { printf( "swap_pager: indefinite wait buffer: bufobj: %p, blkno: %jd, size: %ld\n", @@ -1408,8 +1409,8 @@ swap_pager_putpages(vm_object_t object, vm_page_t *m, int count, bp->b_dirtyoff = 0; bp->b_dirtyend = bp->b_bcount; - PCPU_INC(cnt.v_swapout); - PCPU_ADD(cnt.v_swappgsout, bp->b_npages); + VMMETER_INC(v_swapout); + VMMETER_ADD(v_swappgsout, bp->b_npages); /* * asynchronous @@ -2311,7 +2312,7 @@ swapoff_one(struct swdevt *sp, struct ucred *cred) * of data we will have to page back in, plus an epsilon so * the system doesn't become critically low on swap space. 
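In the pager paths below and above, the pattern is one increment for the pager operation plus a bulk add for the pages it moved; because counter(9) updates are per-CPU and lock-free, they are safe to issue from these I/O paths. A hypothetical helper (name and signature are mine, not part of the patch) capturing the idiom:

        /* Hypothetical helper; mirrors the swap/vnode pager call sites. */
        static void
        pager_count_pagein(int npages)
        {

                VMMETER_INC(v_swapin);                  /* one pager operation */
                VMMETER_ADD(v_swappgsin, npages);       /* pages it moved */
        }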
*/ - if (cnt.v_free_count + cnt.v_cache_count + swap_pager_avail < + if (vmmeter.v_free_count + vmmeter.v_cache_count + swap_pager_avail < nblks + nswap_lowat) { return (ENOMEM); } diff --git a/sys/vm/vm_fault.c b/sys/vm/vm_fault.c index d7cc651..25f7605 100644 --- a/sys/vm/vm_fault.c +++ b/sys/vm/vm_fault.c @@ -135,6 +135,10 @@ struct faultstate { struct vnode *vp; }; +extern int vmmeter_use_early_counters; +extern uint64_t early_vm_faults; +extern uint64_t early_io_faults; + static void vm_fault_cache_behind(const struct faultstate *fs, int distance); static inline void @@ -248,7 +252,10 @@ vm_fault_hold(vm_map_t map, vm_offset_t vaddr, vm_prot_t fault_type, hardfault = 0; growstack = TRUE; - PCPU_INC(cnt.v_vm_faults); + if (__predict_false(!vmmeter_use_early_counters)) + VMMETER_INC(v_vm_faults); + else + early_vm_faults++; fs.vp = NULL; faultcount = reqpage = 0; @@ -384,7 +391,7 @@ RetryFault:; } vm_object_pip_wakeup(fs.object); VM_OBJECT_WUNLOCK(fs.object); - PCPU_INC(cnt.v_intrans); + VMMETER_INC(v_intrans); vm_object_deallocate(fs.first_object); goto RetryFault; } @@ -661,9 +668,9 @@ vnode_locked: if ((fs.m->flags & PG_ZERO) == 0) { pmap_zero_page(fs.m); } else { - PCPU_INC(cnt.v_ozfod); + VMMETER_INC(v_ozfod); } - PCPU_INC(cnt.v_zfod); + VMMETER_INC(v_zfod); fs.m->valid = VM_PAGE_BITS_ALL; break; /* break to PAGE HAS BEEN FOUND */ } else { @@ -747,7 +754,7 @@ vnode_locked: vm_page_busy(fs.m); fs.first_m = fs.m; fs.m = NULL; - PCPU_INC(cnt.v_cow_optim); + VMMETER_INC(v_cow_optim); } else { /* * Oh, well, lets copy it. @@ -783,7 +790,7 @@ vnode_locked: fs.m = fs.first_m; if (!is_first_object_locked) VM_OBJECT_WLOCK(fs.object); - PCPU_INC(cnt.v_cow_faults); + VMMETER_INC(v_cow_faults); curthread->td_cow++; } else { prot &= ~VM_PROT_WRITE; @@ -940,7 +947,10 @@ vnode_locked: */ unlock_and_deallocate(&fs); if (hardfault) { - PCPU_INC(cnt.v_io_faults); + if (__predict_false(!vmmeter_use_early_counters)) + VMMETER_INC(v_io_faults); + else + early_io_faults++; curthread->td_ru.ru_majflt++; } else curthread->td_ru.ru_minflt++; diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c index 1fee839..35c94bc 100644 --- a/sys/vm/vm_map.c +++ b/sys/vm/vm_map.c @@ -303,7 +303,7 @@ vmspace_alloc(min, max) void vm_init2(void) { - uma_zone_reserve_kva(kmapentzone, lmin(cnt.v_page_count, + uma_zone_reserve_kva(kmapentzone, lmin(vmmeter.v_page_count, (VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / PAGE_SIZE) / 8 + maxproc * 2 + maxfiles); vmspace_zone = uma_zcreate("VMSPACE", sizeof(struct vmspace), NULL, @@ -1847,7 +1847,7 @@ vm_map_pmap_enter(vm_map_t map, vm_offset_t addr, vm_prot_t prot, * free pages allocating pv entries. 
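vm_fault() can run before the SYSINIT in vm_meter.c has allocated the counters, so the patch accumulates the earliest faults in plain uint64_t variables and folds them into the real counters at startup. A sketch of the handoff as it appears intended; clearing vmmeter_use_early_counters after the transfer is my assumption and is not visible in the hunks shown here:

        int vmmeter_use_early_counters = 1;    /* true until counters exist */
        uint64_t early_vm_faults, early_io_faults;

        static void
        count_fault_example(void)
        {

                if (!vmmeter_use_early_counters)
                        VMMETER_INC(v_vm_faults);
                else
                        early_vm_faults++;      /* single-threaded early boot */
        }

        static void
        vmcounter_startup_example(void)
        {

                vmmeter.v_vm_faults = counter_u64_alloc(M_WAITOK);
                vmmeter.v_io_faults = counter_u64_alloc(M_WAITOK);
                counter_u64_add(vmmeter.v_vm_faults, early_vm_faults);
                counter_u64_add(vmmeter.v_io_faults, early_io_faults);
                vmmeter_use_early_counters = 0;         /* assumed; see note */
        }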
*/ if ((flags & MAP_PREFAULT_MADVISE) && - cnt.v_free_count < cnt.v_free_reserved) { + vmmeter.v_free_count < vmmeter.v_free_reserved) { psize = tmpidx; break; } diff --git a/sys/vm/vm_meter.c b/sys/vm/vm_meter.c index 713a2be..bac5692 100644 --- a/sys/vm/vm_meter.c +++ b/sys/vm/vm_meter.c @@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -53,24 +54,72 @@ __FBSDID("$FreeBSD$"); #include #include -struct vmmeter cnt; +int vmmeter_use_early_counters = 1; + +uint64_t early_vm_faults = 0; +uint64_t early_io_faults = 0; + +struct vmmeter vmmeter; + +static void +vmcounter_startup(void) +{ + + vmmeter.v_swtch = counter_u64_alloc(M_WAITOK); + vmmeter.v_trap = counter_u64_alloc(M_WAITOK); + vmmeter.v_syscall = counter_u64_alloc(M_WAITOK); + vmmeter.v_intr = counter_u64_alloc(M_WAITOK); + vmmeter.v_soft = counter_u64_alloc(M_WAITOK); + vmmeter.v_forks = counter_u64_alloc(M_WAITOK); + vmmeter.v_vforks = counter_u64_alloc(M_WAITOK); + vmmeter.v_rforks = counter_u64_alloc(M_WAITOK); + vmmeter.v_kthreads = counter_u64_alloc(M_WAITOK); + vmmeter.v_forkpages = counter_u64_alloc(M_WAITOK); + vmmeter.v_vforkpages = counter_u64_alloc(M_WAITOK); + vmmeter.v_rforkpages = counter_u64_alloc(M_WAITOK); + vmmeter.v_kthreadpages = counter_u64_alloc(M_WAITOK); + vmmeter.v_vm_faults = counter_u64_alloc(M_WAITOK); + vmmeter.v_io_faults = counter_u64_alloc(M_WAITOK); + vmmeter.v_cow_faults = counter_u64_alloc(M_WAITOK); + vmmeter.v_cow_optim = counter_u64_alloc(M_WAITOK); + vmmeter.v_zfod = counter_u64_alloc(M_WAITOK); + vmmeter.v_ozfod = counter_u64_alloc(M_WAITOK); + vmmeter.v_swapin = counter_u64_alloc(M_WAITOK); + vmmeter.v_swapout = counter_u64_alloc(M_WAITOK); + vmmeter.v_swappgsin = counter_u64_alloc(M_WAITOK); + vmmeter.v_swappgsout = counter_u64_alloc(M_WAITOK); + vmmeter.v_vnodein = counter_u64_alloc(M_WAITOK); + vmmeter.v_vnodeout = counter_u64_alloc(M_WAITOK); + vmmeter.v_vnodepgsin = counter_u64_alloc(M_WAITOK); + vmmeter.v_vnodepgsout = counter_u64_alloc(M_WAITOK); + vmmeter.v_intrans = counter_u64_alloc(M_WAITOK); + vmmeter.v_pdpages = counter_u64_alloc(M_WAITOK); + vmmeter.v_tcached = counter_u64_alloc(M_WAITOK); + vmmeter.v_dfree = counter_u64_alloc(M_WAITOK); + vmmeter.v_pfree = counter_u64_alloc(M_WAITOK); + vmmeter.v_tfree = counter_u64_alloc(M_WAITOK); + + counter_u64_add(vmmeter.v_vm_faults, early_vm_faults); + counter_u64_add(vmmeter.v_io_faults, early_io_faults); +} +SYSINIT(counter, SI_SUB_CPU, SI_ORDER_FOURTH + 1, vmcounter_startup, NULL); SYSCTL_UINT(_vm, VM_V_FREE_MIN, v_free_min, - CTLFLAG_RW, &cnt.v_free_min, 0, "Minimum low-free-pages threshold"); + CTLFLAG_RW, &vmmeter.v_free_min, 0, "Minimum low-free-pages threshold"); SYSCTL_UINT(_vm, VM_V_FREE_TARGET, v_free_target, - CTLFLAG_RW, &cnt.v_free_target, 0, "Desired free pages"); + CTLFLAG_RW, &vmmeter.v_free_target, 0, "Desired free pages"); SYSCTL_UINT(_vm, VM_V_FREE_RESERVED, v_free_reserved, - CTLFLAG_RW, &cnt.v_free_reserved, 0, "Pages reserved for deadlock"); + CTLFLAG_RW, &vmmeter.v_free_reserved, 0, "Pages reserved for deadlock"); SYSCTL_UINT(_vm, VM_V_INACTIVE_TARGET, v_inactive_target, - CTLFLAG_RW, &cnt.v_inactive_target, 0, "Pages desired inactive"); + CTLFLAG_RW, &vmmeter.v_inactive_target, 0, "Pages desired inactive"); SYSCTL_UINT(_vm, VM_V_CACHE_MIN, v_cache_min, - CTLFLAG_RW, &cnt.v_cache_min, 0, "Min pages on cache queue"); + CTLFLAG_RW, &vmmeter.v_cache_min, 0, "Min pages on cache queue"); SYSCTL_UINT(_vm, VM_V_CACHE_MAX, v_cache_max, - CTLFLAG_RW, 
&cnt.v_cache_max, 0, "Max pages on cache queue"); + CTLFLAG_RW, &vmmeter.v_cache_max, 0, "Max pages on cache queue"); SYSCTL_UINT(_vm, VM_V_PAGEOUT_FREE_MIN, v_pageout_free_min, - CTLFLAG_RW, &cnt.v_pageout_free_min, 0, "Min pages reserved for kernel"); + CTLFLAG_RW, &vmmeter.v_pageout_free_min, 0, "Min pages reserved for kernel"); SYSCTL_UINT(_vm, OID_AUTO, v_free_severe, - CTLFLAG_RW, &cnt.v_free_severe, 0, "Severe page depletion point"); + CTLFLAG_RW, &vmmeter.v_free_severe, 0, "Severe page depletion point"); static int sysctl_vm_loadavg(SYSCTL_HANDLER_ARGS) @@ -231,36 +280,10 @@ vmtotal(SYSCTL_HANDLER_ARGS) } } mtx_unlock(&vm_object_list_mtx); - total.t_free = cnt.v_free_count + cnt.v_cache_count; + total.t_free = vmmeter.v_free_count + vmmeter.v_cache_count; return (sysctl_handle_opaque(oidp, &total, sizeof(total), req)); } -/* - * vcnt() - accumulate statistics from all cpus and the global cnt - * structure. - * - * The vmmeter structure is now per-cpu as well as global. Those - * statistics which can be kept on a per-cpu basis (to avoid cache - * stalls between cpus) can be moved to the per-cpu vmmeter. Remaining - * statistics, such as v_free_reserved, are left in the global - * structure. - * - * (sysctl_oid *oidp, void *arg1, int arg2, struct sysctl_req *req) - */ -static int -vcnt(SYSCTL_HANDLER_ARGS) -{ - int count = *(int *)arg1; - int offset = (char *)arg1 - (char *)&cnt; - int i; - - CPU_FOREACH(i) { - struct pcpu *pcpu = pcpu_find(i); - count += *(int *)((char *)&pcpu->pc_cnt + offset); - } - return (SYSCTL_OUT(req, &count, sizeof(int))); -} - SYSCTL_PROC(_vm, VM_TOTAL, vmtotal, CTLTYPE_OPAQUE|CTLFLAG_RD|CTLFLAG_MPSAFE, 0, sizeof(struct vmtotal), vmtotal, "S,vmtotal", "System virtual memory statistics"); @@ -272,39 +295,44 @@ static SYSCTL_NODE(_vm_stats, OID_AUTO, vm, CTLFLAG_RW, 0, SYSCTL_NODE(_vm_stats, OID_AUTO, misc, CTLFLAG_RW, 0, "VM meter misc stats"); #define VM_STATS(parent, var, descr) \ - SYSCTL_PROC(parent, OID_AUTO, var, \ - CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, &cnt.var, 0, vcnt, \ - "IU", descr) + SYSCTL_UINT(parent, OID_AUTO, var, CTLFLAG_RD, &vmmeter.var, 0, descr) #define VM_STATS_VM(var, descr) VM_STATS(_vm_stats_vm, var, descr) #define VM_STATS_SYS(var, descr) VM_STATS(_vm_stats_sys, var, descr) -VM_STATS_SYS(v_swtch, "Context switches"); -VM_STATS_SYS(v_trap, "Traps"); -VM_STATS_SYS(v_syscall, "System calls"); -VM_STATS_SYS(v_intr, "Device interrupts"); -VM_STATS_SYS(v_soft, "Software interrupts"); -VM_STATS_VM(v_vm_faults, "Address memory faults"); -VM_STATS_VM(v_io_faults, "Page faults requiring I/O"); -VM_STATS_VM(v_cow_faults, "Copy-on-write faults"); -VM_STATS_VM(v_cow_optim, "Optimized COW faults"); -VM_STATS_VM(v_zfod, "Pages zero-filled on demand"); -VM_STATS_VM(v_ozfod, "Optimized zero fill pages"); -VM_STATS_VM(v_swapin, "Swap pager pageins"); -VM_STATS_VM(v_swapout, "Swap pager pageouts"); -VM_STATS_VM(v_swappgsin, "Swap pages swapped in"); -VM_STATS_VM(v_swappgsout, "Swap pages swapped out"); -VM_STATS_VM(v_vnodein, "Vnode pager pageins"); -VM_STATS_VM(v_vnodeout, "Vnode pager pageouts"); -VM_STATS_VM(v_vnodepgsin, "Vnode pages paged in"); -VM_STATS_VM(v_vnodepgsout, "Vnode pages paged out"); -VM_STATS_VM(v_intrans, "In transit page faults"); +#define VM_STATS_COUNTER(parent, var, descr) \ + SYSCTL_COUNTER_U64(parent, OID_AUTO, var, CTLFLAG_RD, &vmmeter.var, 0, descr) +#define VM_STATS_COUNTER_SYS(var, descr) \ + VM_STATS_COUNTER(_vm_stats_sys, var, descr) +#define VM_STATS_COUNTER_VM(var, descr) \ + VM_STATS_COUNTER(_vm_stats_vm, 
var, descr) + +VM_STATS_COUNTER_SYS(v_swtch, "Context switches"); +VM_STATS_COUNTER_SYS(v_trap, "Traps"); +VM_STATS_COUNTER_SYS(v_syscall, "System calls"); +VM_STATS_COUNTER_SYS(v_intr, "Device interrupts"); +VM_STATS_COUNTER_SYS(v_soft, "Software interrupts"); +VM_STATS_COUNTER_SYS(v_vm_faults, "Address memory faults"); +VM_STATS_COUNTER_SYS(v_io_faults, "Page faults requiring I/O"); +VM_STATS_COUNTER_SYS(v_cow_faults, "Copy-on-write faults"); +VM_STATS_COUNTER_SYS(v_cow_optim, "Optimized COW faults"); +VM_STATS_COUNTER_SYS(v_zfod, "Pages zero-filled on demand"); +VM_STATS_COUNTER_SYS(v_ozfod, "Optimized zero fill pages"); +VM_STATS_COUNTER_SYS(v_swapin, "Swap pager pageins"); +VM_STATS_COUNTER_SYS(v_swapout, "Swap pager pageouts"); +VM_STATS_COUNTER_SYS(v_swappgsin, "Swap pages swapped in"); +VM_STATS_COUNTER_SYS(v_swappgsout, "Swap pages swapped out"); +VM_STATS_COUNTER_SYS(v_vnodein, "Vnode pager pageins"); +VM_STATS_COUNTER_SYS(v_vnodeout, "Vnode pager pageouts"); +VM_STATS_COUNTER_SYS(v_vnodepgsin, "Vnode pages paged in"); +VM_STATS_COUNTER_SYS(v_vnodepgsout, "Vnode pages paged out"); +VM_STATS_COUNTER_SYS(v_intrans, "In transit page faults"); VM_STATS_VM(v_reactivated, "Pages reactivated from free list"); VM_STATS_VM(v_pdwakeups, "Pagedaemon wakeups"); -VM_STATS_VM(v_pdpages, "Pages analyzed by pagedaemon"); -VM_STATS_VM(v_tcached, "Total pages cached"); -VM_STATS_VM(v_dfree, "Pages freed by pagedaemon"); -VM_STATS_VM(v_pfree, "Pages freed by exiting processes"); -VM_STATS_VM(v_tfree, "Total pages freed"); +VM_STATS_COUNTER_SYS(v_pdpages, "Pages analyzed by pagedaemon"); +VM_STATS_COUNTER_SYS(v_tcached, "Total pages cached"); +VM_STATS_COUNTER_SYS(v_dfree, "Pages freed by pagedaemon"); +VM_STATS_COUNTER_SYS(v_pfree, "Pages freed by exiting processes"); +VM_STATS_COUNTER_SYS(v_tfree, "Total pages freed"); VM_STATS_VM(v_page_size, "Page size in bytes"); VM_STATS_VM(v_page_count, "Total number of pages in system"); VM_STATS_VM(v_free_reserved, "Pages reserved for deadlock"); @@ -320,14 +348,14 @@ VM_STATS_VM(v_cache_min, "Min pages on cache queue"); VM_STATS_VM(v_cache_max, "Max pages on cached queue"); VM_STATS_VM(v_pageout_free_min, "Min pages reserved for kernel"); VM_STATS_VM(v_interrupt_free_min, "Reserved pages for interrupt code"); -VM_STATS_VM(v_forks, "Number of fork() calls"); -VM_STATS_VM(v_vforks, "Number of vfork() calls"); -VM_STATS_VM(v_rforks, "Number of rfork() calls"); -VM_STATS_VM(v_kthreads, "Number of fork() calls by kernel"); -VM_STATS_VM(v_forkpages, "VM pages affected by fork()"); -VM_STATS_VM(v_vforkpages, "VM pages affected by vfork()"); -VM_STATS_VM(v_rforkpages, "VM pages affected by rfork()"); -VM_STATS_VM(v_kthreadpages, "VM pages affected by fork() by kernel"); +VM_STATS_COUNTER_VM(v_forks, "Number of fork() calls"); +VM_STATS_COUNTER_VM(v_vforks, "Number of vfork() calls"); +VM_STATS_COUNTER_VM(v_rforks, "Number of rfork() calls"); +VM_STATS_COUNTER_VM(v_kthreads, "Number of fork() calls by kernel"); +VM_STATS_COUNTER_VM(v_forkpages, "VM pages affected by fork()"); +VM_STATS_COUNTER_VM(v_vforkpages, "VM pages affected by vfork()"); +VM_STATS_COUNTER_VM(v_rforkpages, "VM pages affected by rfork()"); +VM_STATS_COUNTER_VM(v_kthreadpages, "VM pages affected by fork() by kernel"); SYSCTL_INT(_vm_stats_misc, OID_AUTO, zero_page_count, CTLFLAG_RD, &vm_page_zero_count, 0, "Number of zero-ed free pages"); diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c index c7e22c3..caca656 100644 --- a/sys/vm/vm_mmap.c +++ b/sys/vm/vm_mmap.c @@ -1070,7 +1070,7 @@ 
vm_mlock(struct proc *proc, struct ucred *cred, const void *addr0, size_t len) return (ENOMEM); } PROC_UNLOCK(proc); - if (npages + cnt.v_wire_count > vm_page_max_wired) + if (npages + vmmeter.v_wire_count > vm_page_max_wired) return (EAGAIN); #ifdef RACCT PROC_LOCK(proc); diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index 1c20ca6..3a6ede6 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -757,7 +757,7 @@ vm_object_terminate(vm_object_t object) p->object = NULL; if (p->wire_count == 0) { vm_page_free(p); - PCPU_INC(cnt.v_pfree); + VMMETER_INC(v_pfree); } vm_page_unlock(p); } @@ -1925,7 +1925,7 @@ again: ("inconsistent wire count %d %d %p", p->wire_count, wirings, p)); p->wire_count = 0; - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); } } vm_page_free(p); diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index a47209f..8ac4006 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -128,13 +128,13 @@ struct vm_pagequeue vm_pagequeues[PQ_COUNT] = { [PQ_INACTIVE] = { .pq_pl = TAILQ_HEAD_INITIALIZER( vm_pagequeues[PQ_INACTIVE].pq_pl), - .pq_cnt = &cnt.v_inactive_count, + .pq_cnt = &vmmeter.v_inactive_count, .pq_name = "vm inactive pagequeue" }, [PQ_ACTIVE] = { .pq_pl = TAILQ_HEAD_INITIALIZER( vm_pagequeues[PQ_ACTIVE].pq_pl), - .pq_cnt = &cnt.v_active_count, + .pq_cnt = &vmmeter.v_active_count, .pq_name = "vm active pagequeue" } }; @@ -220,9 +220,9 @@ vm_page_pa_tryrelock(pmap_t pmap, vm_paddr_t pa, vm_paddr_t *locked) void vm_set_page_size(void) { - if (cnt.v_page_size == 0) - cnt.v_page_size = PAGE_SIZE; - if (((cnt.v_page_size - 1) & cnt.v_page_size) != 0) + if (vmmeter.v_page_size == 0) + vmmeter.v_page_size = PAGE_SIZE; + if (((vmmeter.v_page_size - 1) & vmmeter.v_page_size) != 0) panic("vm_set_page_size: page size not a power of two"); } @@ -436,8 +436,8 @@ vm_page_startup(vm_offset_t vaddr) * Add every available physical page that is not blacklisted to * the free lists. 
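With SYSCTL_COUNTER_U64, the event statistics exported from vm_meter.c above become 64-bit quantities, so userland readers need to fetch them as uint64_t rather than the old unsigned int. A small userland sketch (the MIB name follows the vm.stats.sys tree used above; error handling kept minimal):

        #include <sys/types.h>
        #include <sys/sysctl.h>
        #include <stdint.h>
        #include <stdio.h>

        int
        main(void)
        {
                uint64_t swtch;
                size_t len = sizeof(swtch);

                if (sysctlbyname("vm.stats.sys.v_swtch", &swtch, &len,
                    NULL, 0) == -1) {
                        perror("sysctlbyname");
                        return (1);
                }
                printf("context switches: %ju\n", (uintmax_t)swtch);
                return (0);
        }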
*/ - cnt.v_page_count = 0; - cnt.v_free_count = 0; + vmmeter.v_page_count = 0; + vmmeter.v_free_count = 0; list = getenv("vm.blacklist"); for (i = 0; phys_avail[i + 1] != 0; i += 2) { pa = phys_avail[i]; @@ -1054,8 +1054,8 @@ vm_page_cache_free(vm_object_t object, vm_pindex_t start, vm_pindex_t end) m->flags ^= PG_CACHED | PG_FREE; KASSERT((m->flags & (PG_CACHED | PG_FREE)) == PG_FREE, ("vm_page_cache_free: page %p has inconsistent flags", m)); - cnt.v_cache_count--; - cnt.v_free_count++; + vmmeter.v_cache_count--; + vmmeter.v_free_count++; } empty = vm_radix_is_empty(&object->cache); mtx_unlock(&vm_page_queue_free_mtx); @@ -1092,7 +1092,7 @@ vm_page_cache_remove(vm_page_t m) ("vm_page_cache_remove: page %p is not cached", m)); vm_radix_remove(&m->object->cache, m->pindex); m->object = NULL; - cnt.v_cache_count--; + vmmeter.v_cache_count--; } /* @@ -1224,11 +1224,13 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) ("vm_page_alloc: pindex already allocated")); } mtx_lock(&vm_page_queue_free_mtx); - if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || + if (vmmeter.v_free_count + vmmeter.v_cache_count > + vmmeter.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && - cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || + vmmeter.v_free_count + vmmeter.v_cache_count > + vmmeter.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && - cnt.v_free_count + cnt.v_cache_count > 0)) { + vmmeter.v_free_count + vmmeter.v_cache_count > 0)) { /* * Allocate from the free queue if the number of free pages * exceeds the minimum for the request class. @@ -1298,7 +1300,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) KASSERT(m->valid != 0, ("vm_page_alloc: cached page %p is invalid", m)); if (m->object == object && m->pindex == pindex) - cnt.v_reactivated++; + vmmeter.v_reactivated++; else m->valid = 0; m_object = m->object; @@ -1311,7 +1313,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) ("vm_page_alloc: page %p is not free", m)); KASSERT(m->valid == 0, ("vm_page_alloc: free page %p is valid", m)); - cnt.v_free_count--; + vmmeter.v_free_count--; } /* @@ -1338,7 +1340,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) * The page lock is not required for wiring a page until that * page is inserted into the object. 
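The admission test in vm_page_alloc() above is easier to read factored out; this is only a restatement of the condition in the diff with the request classes spelled out (the helper name is mine, and in the real code the check runs with vm_page_queue_free_mtx held):

        /* Restates the admission check from vm_page_alloc() above. */
        static int
        vm_page_alloc_would_succeed(int req_class)
        {
                u_int avail;

                avail = vmmeter.v_free_count + vmmeter.v_cache_count;
                if (avail > vmmeter.v_free_reserved)
                        return (1);     /* VM_ALLOC_NORMAL and above */
                if (req_class == VM_ALLOC_SYSTEM &&
                    avail > vmmeter.v_interrupt_free_min)
                        return (1);
                if (req_class == VM_ALLOC_INTERRUPT && avail > 0)
                        return (1);
                return (0);
        }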
*/ - atomic_add_int(&cnt.v_wire_count, 1); + atomic_add_int(&vmmeter.v_wire_count, 1); m->wire_count = 1; } m->act_count = 0; @@ -1437,11 +1439,11 @@ vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, deferred_vdrop_list = NULL; mtx_lock(&vm_page_queue_free_mtx); - if (cnt.v_free_count + cnt.v_cache_count >= npages + - cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && - cnt.v_free_count + cnt.v_cache_count >= npages + - cnt.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && - cnt.v_free_count + cnt.v_cache_count >= npages)) { + if (vmmeter.v_free_count + vmmeter.v_cache_count >= npages + + vmmeter.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && + vmmeter.v_free_count + vmmeter.v_cache_count >= npages + + vmmeter.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && + vmmeter.v_free_count + vmmeter.v_cache_count >= npages)) { #if VM_NRESERVLEVEL > 0 retry: if (object == NULL || (object->flags & OBJ_COLORED) == 0 || @@ -1492,7 +1494,7 @@ retry: if ((req & VM_ALLOC_NODUMP) != 0) flags |= PG_NODUMP; if ((req & VM_ALLOC_WIRED) != 0) - atomic_add_int(&cnt.v_wire_count, npages); + atomic_add_int(&vmmeter.v_wire_count, npages); oflags = VPO_UNMANAGED; if (object != NULL) { if ((req & VM_ALLOC_NOBUSY) == 0) @@ -1569,7 +1571,7 @@ vm_page_alloc_init(vm_page_t m) ("vm_page_alloc_init: page %p is not free", m)); KASSERT(m->valid == 0, ("vm_page_alloc_init: free page %p is valid", m)); - cnt.v_free_count--; + vmmeter.v_free_count--; if ((m->flags & PG_ZERO) != 0) vm_page_zero_count--; } @@ -1618,11 +1620,13 @@ vm_page_alloc_freelist(int flind, int req) * Do not allocate reserved pages unless the req has asked for it. */ mtx_lock(&vm_page_queue_free_mtx); - if (cnt.v_free_count + cnt.v_cache_count > cnt.v_free_reserved || + if (vmmeter.v_free_count + vmmeter.v_cache_count > + vmmeter.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && - cnt.v_free_count + cnt.v_cache_count > cnt.v_interrupt_free_min) || + vmmeter.v_free_count + vmmeter.v_cache_count > + vmmeter.v_interrupt_free_min) || (req_class == VM_ALLOC_INTERRUPT && - cnt.v_free_count + cnt.v_cache_count > 0)) + vmmeter.v_free_count + vmmeter.v_cache_count > 0)) m = vm_phys_alloc_freelist_pages(flind, VM_FREEPOOL_DIRECT, 0); else { mtx_unlock(&vm_page_queue_free_mtx); @@ -1651,7 +1655,7 @@ vm_page_alloc_freelist(int flind, int req) * The page lock is not required for wiring a page that does * not belong to an object. */ - atomic_add_int(&cnt.v_wire_count, 1); + atomic_add_int(&vmmeter.v_wire_count, 1); m->wire_count = 1; } /* Unmanaged pages don't use "act_count". */ @@ -1683,7 +1687,7 @@ vm_wait(void) vm_pages_needed = 1; wakeup(&vm_pages_needed); } - msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, + msleep(&vmmeter.v_free_count, &vm_page_queue_free_mtx, PDROP | PVM, "vmwait", 0); } } @@ -1707,7 +1711,7 @@ vm_waitpfault(void) vm_pages_needed = 1; wakeup(&vm_pages_needed); } - msleep(&cnt.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, + msleep(&vmmeter.v_free_count, &vm_page_queue_free_mtx, PDROP | PUSER, "pfault", 0); } @@ -1867,7 +1871,8 @@ vm_page_free_wakeup(void) * some free. 
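vm_wait() and vm_waitpfault() above sleep on the address of vmmeter.v_free_count, and the pagedaemon wakes that same address, so the cnt to vmmeter rename has to be applied consistently to both sides of every such pair. A minimal sketch of the pairing (simplified; the real sites also manage vm_pages_needed):

        static void
        wait_for_free_pages(void)
        {

                mtx_lock(&vm_page_queue_free_mtx);
                /* Sleep on the counter's address; PDROP drops the mutex. */
                msleep(&vmmeter.v_free_count, &vm_page_queue_free_mtx,
                    PDROP | PVM, "vmwait", 0);
        }

        static void
        free_pages_available(void)
        {

                /* Whoever freed pages wakes the same channel. */
                wakeup(&vmmeter.v_free_count);
        }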
*/ if (vm_pageout_pages_needed && - cnt.v_cache_count + cnt.v_free_count >= cnt.v_pageout_free_min) { + vmmeter.v_cache_count + vmmeter.v_free_count >= + vmmeter.v_pageout_free_min) { wakeup(&vm_pageout_pages_needed); vm_pageout_pages_needed = 0; } @@ -1878,7 +1883,7 @@ vm_page_free_wakeup(void) */ if (vm_pages_needed && !vm_page_count_min()) { vm_pages_needed = 0; - wakeup(&cnt.v_free_count); + wakeup(&vmmeter.v_free_count); } } @@ -1901,7 +1906,7 @@ vm_page_free_toq(vm_page_t m) } else KASSERT(m->queue == PQ_NONE, ("vm_page_free_toq: unmanaged page %p is queued", m)); - PCPU_INC(cnt.v_tfree); + VMMETER_INC(v_tfree); if (VM_PAGE_IS_FREE(m)) panic("vm_page_free: freeing free page %p", m); @@ -1948,7 +1953,7 @@ vm_page_free_toq(vm_page_t m) */ mtx_lock(&vm_page_queue_free_mtx); m->flags |= PG_FREE; - cnt.v_free_count++; + vmmeter.v_free_count++; #if VM_NRESERVLEVEL > 0 if (!vm_reserv_free_page(m)) #else @@ -1996,7 +2001,7 @@ vm_page_wire(vm_page_t m) m->queue == PQ_NONE, ("vm_page_wire: unmanaged page %p is queued", m)); vm_page_remque(m); - atomic_add_int(&cnt.v_wire_count, 1); + atomic_add_int(&vmmeter.v_wire_count, 1); } m->wire_count++; KASSERT(m->wire_count != 0, ("vm_page_wire: wire_count overflow m=%p", m)); @@ -2032,7 +2037,7 @@ vm_page_unwire(vm_page_t m, int activate) if (m->wire_count > 0) { m->wire_count--; if (m->wire_count == 0) { - atomic_subtract_int(&cnt.v_wire_count, 1); + atomic_subtract_int(&vmmeter.v_wire_count, 1); if ((m->oflags & VPO_UNMANAGED) != 0 || m->object == NULL) return; @@ -2088,7 +2093,7 @@ _vm_page_deactivate(vm_page_t m, int athead) TAILQ_INSERT_HEAD(&pq->pq_pl, m, pageq); else TAILQ_INSERT_TAIL(&pq->pq_pl, m, pageq); - cnt.v_inactive_count++; + vmmeter.v_inactive_count++; vm_pagequeue_unlock(pq); } } @@ -2184,7 +2189,7 @@ vm_page_cache(vm_page_t m) } KASSERT((m->flags & PG_CACHED) == 0, ("vm_page_cache: page %p is already cached", m)); - PCPU_INC(cnt.v_tcached); + VMMETER_INC(v_tcached); /* * Remove the page from the paging queues. 
@@ -2212,7 +2217,7 @@ vm_page_cache(vm_page_t m) m->flags &= ~PG_ZERO; mtx_lock(&vm_page_queue_free_mtx); m->flags |= PG_CACHED; - cnt.v_cache_count++; + vmmeter.v_cache_count++; cache_was_empty = vm_radix_is_empty(&object->cache); vm_radix_insert(&object->cache, m); #if VM_NRESERVLEVEL > 0 @@ -2876,27 +2881,27 @@ vm_page_object_lock_assert(vm_page_t m) DB_SHOW_COMMAND(page, vm_page_print_page_info) { - db_printf("cnt.v_free_count: %d\n", cnt.v_free_count); - db_printf("cnt.v_cache_count: %d\n", cnt.v_cache_count); - db_printf("cnt.v_inactive_count: %d\n", cnt.v_inactive_count); - db_printf("cnt.v_active_count: %d\n", cnt.v_active_count); - db_printf("cnt.v_wire_count: %d\n", cnt.v_wire_count); - db_printf("cnt.v_free_reserved: %d\n", cnt.v_free_reserved); - db_printf("cnt.v_free_min: %d\n", cnt.v_free_min); - db_printf("cnt.v_free_target: %d\n", cnt.v_free_target); - db_printf("cnt.v_cache_min: %d\n", cnt.v_cache_min); - db_printf("cnt.v_inactive_target: %d\n", cnt.v_inactive_target); + db_printf("vmmeter.v_free_count: %d\n", vmmeter.v_free_count); + db_printf("vmmeter.v_cache_count: %d\n", vmmeter.v_cache_count); + db_printf("vmmeter.v_inactive_count: %d\n", vmmeter.v_inactive_count); + db_printf("vmmeter.v_active_count: %d\n", vmmeter.v_active_count); + db_printf("vmmeter.v_wire_count: %d\n", vmmeter.v_wire_count); + db_printf("vmmeter.v_free_reserved: %d\n", vmmeter.v_free_reserved); + db_printf("vmmeter.v_free_min: %d\n", vmmeter.v_free_min); + db_printf("vmmeter.v_free_target: %d\n", vmmeter.v_free_target); + db_printf("vmmeter.v_cache_min: %d\n", vmmeter.v_cache_min); + db_printf("vmmeter.v_inactive_target: %d\n", vmmeter.v_inactive_target); } DB_SHOW_COMMAND(pageq, vm_page_print_pageq_info) { db_printf("PQ_FREE:"); - db_printf(" %d", cnt.v_free_count); + db_printf(" %d", vmmeter.v_free_count); db_printf("\n"); db_printf("PQ_CACHE:"); - db_printf(" %d", cnt.v_cache_count); + db_printf(" %d", vmmeter.v_cache_count); db_printf("\n"); db_printf("PQ_ACTIVE: %d, PQ_INACTIVE: %d\n", diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index 6d6e626..ab5df84 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -682,9 +682,9 @@ vm_pageout_grow_cache(int tries, vm_paddr_t low, vm_paddr_t high) uma_reclaim(); } inactl = 0; - inactmax = cnt.v_inactive_count; + inactmax = vmmeter.v_inactive_count; actl = 0; - actmax = tries < 2 ? 0 : cnt.v_active_count; + actmax = tries < 2 ? 0 : vmmeter.v_active_count; again: if (inactl < inactmax && vm_pageout_launder(PQ_INACTIVE, tries, low, high)) { @@ -736,7 +736,7 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object, goto unlock_return; if ((p->oflags & VPO_BUSY) != 0 || p->busy != 0) continue; - PCPU_INC(cnt.v_pdpages); + VMMETER_INC(v_pdpages); vm_page_lock(p); if (p->wire_count != 0 || p->hold_count != 0 || !pmap_page_exists_quick(pmap, p)) { @@ -894,7 +894,7 @@ vm_pageout_scan(int pass) /* * The addl_page_shortage is the number of temporarily * stuck pages in the inactive queue. In other words, the - * number of pages from cnt.v_inactive_count that should be + * number of pages from vmmeter.v_inactive_count that should be * discounted in setting the target for the active queue scan. 
*/ addl_page_shortage = atomic_readandclear_int(&vm_pageout_deficit); @@ -920,7 +920,7 @@ vm_pageout_scan(int pass) if (pass) maxlaunder = 10000; - maxscan = cnt.v_inactive_count; + maxscan = vmmeter.v_inactive_count; /* * Start scanning the inactive queue for pages we can move to the @@ -939,7 +939,7 @@ vm_pageout_scan(int pass) KASSERT(queues_locked, ("unlocked queues")); KASSERT(m->queue == PQ_INACTIVE, ("Inactive queue %p", m)); - PCPU_INC(cnt.v_pdpages); + VMMETER_INC(v_pdpages); next = TAILQ_NEXT(m, pageq); /* @@ -1044,7 +1044,7 @@ vm_pageout_scan(int pass) /* * Held pages are essentially stuck in the * queue. So, they ought to be discounted - * from cnt.v_inactive_count. See the + * from vmmeter.v_inactive_count. See the * calculation of the page_shortage for the * loop over the active queue below. */ @@ -1069,7 +1069,7 @@ vm_pageout_scan(int pass) * Invalid pages can be easily freed */ vm_page_free(m); - PCPU_INC(cnt.v_dfree); + VMMETER_INC(v_dfree); --page_shortage; } else if (m->dirty == 0) { /* @@ -1268,7 +1268,7 @@ relock_queues: * active queue to the inactive queue. */ page_shortage = vm_paging_target() + - cnt.v_inactive_target - cnt.v_inactive_count; + vmmeter.v_inactive_target - vmmeter.v_inactive_count; page_shortage += addl_page_shortage; /* @@ -1276,7 +1276,7 @@ relock_queues: * track the per-page activity counter and use it to locate * deactivation candidates. */ - pcount = cnt.v_active_count; + pcount = vmmeter.v_active_count; pq = &vm_pagequeues[PQ_ACTIVE]; vm_pagequeue_lock(pq); m = TAILQ_FIRST(&pq->pq_pl); @@ -1325,7 +1325,7 @@ relock_queues: * The count for pagedaemon pages is done after checking the * page for eligibility... */ - PCPU_INC(cnt.v_pdpages); + VMMETER_INC(v_pdpages); /* * Check to see "how much" the page has been used. @@ -1511,7 +1511,7 @@ vm_pageout_oom(int shortage) killproc(bigproc, "out of swap space"); sched_nice(bigproc, PRIO_MIN); PROC_UNLOCK(bigproc); - wakeup(&cnt.v_free_count); + wakeup(&vmmeter.v_free_count); } } @@ -1532,18 +1532,20 @@ vm_pageout_page_stats(void) int page_shortage; page_shortage = - (cnt.v_inactive_target + cnt.v_cache_max + cnt.v_free_min) - - (cnt.v_free_count + cnt.v_inactive_count + cnt.v_cache_count); + (vmmeter.v_inactive_target + vmmeter.v_cache_max + + vmmeter.v_free_min) - + (vmmeter.v_free_count + vmmeter.v_inactive_count + + vmmeter.v_cache_count); if (page_shortage <= 0) return; - pcount = cnt.v_active_count; + pcount = vmmeter.v_active_count; fullintervalcount += vm_pageout_stats_interval; if (fullintervalcount < vm_pageout_full_stats_interval) { vm_pageout_stats++; - tpcount = (int64_t)vm_pageout_stats_max * cnt.v_active_count / - cnt.v_page_count; + tpcount = (int64_t)vm_pageout_stats_max * + vmmeter.v_active_count / vmmeter.v_page_count; if (pcount > tpcount) pcount = tpcount; } else { @@ -1643,8 +1645,8 @@ vm_pageout(void) /* * Initialize some paging parameters. */ - cnt.v_interrupt_free_min = 2; - if (cnt.v_page_count < 2000) + vmmeter.v_interrupt_free_min = 2; + if (vmmeter.v_page_count < 2000) vm_pageout_page_count = 8; /* @@ -1652,17 +1654,17 @@ vm_pageout(void) * swap pager structures plus enough for any pv_entry structs * when paging. 
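As a concrete check of the deactivation math above, take some illustrative (not measured) values: v_free_target = 4000, v_cache_min = 4000, v_free_count = 1500, v_cache_count = 500, v_inactive_target = 6000, v_inactive_count = 5000, addl_page_shortage = 0. Then vm_paging_target() = (4000 + 4000) - (1500 + 500) = 6000, and page_shortage = 6000 + 6000 - 5000 = 7000, so the active-queue pass tries to deactivate roughly 7000 pages. The fields involved are all plain u_ints, which is why this arithmetic is unchanged apart from the rename.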
 	 */
-	if (cnt.v_page_count > 1024)
-		cnt.v_free_min = 4 + (cnt.v_page_count - 1024) / 200;
+	if (vmmeter.v_page_count > 1024)
+		vmmeter.v_free_min = 4 + (vmmeter.v_page_count - 1024) / 200;
 	else
-		cnt.v_free_min = 4;
-	cnt.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
-	    cnt.v_interrupt_free_min;
-	cnt.v_free_reserved = vm_pageout_page_count +
-	    cnt.v_pageout_free_min + (cnt.v_page_count / 768);
-	cnt.v_free_severe = cnt.v_free_min / 2;
-	cnt.v_free_min += cnt.v_free_reserved;
-	cnt.v_free_severe += cnt.v_free_reserved;
+		vmmeter.v_free_min = 4;
+	vmmeter.v_pageout_free_min = (2*MAXBSIZE)/PAGE_SIZE +
+	    vmmeter.v_interrupt_free_min;
+	vmmeter.v_free_reserved = vm_pageout_page_count +
+	    vmmeter.v_pageout_free_min + (vmmeter.v_page_count / 768);
+	vmmeter.v_free_severe = vmmeter.v_free_min / 2;
+	vmmeter.v_free_min += vmmeter.v_free_reserved;
+	vmmeter.v_free_severe += vmmeter.v_free_reserved;
 
 	/*
 	 * v_free_target and v_cache_min control pageout hysteresis.  Note
@@ -1675,29 +1677,31 @@ vm_pageout(void)
 	 * be big enough to handle memory needs while the pageout daemon
 	 * is signalled and run to free more pages.
 	 */
-	if (cnt.v_free_count > 6144)
-		cnt.v_free_target = 4 * cnt.v_free_min + cnt.v_free_reserved;
+	if (vmmeter.v_free_count > 6144)
+		vmmeter.v_free_target = 4 * vmmeter.v_free_min +
+		    vmmeter.v_free_reserved;
 	else
-		cnt.v_free_target = 2 * cnt.v_free_min + cnt.v_free_reserved;
+		vmmeter.v_free_target = 2 * vmmeter.v_free_min +
+		    vmmeter.v_free_reserved;
 
-	if (cnt.v_free_count > 2048) {
-		cnt.v_cache_min = cnt.v_free_target;
-		cnt.v_cache_max = 2 * cnt.v_cache_min;
-		cnt.v_inactive_target = (3 * cnt.v_free_target) / 2;
+	if (vmmeter.v_free_count > 2048) {
+		vmmeter.v_cache_min = vmmeter.v_free_target;
+		vmmeter.v_cache_max = 2 * vmmeter.v_cache_min;
+		vmmeter.v_inactive_target = (3 * vmmeter.v_free_target) / 2;
 	} else {
-		cnt.v_cache_min = 0;
-		cnt.v_cache_max = 0;
-		cnt.v_inactive_target = cnt.v_free_count / 4;
+		vmmeter.v_cache_min = 0;
+		vmmeter.v_cache_max = 0;
+		vmmeter.v_inactive_target = vmmeter.v_free_count / 4;
 	}
-	if (cnt.v_inactive_target > cnt.v_free_count / 3)
-		cnt.v_inactive_target = cnt.v_free_count / 3;
+	if (vmmeter.v_inactive_target > vmmeter.v_free_count / 3)
+		vmmeter.v_inactive_target = vmmeter.v_free_count / 3;
 
 	/* XXX does not really belong here */
 	if (vm_page_max_wired == 0)
-		vm_page_max_wired = cnt.v_free_count / 3;
+		vm_page_max_wired = vmmeter.v_free_count / 3;
 
 	if (vm_pageout_stats_max == 0)
-		vm_pageout_stats_max = cnt.v_free_target;
+		vm_pageout_stats_max = vmmeter.v_free_target;
 
 	/*
 	 * Set interval in seconds for stats scan.
@@ -1723,7 +1727,7 @@ vm_pageout(void)
 		if (vm_pages_needed && !vm_page_count_min()) {
 			if (!vm_paging_needed())
 				vm_pages_needed = 0;
-			wakeup(&cnt.v_free_count);
+			wakeup(&vmmeter.v_free_count);
 		}
 		if (vm_pages_needed) {
 			/*
@@ -1756,7 +1760,7 @@ vm_pageout(void)
 			}
 		}
 		if (vm_pages_needed)
-			cnt.v_pdwakeups++;
+			vmmeter.v_pdwakeups++;
 		mtx_unlock(&vm_page_queue_free_mtx);
 		vm_pageout_scan(pass);
 	}
@@ -1765,7 +1769,7 @@ vm_pageout(void)
 /*
  * Unless the free page queue lock is held by the caller, this function
  * should be regarded as advisory.  Specifically, the caller should
- * not msleep() on &cnt.v_free_count following this function unless
+ * not msleep() on &vmmeter.v_free_count following this function unless
 * the free page queue lock is held until the msleep() is performed.
 */
 void
diff --git a/sys/vm/vm_phys.c b/sys/vm/vm_phys.c
index 66f3b0c..a7cf3c3 100644
--- a/sys/vm/vm_phys.c
+++ b/sys/vm/vm_phys.c
@@ -379,7 +379,7 @@ vm_phys_add_page(vm_paddr_t pa)
 {
 	vm_page_t m;
 
-	cnt.v_page_count++;
+	vmmeter.v_page_count++;
 	m = vm_phys_paddr_to_vm_page(pa);
 	m->phys_addr = pa;
 	m->queue = PQ_NONE;
@@ -391,7 +391,7 @@ vm_phys_add_page(vm_paddr_t pa)
 	m->pool = VM_FREEPOOL_DEFAULT;
 	pmap_page_init(m);
 	mtx_lock(&vm_page_queue_free_mtx);
-	cnt.v_free_count++;
+	vmmeter.v_free_count++;
 	vm_phys_free_pages(m, 0);
 	mtx_unlock(&vm_page_queue_free_mtx);
 }
@@ -813,12 +813,12 @@ vm_phys_zero_pages_idle(void)
 			for (m_tmp = m; m_tmp < &m[1 << oind]; m_tmp++) {
 				if ((m_tmp->flags & (PG_CACHED | PG_ZERO)) == 0) {
 					vm_phys_unfree_page(m_tmp);
-					cnt.v_free_count--;
+					vmmeter.v_free_count--;
 					mtx_unlock(&vm_page_queue_free_mtx);
 					pmap_zero_page_idle(m_tmp);
 					m_tmp->flags |= PG_ZERO;
 					mtx_lock(&vm_page_queue_free_mtx);
-					cnt.v_free_count++;
+					vmmeter.v_free_count++;
 					vm_phys_free_pages(m_tmp, 0);
 					vm_page_zero_count++;
 					cnt_prezero++;
diff --git a/sys/vm/vm_radix.c b/sys/vm/vm_radix.c
index 113a226..c36c2b7 100644
--- a/sys/vm/vm_radix.c
+++ b/sys/vm/vm_radix.c
@@ -320,7 +320,7 @@ vm_radix_prealloc(void *arg __unused)
 	 * Calculate the number of reserved nodes, discounting the pages that
 	 * are needed to store them.
 	 */
-	nodes = ((vm_paddr_t)cnt.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
+	nodes = ((vm_paddr_t)vmmeter.v_page_count * PAGE_SIZE) / (PAGE_SIZE +
 	    sizeof(struct vm_radix_node));
 	if (!uma_zone_reserve_kva(vm_radix_node_zone, nodes))
 		panic("%s: unable to create new zone", __func__);
diff --git a/sys/vm/vm_zeroidle.c b/sys/vm/vm_zeroidle.c
index 6ba96e1..0d3c9ad 100644
--- a/sys/vm/vm_zeroidle.c
+++ b/sys/vm/vm_zeroidle.c
@@ -83,9 +83,9 @@ vm_page_zero_check(void)
 	 * fast sleeps.  We also do not want to be continuously zeroing
	 * pages because doing so may flush our L1 and L2 caches too much.
 	 */
-	if (zero_state && vm_page_zero_count >= ZIDLE_LO(cnt.v_free_count))
+	if (zero_state && vm_page_zero_count >= ZIDLE_LO(vmmeter.v_free_count))
 		return (0);
-	if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+	if (vm_page_zero_count >= ZIDLE_HI(vmmeter.v_free_count))
 		return (0);
 	return (1);
 }
@@ -97,7 +97,7 @@ vm_page_zero_idle(void)
 	mtx_assert(&vm_page_queue_free_mtx, MA_OWNED);
 	zero_state = 0;
 	if (vm_phys_zero_pages_idle()) {
-		if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
+		if (vm_page_zero_count >= ZIDLE_HI(vmmeter.v_free_count))
 			zero_state = 1;
 	}
 }
diff --git a/sys/vm/vnode_pager.c b/sys/vm/vnode_pager.c
index fa92768..153252c 100644
--- a/sys/vm/vnode_pager.c
+++ b/sys/vm/vnode_pager.c
@@ -738,8 +738,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 				vm_page_free(m[i]);
 				vm_page_unlock(m[i]);
 			}
-		PCPU_INC(cnt.v_vnodein);
-		PCPU_INC(cnt.v_vnodepgsin);
+		VMMETER_INC(v_vnodein);
+		VMMETER_INC(v_vnodepgsin);
 		error = vnode_pager_input_old(object, m[reqpage]);
 		VM_OBJECT_WUNLOCK(object);
 		return (error);
@@ -769,8 +769,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 				vm_page_unlock(m[i]);
 			}
 		VM_OBJECT_WUNLOCK(object);
-		PCPU_INC(cnt.v_vnodein);
-		PCPU_INC(cnt.v_vnodepgsin);
+		VMMETER_INC(v_vnodein);
+		VMMETER_INC(v_vnodepgsin);
 		return vnode_pager_input_smlfs(object, m[reqpage]);
 	}
 
@@ -940,8 +940,8 @@ vnode_pager_generic_getpages(vp, m, bytecount, reqpage)
 	bp->b_runningbufspace = bp->b_bufsize;
 	atomic_add_long(&runningbufspace, bp->b_runningbufspace);
 
-	PCPU_INC(cnt.v_vnodein);
-	PCPU_ADD(cnt.v_vnodepgsin, count);
+	VMMETER_INC(v_vnodein);
+	VMMETER_ADD(v_vnodepgsin, count);
 
 	/* do the input */
 	bp->b_iooffset = dbtob(bp->b_blkno);
@@ -1052,7 +1052,8 @@ vnode_pager_putpages(object, m, count, sync, rtvals)
 	 * daemon up.  This should be probably be addressed XXX.
 	 */
 
-	if ((cnt.v_free_count + cnt.v_cache_count) < cnt.v_pageout_free_min)
+	if ((vmmeter.v_free_count + vmmeter.v_cache_count) <
+	    vmmeter.v_pageout_free_min)
 		sync |= OBJPC_SYNC;
 
 	/*
@@ -1184,8 +1185,8 @@ vnode_pager_generic_putpages(struct vnode *vp, vm_page_t *ma, int bytecount,
 	auio.uio_resid = maxsize;
 	auio.uio_td = (struct thread *) 0;
 	error = VOP_WRITE(vp, &auio, ioflags, curthread->td_ucred);
-	PCPU_INC(cnt.v_vnodeout);
-	PCPU_ADD(cnt.v_vnodepgsout, ncount);
+	VMMETER_INC(v_vnodeout);
+	VMMETER_ADD(v_vnodepgsout, ncount);
 
 	if (error) {
 		if ((ppscheck = ppsratecheck(&lastfail, &curfail, 1)))
diff --git a/sys/x86/x86/intr_machdep.c b/sys/x86/x86/intr_machdep.c
index e21635f..b8beeff 100644
--- a/sys/x86/x86/intr_machdep.c
+++ b/sys/x86/x86/intr_machdep.c
@@ -51,6 +51,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -250,7 +251,7 @@ intr_execute_handlers(struct intsrc *isrc, struct trapframe *frame)
 	 * processed too.
 	 */
 	(*isrc->is_count)++;
-	PCPU_INC(cnt.v_intr);
+	VMMETER_INC(v_intr);
 
 	ie = isrc->is_event;
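
The hunks above convert every statistics bump from PCPU_INC(cnt.v_*) / PCPU_ADD(cnt.v_*, n) to VMMETER_INC() / VMMETER_ADD(), and every direct reference to the old global `cnt` to the renamed global `vmmeter`. The wrapper macros themselves are not shown in this part of the patch; the following is a minimal sketch of definitions that would be compatible with the call sites above, assuming the statistics keep accumulating in the per-CPU vmmeter copy embedded in struct pcpu. The bodies are illustrative, not the definitions from the patch.

/*
 * Illustrative only: compatible with the VMMETER_INC()/VMMETER_ADD()
 * call sites in the hunks above, assuming the counters continue to be
 * updated through the per-CPU "cnt" member of struct pcpu.  The real
 * definitions belong in sys/vmmeter.h and may differ.
 */
#include <sys/pcpu.h>

#define	VMMETER_INC(field)		PCPU_INC(cnt.field)
#define	VMMETER_ADD(field, value)	PCPU_ADD(cnt.field, (value))

With wrappers along these lines, a call such as VMMETER_INC(v_intr) in intr_execute_handlers() expands to the same per-CPU increment the old PCPU_INC(cnt.v_intr) performed, which is why the conversion in these files is purely mechanical.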
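The vm_pageout() initialization hunks only rename identifiers, but the thresholds they compute are easier to check against concrete numbers. The sketch below replays that arithmetic in userland for a hypothetical machine with 1,048,576 pages of 4 KB (about 4 GB of RAM); the values used for MAXBSIZE, PAGE_SIZE, and vm_pageout_page_count are common amd64 defaults and are assumptions here, not part of the patch.

/*
 * Userland replay of the vm_pageout() threshold setup shown above for a
 * hypothetical 4 GB machine (1,048,576 pages of 4 KB).  MAXBSIZE (65536),
 * PAGE_SIZE (4096), and vm_pageout_page_count (32) are assumed values.
 */
#include <stdio.h>

int
main(void)
{
	unsigned int v_page_count = 1048576;		/* hypothetical */
	unsigned int vm_pageout_page_count = 32;	/* assumed default */
	unsigned int v_interrupt_free_min = 2;
	unsigned int v_free_min, v_pageout_free_min, v_free_reserved;
	unsigned int v_free_severe, v_free_target;

	v_free_min = v_page_count > 1024 ?
	    4 + (v_page_count - 1024) / 200 : 4;
	v_pageout_free_min = (2 * 65536) / 4096 + v_interrupt_free_min;
	v_free_reserved = vm_pageout_page_count + v_pageout_free_min +
	    v_page_count / 768;
	v_free_severe = v_free_min / 2;
	v_free_min += v_free_reserved;
	v_free_severe += v_free_reserved;
	/* With this much memory the free count exceeds 6144 pages. */
	v_free_target = 4 * v_free_min + v_free_reserved;

	/* Prints: free_min=6672 (~26 MB), reserved=1431, severe=4051, target=28119 */
	printf("free_min=%u (~%u MB), reserved=%u, severe=%u, target=%u\n",
	    v_free_min, v_free_min * 4096U / (1024 * 1024),
	    v_free_reserved, v_free_severe, v_free_target);
	return (0);
}

In other words, on a machine of this size the page daemon starts holding back roughly 26 MB as its free-page floor and aims for about 110 MB free before it stops scanning, regardless of whether the counters are spelled cnt.* or vmmeter.*.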
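The comment updated at the end of the vm_pageout.c changes spells out the rule for sleeping on the free-page count: the check and the msleep() on &vmmeter.v_free_count must both happen with the free page queue lock held, otherwise the wakeup(&vmmeter.v_free_count) issued by the page daemon (visible in the hunks above) can slip in between and be lost. A minimal sketch of that pattern follows; the helper name is hypothetical and the re-check predicate is only an example.

/*
 * Hypothetical caller following the locking rule described above: take
 * vm_page_queue_free_mtx, re-check the condition, and sleep on
 * &vmmeter.v_free_count while still holding the lock so the page
 * daemon's wakeup cannot be missed.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/priority.h>
#include <sys/vmmeter.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>

static void
wait_for_free_pages(void)		/* hypothetical helper */
{

	mtx_lock(&vm_page_queue_free_mtx);
	while (vm_page_count_min())	/* re-check under the lock */
		msleep(&vmmeter.v_free_count, &vm_page_queue_free_mtx,
		    PVM, "pfault", 0);
	mtx_unlock(&vm_page_queue_free_mtx);
}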