--- vm_page.c	2010-09-03 12:40:53.000000000 +0200
+++ vm_page.c.stats	2010-09-24 17:53:28.000000000 +0200
@@ -113,6 +113,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 
@@ -131,6 +132,7 @@
 #include 
 #include 
 
+#include 
 
 #if defined(__amd64__) || defined (__i386__)
 extern struct sysctl_oid_list sysctl__vm_pmap_children;
@@ -150,6 +152,29 @@
 SYSCTL_INT(_vm_pmap, OID_AUTO, tryrelock_race, CTLFLAG_RD,
     &pmap_tryrelock_race, 0, "Number of tryrelock pmap race cases");
 
+static int vmpage_splay_lookup = 0;
+static int vmpage_splay_cachehit = 0;
+static int vmpage_splay_rotate = 0;
+static int vmpage_splay_rotatetouch = 0;
+static int vmpage_splay_insert = 0;
+static int vmpage_splay_remove = 0;
+
+SYSCTL_NODE(_vm, OID_AUTO, page, CTLFLAG_RD, 0, "VM/page statistics");
+SYSCTL_INT(_vm_page, OID_AUTO, vmpage_splay_lookup, CTLFLAG_RD,
+    &vmpage_splay_lookup, 0, "");
+SYSCTL_INT(_vm_page, OID_AUTO, vmpage_splay_cachehit, CTLFLAG_RD,
+    &vmpage_splay_cachehit, 0, "");
+SYSCTL_INT(_vm_page, OID_AUTO, vmpage_splay_rotate, CTLFLAG_RD,
+    &vmpage_splay_rotate, 0, "");
+SYSCTL_INT(_vm_page, OID_AUTO, vmpage_splay_rotatetouch, CTLFLAG_RD,
+    &vmpage_splay_rotatetouch, 0, "");
+SYSCTL_INT(_vm_page, OID_AUTO, vmpage_splay_insert, CTLFLAG_RD,
+    &vmpage_splay_insert, 0, "");
+SYSCTL_INT(_vm_page, OID_AUTO, vmpage_splay_remove, CTLFLAG_RD,
+    &vmpage_splay_remove, 0, "");
+
+#define	PAGESTAT(x)	atomic_add_int(&(x), 1)
+
 /*
  *	Associated with page of user-allocatable memory is a
  *	page structure.
@@ -686,6 +711,9 @@
 	if (root == NULL)
 		return (root);
 	lefttreemax = righttreemin = &dummy;
+
+	PAGESTAT(vmpage_splay_rotate);
+
 	for (;; root = y) {
 		if (pindex < root->pindex) {
 			if ((y = root->left) == NULL)
@@ -717,6 +745,7 @@
 			lefttreemax = root;
 		} else
 			break;
+		PAGESTAT(vmpage_splay_rotatetouch);
 	}
 	/* Assemble the new root. */
 	lefttreemax->right = root->left;
@@ -754,6 +783,8 @@
 	m->object = object;
 	m->pindex = pindex;
 
+	PAGESTAT(vmpage_splay_insert);
+
 	/*
 	 * Now link into the object's ordered list of backed pages.
 	 */
@@ -827,6 +858,8 @@
 		vm_page_flash(m);
 	}
 
+	PAGESTAT(vmpage_splay_remove);
+
 	/*
 	 * Now remove from the object's list of backed pages.
 	 */
@@ -871,11 +904,16 @@
 	vm_page_t m;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+
+	PAGESTAT(vmpage_splay_lookup);
+
 	if ((m = object->root) != NULL && m->pindex != pindex) {
 		m = vm_page_splay(pindex, m);
 		if ((object->root = m)->pindex != pindex)
 			m = NULL;
-	}
+	} else if (m != NULL)
+		PAGESTAT(vmpage_splay_cachehit);
+
 	return (m);
 }
 
@@ -894,6 +932,9 @@
 	vm_page_t m;
 
 	VM_OBJECT_LOCK_ASSERT(object, MA_OWNED);
+
+	PAGESTAT(vmpage_splay_lookup);
+
 	if ((m = TAILQ_FIRST(&object->memq)) != NULL) {
 		if (m->pindex < pindex) {
 			m = vm_page_splay(pindex, object->root);
@@ -1051,7 +1092,8 @@
 		m = vm_page_splay(pindex, m);
 		if ((object->cache = m)->pindex != pindex)
 			m = NULL;
-	}
+	} else if (m != NULL)
+		PAGESTAT(vmpage_splay_cachehit);
 	return (m);
 }
 
@@ -1071,6 +1113,9 @@
 	KASSERT((m->flags & PG_CACHED) != 0,
 	    ("vm_page_cache_remove: page %p is not cached", m));
 	object = m->object;
+
+	PAGESTAT(vmpage_splay_remove);
+
 	if (m != object->cache) {
 		root = vm_page_splay(m->pindex, object->cache);
 		KASSERT(root == m,
--- vm_map.c	2010-09-19 19:43:22.000000000 +0200
+++ vm_map.c.stats	2010-09-30 18:10:35.000000000 +0200
@@ -67,17 +67,21 @@
 #include 
 #include 
+#include 
 #include 
 #include 
+#include 
+#include 
+#include 
 #include 
 #include 
-#include 
-#include 
-#include 
 #include 
-#include 
 #include 
 #include 
 
+#include 
+#include 
+#include 
+#include 
 
 #include 
 #include 
@@ -91,6 +95,32 @@
 #include 
 #include 
 
+#include 
+
+static int vmmap_splay_lookup = 0;
+static int vmmap_splay_cachehit = 0;
+static int vmmap_splay_rotate = 0;
+static int vmmap_splay_rotatetouch = 0;
+static int vmmap_splay_insert = 0;
+static int vmmap_splay_remove = 0;
+
+static SYSCTL_NODE(_vm, OID_AUTO, map, CTLFLAG_RD, 0, "VM/map statistics");
+
+SYSCTL_INT(_vm_map, OID_AUTO, vmmap_splay_lookup, CTLFLAG_RD,
+    &vmmap_splay_lookup, 0, "");
+SYSCTL_INT(_vm_map, OID_AUTO, vmmap_splay_cachehit, CTLFLAG_RD,
+    &vmmap_splay_cachehit, 0, "");
+SYSCTL_INT(_vm_map, OID_AUTO, vmmap_splay_rotate, CTLFLAG_RD,
+    &vmmap_splay_rotate, 0, "");
+SYSCTL_INT(_vm_map, OID_AUTO, vmmap_splay_rotatetouch, CTLFLAG_RD,
+    &vmmap_splay_rotatetouch, 0, "");
+SYSCTL_INT(_vm_map, OID_AUTO, vmmap_splay_insert, CTLFLAG_RD,
+    &vmmap_splay_insert, 0, "");
+SYSCTL_INT(_vm_map, OID_AUTO, vmmap_splay_remove, CTLFLAG_RD,
+    &vmmap_splay_remove, 0, "");
+
+#define	MAPSTAT(x)	atomic_add_int(&(x), 1)
+
 /*
  *	Virtual memory maps provide for the mapping, protection,
  *	and sharing of virtual memory objects.  In addition,
@@ -819,6 +849,8 @@
 	if (root == NULL)
 		return (root);
 
+	MAPSTAT(vmmap_splay_rotate);
+
 	/*
 	 * Pass One: Splay down the tree until we find addr or a NULL
 	 * pointer where addr would go.  llist and rlist are the two
@@ -869,6 +901,7 @@
 			}
 		} else
 			break;
+		MAPSTAT(vmmap_splay_rotatetouch);
 	}
 
 	/*
@@ -924,6 +957,8 @@
 	entry->next->prev = entry;
 	after_where->next = entry;
 
+	MAPSTAT(vmmap_splay_insert);
+
 	if (after_where != &map->header) {
 		if (after_where != map->root)
 			vm_map_entry_splay(after_where->start, map->root);
@@ -933,6 +968,7 @@
 		after_where->adj_free = entry->start - after_where->end;
 		vm_map_entry_set_max_free(after_where);
 	} else {
+		MAPSTAT(vmmap_splay_cachehit);
 		entry->right = map->root;
 		entry->left = NULL;
 	}
@@ -949,11 +985,15 @@
 	vm_map_entry_t next, prev, root;
 
 	VM_MAP_ASSERT_LOCKED(map);
+
+	MAPSTAT(vmmap_splay_remove);
+
 	if (entry != map->root)
 		vm_map_entry_splay(entry->start, map->root);
-	if (entry->left == NULL)
+	if (entry->left == NULL) {
+		MAPSTAT(vmmap_splay_cachehit);
 		root = entry->right;
-	else {
+	} else {
 		root = vm_map_entry_splay(entry->start, entry->left);
 		root->right = entry->right;
 		root->adj_free = (entry->next == &map->header ? map->max_offset :
@@ -1017,6 +1057,8 @@
 	vm_map_entry_t cur;
 	boolean_t locked;
 
+	MAPSTAT(vmmap_splay_lookup);
+
 	/*
 	 * If the map is empty, then the map entry immediately preceding
 	 * "address" is the map's header.
@@ -1025,6 +1067,7 @@
 	if (cur == NULL)
 		*entry = &map->header;
 	else if (address >= cur->start && cur->end > address) {
+		MAPSTAT(vmmap_splay_cachehit);
 		*entry = cur;
 		return (TRUE);
 	} else if ((locked = vm_map_locked(map)) ||