From 13f2ed510daed272cb7879b3044e0e8a89579dd7 Mon Sep 17 00:00:00 2001 From: Mark Johnston Date: Fri, 11 Mar 2016 23:56:40 -0800 Subject: [PATCH 14/16] Update comments now that cache pages are no more. --- sys/vm/vm_object.c | 7 +++--- sys/vm/vm_object.h | 11 --------- sys/vm/vm_page.c | 66 ++++++++++++++++++----------------------------------- sys/vm/vm_page.h | 7 ------ sys/vm/vm_pageout.c | 16 ++++++------- sys/vm/vm_reserv.c | 17 +++++++------- 6 files changed, 40 insertions(+), 84 deletions(-) diff --git a/sys/vm/vm_object.c b/sys/vm/vm_object.c index bba9451..63086e2 100644 --- a/sys/vm/vm_object.c +++ b/sys/vm/vm_object.c @@ -1353,7 +1353,7 @@ retry: goto retry; } - /* vm_page_rename() will handle dirty and cache. */ + /* vm_page_rename() will dirty the page. */ if (vm_page_rename(m, new_object, idx)) { VM_OBJECT_WUNLOCK(new_object); VM_OBJECT_WUNLOCK(orig_object); @@ -1443,7 +1443,7 @@ vm_object_scan_all_shadowed(vm_object_t object) /* * Initial conditions: * - * We do not want to have to test for the existence of cache or swap + * We do not want to have to test for the existence of swap * pages in the backing object. XXX but with the new swapper this * would be pretty easy to do. */ @@ -1587,8 +1587,7 @@ vm_object_collapse_scan(vm_object_t object, int op) * backing object to the main object. * * If the page was mapped to a process, it can remain mapped - * through the rename. vm_page_rename() will handle dirty and - * cache. + * through the rename. vm_page_rename() will dirty the page. */ if (vm_page_rename(p, object, new_pindex)) { next = vm_object_collapse_scan_wait(object, NULL, next, diff --git a/sys/vm/vm_object.h b/sys/vm/vm_object.h index a58fa1c..bfad43b 100644 --- a/sys/vm/vm_object.h +++ b/sys/vm/vm_object.h @@ -79,17 +79,6 @@ * * vm_object_t Virtual memory object. * - * The root of cached pages pool is protected by both the per-object lock - * and the free pages queue mutex. - * On insert in the cache radix trie, the per-object lock is expected - * to be already held and the free pages queue mutex will be - * acquired during the operation too. - * On remove and lookup from the cache radix trie, only the free - * pages queue mutex is expected to be locked. - * These rules allow for reliably checking for the presence of cached - * pages with only the per-object lock held, thereby reducing contention - * for the free pages queue mutex. - * * List of locks * (c) const until freed * (o) per-object lock diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c index 7d964e9..52f4191 100644 --- a/sys/vm/vm_page.c +++ b/sys/vm/vm_page.c @@ -1366,9 +1366,7 @@ vm_page_replace(vm_page_t mnew, vm_object_t object, vm_pindex_t pindex) * * Note: we *always* dirty the page. It is necessary both for the * fact that we moved it, and because we may be invalidating - * swap. If the page is on the cache, we have to deactivate it - * or vm_page_dirty() will panic. Dirty pages are not allowed - * on the cache. + * swap. * * The objects must be locked. */ @@ -1469,11 +1467,7 @@ vm_page_alloc(vm_object_t object, vm_pindex_t pindex, int req) ("vm_page_alloc: pindex already allocated")); } - /* - * The page allocation request can came from consumers which already - * hold the free page queue mutex, like vm_page_insert() in - * vm_page_cache(). - */ + /* XXX is it still possible for this to recurse? 
*/ mtx_lock_flags(&vm_page_queue_free_mtx, MTX_RECURSE); if (vm_cnt.v_free_count + vm_cnt.v_cache_count > vm_cnt.v_free_reserved || (req_class == VM_ALLOC_SYSTEM && @@ -2007,17 +2001,17 @@ unlock: } else if (level >= 0) { /* * The page is reserved but not yet allocated. In - * other words, it is still cached or free. Extend - * the current run by one page. + * other words, it is free. Extend the current run by + * one page. */ run_ext = 1; #endif } else if ((order = m->order) < VM_NFREEORDER) { /* * The page is enqueued in the physical memory - * allocator's cache/free page queues. Moreover, it + * allocator's free page queues. Moreover, it * is the first page in a power-of-two-sized run of - * contiguous cache/free pages. Add these pages to + * contiguous free pages. Add these pages to * the end of the current run, and jump ahead. */ run_ext = 1 << order; @@ -2026,8 +2020,8 @@ unlock: /* * Skip the page for one of the following reasons: (1) * It is enqueued in the physical memory allocator's - * cache/free page queues. However, it is not the - * first page in a run of contiguous cache/free pages. + * free page queues. However, it is not the + * first page in a run of contiguous free pages. * (This case rarely occurs because the scan is * performed in ascending order.) (2) It is not * reserved, and it is transitioning from free to @@ -2249,9 +2243,9 @@ unlock: if (order < VM_NFREEORDER) { /* * The page is enqueued in the physical memory - * allocator's cache/free page queues. + * allocator's free page queues. * Moreover, it is the first page in a power- - * of-two-sized run of contiguous cache/free + * of-two-sized run of contiguous free * pages. Jump ahead to the last page within * that run, and continue from there. */ @@ -2302,7 +2296,7 @@ CTASSERT(powerof2(NRUNS)); * conditions by relocating the virtual pages using that physical memory. * Returns true if reclamation is successful and false otherwise. Since * relocation requires the allocation of physical pages, reclamation may - * fail due to a shortage of cache/free pages. When reclamation fails, + * fail due to a shortage of free pages. When reclamation fails, * callers are expected to perform VM_WAIT before retrying a failed * allocation operation, e.g., vm_page_alloc_contig(). * @@ -2339,8 +2333,8 @@ vm_page_reclaim_contig(int req, u_long npages, vm_paddr_t low, vm_paddr_t high, req_class = VM_ALLOC_SYSTEM; /* - * Return if the number of cached and free pages cannot satisfy the - * requested allocation. + * Return if the number of free pages cannot satisfy the requested + * allocation. */ count = vm_cnt.v_free_count + vm_cnt.v_cache_count; if (count < npages + vm_cnt.v_free_reserved || (count < npages + @@ -2600,9 +2594,8 @@ vm_page_activate(vm_page_t m) /* * vm_page_free_wakeup: * - * Helper routine for vm_page_free_toq() and vm_page_cache(). This - * routine is called when a page has been added to the cache or free - * queues. + * Helper routine for vm_page_free_toq() and vm_page_reclaim_run(). This + * routine is called when a page has been added to the free queues. * * The page queues must be locked. */ @@ -2691,7 +2684,7 @@ vm_page_free_toq(vm_page_t m) /* * Insert the page into the physical memory allocator's - * cache/free page queues. + * free page queues. */ mtx_lock(&vm_page_queue_free_mtx); vm_phys_freecnt_adj(m, 1); @@ -2795,22 +2788,10 @@ vm_page_unwire(vm_page_t m, uint8_t queue) /* * Move the specified page to the inactive queue. 
* - * Many pages placed on the inactive queue should actually go - * into the cache, but it is difficult to figure out which. What - * we do instead, if the inactive target is well met, is to put - * clean pages at the head of the inactive queue instead of the tail. - * This will cause them to be moved to the cache more quickly and - * if not actively re-referenced, reclaimed more quickly. If we just - * stick these pages at the end of the inactive queue, heavy filesystem - * meta-data accesses can cause an unnecessary paging load on memory bound - * processes. This optimization causes one-time-use metadata to be - * reused more quickly. - * * Normally noreuse is FALSE, resulting in LRU operation. noreuse is set - * to TRUE if we want this page to be 'as if it were placed in the cache', - * except without unmapping it from the process address space. In - * practice this is implemented by inserting the page at the head of the - * queue, using a marker page to guide FIFO insertion ordering. + * to TRUE if the caller believes the page is unlikely to be reactivated. + * In this case, the page is placed near the head of the inactive queue, + * using a marker page to guide FIFO insertion ordering. * * The page must be locked. */ @@ -2935,12 +2916,9 @@ vm_page_advise(vm_page_t m, int advice) /* * Mark the page clean. This will allow the page to be freed * up by the system. However, such pages are often reused - * quickly by malloc() so we do not do anything that would - * cause a page fault if we can help it. - * - * Specifically, we do not try to actually free the page now - * nor do we try to put it in the cache (which would cause a - * page fault on reuse). + * quickly by malloc(), so we do not go as far as actually + * freeing them (since this would cause a page fault upon + * reuse). * * But we do make the page as freeable as we can without * actually taking the step of unmapping it. diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h index 5d45a50..a22c0dd 100644 --- a/sys/vm/vm_page.h +++ b/sys/vm/vm_page.h @@ -352,10 +352,6 @@ extern struct mtx_padalign pa_lock[]; * free * Available for allocation now. * - * cache - * Almost available for allocation. Still associated with - * an object, but clean and immediately freeable. - * * The following lists are LRU sorted: * * inactive @@ -452,8 +448,6 @@ vm_page_t vm_page_alloc_contig(vm_object_t object, vm_pindex_t pindex, int req, vm_paddr_t boundary, vm_memattr_t memattr); vm_page_t vm_page_alloc_freelist(int, int); vm_page_t vm_page_grab (vm_object_t, vm_pindex_t, int); -void vm_page_cache_free(vm_object_t, vm_pindex_t, vm_pindex_t); -void vm_page_cache_transfer(vm_object_t, vm_pindex_t, vm_object_t); int vm_page_try_to_free (vm_page_t); void vm_page_deactivate (vm_page_t); void vm_page_deactivate_noreuse(vm_page_t); @@ -463,7 +457,6 @@ vm_page_t vm_page_find_least(vm_object_t, vm_pindex_t); vm_page_t vm_page_getfake(vm_paddr_t paddr, vm_memattr_t memattr); void vm_page_initfake(vm_page_t m, vm_paddr_t paddr, vm_memattr_t memattr); int vm_page_insert (vm_page_t, vm_object_t, vm_pindex_t); -boolean_t vm_page_is_cached(vm_object_t object, vm_pindex_t pindex); void vm_page_launder(vm_page_t m); vm_page_t vm_page_lookup (vm_object_t, vm_pindex_t); vm_page_t vm_page_next(vm_page_t m); diff --git a/sys/vm/vm_pageout.c b/sys/vm/vm_pageout.c index fc0693f..fcd0df3 100644 --- a/sys/vm/vm_pageout.c +++ b/sys/vm/vm_pageout.c @@ -1083,7 +1083,7 @@ vm_pageout_laundry_worker(void *arg) * vm_pageout_scan does the dirty work for the pageout daemon. 
* * pass 0 - Update active LRU/deactivate pages - * pass 1 - Move inactive to cache or free + * pass 1 - Move inactive to free */ static void vm_pageout_scan(struct vm_domain *vmd, int pass) @@ -1124,8 +1124,7 @@ vm_pageout_scan(struct vm_domain *vmd, int pass) addl_page_shortage = 0; /* - * Calculate the number of pages we want to either free or move - * to the cache. + * Calculate the number of pages we want to free. */ if (pass > 0) { deficit = atomic_readandclear_int(&vm_pageout_deficit); @@ -1135,11 +1134,10 @@ vm_pageout_scan(struct vm_domain *vmd, int pass) starting_page_shortage = page_shortage; /* - * Start scanning the inactive queue for pages we can move to the - * cache or free. The scan will stop when the target is reached or - * we have scanned the entire inactive queue. Note that m->act_count - * is not used to form decisions for the inactive queue, only for the - * active queue. + * Start scanning the inactive queue for pages we can reclaim. + * The scan will stop when the target is reached or we have scanned the + * entire inactive queue. Note that m->act_count is not used to form + * decisions for the inactive queue, only for the active queue. */ pq = &vmd->vmd_pagequeues[PQ_INACTIVE]; maxscan = pq->pq_cnt; @@ -1317,7 +1315,7 @@ drop_page: #if !defined(NO_SWAPPING) /* - * Wakeup the swapout daemon if we didn't cache or free the targeted + * Wakeup the swapout daemon if we didn't free the targeted * number of pages. */ if (vm_swap_enabled && page_shortage > 0) diff --git a/sys/vm/vm_reserv.c b/sys/vm/vm_reserv.c index 5519d2f..e9d8faf 100644 --- a/sys/vm/vm_reserv.c +++ b/sys/vm/vm_reserv.c @@ -198,7 +198,7 @@ static vm_reserv_t vm_reserv_array; /* * The partially-populated reservation queue * - * This queue enables the fast recovery of an unused cached or free small page + * This queue enables the fast recovery of an unused free small page * from a partially-populated reservation. The reservation at the head of * this queue is the least-recently-changed, partially-populated reservation. * @@ -721,8 +721,8 @@ found: } /* - * Breaks the given reservation. Except for the specified cached or free - * page, all cached and free pages in the reservation are returned to the + * Breaks the given reservation. Except for the specified free + * page, all free pages in the reservation are returned to the * physical memory allocator. The reservation's population count and map are * reset to their initial state. * @@ -908,7 +908,7 @@ vm_reserv_level_iffullpop(vm_page_t m) } /* - * Breaks the given partially-populated reservation, releasing its cached and + * Breaks the given partially-populated reservation, releasing its * free pages to the physical memory allocator. * * The free page queue lock must be held. @@ -928,7 +928,7 @@ vm_reserv_reclaim(vm_reserv_t rv) /* * Breaks the reservation at the head of the partially-populated reservation - * queue, releasing its cached and free pages to the physical memory + * queue, releasing its free pages to the physical memory * allocator. Returns TRUE if a reservation is broken and FALSE otherwise. * * The free page queue lock must be held. @@ -948,10 +948,9 @@ vm_reserv_reclaim_inactive(void) /* * Searches the partially-populated reservation queue for the least recently - * active reservation with unused pages, i.e., cached or free, that satisfy the - * given request for contiguous physical memory. If a satisfactory reservation - * is found, it is broken. Returns TRUE if a reservation is broken and FALSE - * otherwise. 
+ * active reservation with free pages that satisfy the given request for + * contiguous physical memory. If a satisfactory reservation is found, it is + * broken. Returns TRUE if a reservation is broken and FALSE otherwise. * * The free page queue lock must be held. */ -- 2.8.1
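
Note on the retry discipline mentioned above vm_page_reclaim_contig(): the updated comment says that when reclamation fails for lack of free pages, callers are expected to VM_WAIT and then retry the failed allocation, e.g., vm_page_alloc_contig(). The following is only a hedged sketch of that pattern, not code from this patch; the function name, the unmanaged (VM_ALLOC_NOOBJ) request, and the bounded retry count are placeholder choices.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

static vm_page_t
contig_alloc_retry(u_long npages, vm_paddr_t low, vm_paddr_t high,
    u_long alignment, vm_paddr_t boundary)
{
	vm_page_t m;
	int req, tries;

	req = VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
	for (tries = 0; tries < 3; tries++) {
		m = vm_page_alloc_contig(NULL, 0, req, npages, low, high,
		    alignment, boundary, VM_MEMATTR_DEFAULT);
		if (m != NULL)
			return (m);
		/*
		 * The free page queues could not satisfy the request.  Try
		 * to reclaim a contiguous run of free pages; if that fails
		 * too, sleep until the page daemon has freed more pages
		 * before retrying.
		 */
		if (!vm_page_reclaim_contig(req, npages, low, high,
		    alignment, boundary))
			VM_WAIT;
	}
	return (NULL);
}

Bounding the number of retries is a caller policy choice here; an unbounded loop is also possible, but a cap keeps a persistent shortage from stalling the consumer indefinitely.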
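
Similarly, with the cache queue gone, the "as if it were placed in the cache" behaviour described in the removed vm_page.c comment is now expressed by deactivating a page at the head of the inactive queue with vm_page_deactivate_noreuse(), which (per the updated comment) requires the page lock. A minimal usage sketch, assuming 'm' is a page the caller has finished with and does not expect to reuse; the wrapper name is hypothetical.

#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hand back a page that is unlikely to be reactivated, placing it near
 * the head of the inactive queue so it is reclaimed ahead of ordinary
 * LRU pages.
 */
static void
release_page_noreuse(vm_page_t m)
{

	/* vm_page_deactivate_noreuse() requires the page lock. */
	vm_page_lock(m);
	vm_page_deactivate_noreuse(m);
	vm_page_unlock(m);
}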