Subject: [PATCH 6/7] mm: Cleanup - Reorganize code to group handling of page
From: Tim Chen <tim.c.chen@linux.intel.com>
Date: 3 May 2016
In this patch, we reorganize the paging operations so that the pageout
operations on pages going to the same swap device can be grouped
together. This prepares for the next patch, which removes multiple
pages from the same swap cache together once they have been paged out.

The patch creates a new function, handle_pgout_batch, that takes the
code of handle_pgout and puts a loop around it so that multiple pages
can be processed in one call. handle_pgout is kept as a thin wrapper
that calls handle_pgout_batch on a single page.
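
For readers outside mm/, the refactoring pattern is the usual one for
batching: the per-page function becomes an array-based loop that records
a per-element status instead of returning early, and the original
single-page entry point survives as a one-element wrapper. A minimal
user-space sketch of that pattern (hypothetical names, not the kernel
code in the diff below) looks like this:

	enum item_result { ITEM_KEEP, ITEM_FREE };

	struct item { int dirty; };

	static void handle_items_batch(struct item *items[], int status[], int nr)
	{
		int i;

		for (i = 0; i < nr; i++) {
			if (items[i]->dirty) {
				status[i] = ITEM_KEEP;	/* was "return ITEM_KEEP" */
				continue;		/* move on to the next item */
			}
			status[i] = ITEM_FREE;
		}
	}

	/* The old single-item entry point becomes a one-element wrapper. */
	static enum item_result handle_item(struct item *it)
	{
		struct item *items[1] = { it };
		int status[1];

		handle_items_batch(items, status, 1);
		return status[0];
	}

The per-element status array is what lets the caller dispose of each
page individually after the whole batch has been processed.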

Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
---
 mm/vmscan.c | 338 +++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 196 insertions(+), 142 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index fab61f1..9fc04e1 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -884,154 +884,218 @@ enum pg_result {
  PG_UNKNOWN,
 };
 
-static enum pg_result handle_pgout(struct list_head *page_list,
+static void handle_pgout_batch(struct list_head *page_list,
  struct zone *zone,
  struct scan_control *sc,
  enum ttu_flags ttu_flags,
  enum page_references references,
  bool may_enter_fs,
  bool lazyfree,
- int  *swap_ret,
- struct page *page)
+ struct page *pages[],
+ int  swap_ret[],
+ int ret[],
+ int nr)
 {
  struct address_space *mapping;
+ struct page *page;
+ int i;
 
- mapping =  page_mapping(page);
+ for (i = 0; i < nr; ++i) {
+ page = pages[i];
+ mapping =  page_mapping(page);
 
- /*
-  * The page is mapped into the page tables of one or more
-  * processes. Try to unmap it here.
-  */
- if (page_mapped(page) && mapping) {
- switch (*swap_ret = try_to_unmap(page, lazyfree ?
- (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
- (ttu_flags | TTU_BATCH_FLUSH))) {
- case SWAP_FAIL:
- return PG_ACTIVATE_LOCKED;
- case SWAP_AGAIN:
- return PG_KEEP_LOCKED;
- case SWAP_MLOCK:
- return PG_MLOCKED;
- case SWAP_LZFREE:
- goto lazyfree;
- case SWAP_SUCCESS:
- ; /* try to free the page below */
+ /* check outcome of cache addition */
+ if (!ret[i]) {
+ ret[i] = PG_ACTIVATE_LOCKED;
+ continue;
  }
- }
-
- if (PageDirty(page)) {
  /*
-  * Only kswapd can writeback filesystem pages to
-  * avoid risk of stack overflow but only writeback
-  * if many dirty pages have been encountered.
+  * The page is mapped into the page tables of one or more
+  * processes. Try to unmap it here.
   */
- if (page_is_file_cache(page) &&
- (!current_is_kswapd() ||
-  !test_bit(ZONE_DIRTY, &zone->flags))) {
+ if (page_mapped(page) && mapping) {
+ switch (swap_ret[i] = try_to_unmap(page, lazyfree ?
+ (ttu_flags | TTU_BATCH_FLUSH | TTU_LZFREE) :
+ (ttu_flags | TTU_BATCH_FLUSH))) {
+ case SWAP_FAIL:
+ ret[i] = PG_ACTIVATE_LOCKED;
+ continue;
+ case SWAP_AGAIN:
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ case SWAP_MLOCK:
+ ret[i] = PG_MLOCKED;
+ continue;
+ case SWAP_LZFREE:
+ goto lazyfree;
+ case SWAP_SUCCESS:
+ ; /* try to free the page below */
+ }
+ }
+
+ if (PageDirty(page)) {
  /*
-  * Immediately reclaim when written back.
-  * Similar in principal to deactivate_page()
-  * except we already have the page isolated
-  * and know it's dirty
+  * Only kswapd can writeback filesystem pages to
+  * avoid risk of stack overflow but only writeback
+  * if many dirty pages have been encountered.
   */
- inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
- SetPageReclaim(page);
-
- return PG_KEEP_LOCKED;
- }
+ if (page_is_file_cache(page) &&
+ (!current_is_kswapd() ||
+  !test_bit(ZONE_DIRTY, &zone->flags))) {
+ /*
+  * Immediately reclaim when written back.
+  * Similar in principal to deactivate_page()
+  * except we already have the page isolated
+  * and know it's dirty
+  */
+ inc_zone_page_state(page, NR_VMSCAN_IMMEDIATE);
+ SetPageReclaim(page);
 
- if (references == PAGEREF_RECLAIM_CLEAN)
- return PG_KEEP_LOCKED;
- if (!may_enter_fs)
- return PG_KEEP_LOCKED;
- if (!sc->may_writepage)
- return PG_KEEP_LOCKED;
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ }
 
- /*
-  * Page is dirty. Flush the TLB if a writable entry
-  * potentially exists to avoid CPU writes after IO
-  * starts and then write it out here.
-  */
- try_to_unmap_flush_dirty();
- switch (pageout(page, mapping, sc)) {
- case PAGE_KEEP:
- return PG_KEEP_LOCKED;
- case PAGE_ACTIVATE:
- return PG_ACTIVATE_LOCKED;
- case PAGE_SUCCESS:
- if (PageWriteback(page))
- return PG_KEEP;
- if (PageDirty(page))
- return PG_KEEP;
+ if (references == PAGEREF_RECLAIM_CLEAN) {
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ }
+ if (!may_enter_fs) {
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ }
+ if (!sc->may_writepage) {
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ }
 
  /*
-  * A synchronous write - probably a ramdisk.  Go
-  * ahead and try to reclaim the page.
+  * Page is dirty. Flush the TLB if a writable entry
+  * potentially exists to avoid CPU writes after IO
+  * starts and then write it out here.
   */
- if (!trylock_page(page))
- return PG_KEEP;
- if (PageDirty(page) || PageWriteback(page))
- return PG_KEEP_LOCKED;
- mapping = page_mapping(page);
- case PAGE_CLEAN:
- ; /* try to free the page below */
- }
- }
+ try_to_unmap_flush_dirty();
+ switch (pageout(page, mapping, sc)) {
+ case PAGE_KEEP:
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ case PAGE_ACTIVATE:
+ ret[i] = PG_ACTIVATE_LOCKED;
+ continue;
+ case PAGE_SUCCESS:
+ if (PageWriteback(page)) {
+ ret[i] = PG_KEEP;
+ continue;
+ }
+ if (PageDirty(page)) {
+ ret[i] = PG_KEEP;
+ continue;
+ }
 
- /*
-  * If the page has buffers, try to free the buffer mappings
-  * associated with this page. If we succeed we try to free
-  * the page as well.
-  *
-  * We do this even if the page is PageDirty().
-  * try_to_release_page() does not perform I/O, but it is
-  * possible for a page to have PageDirty set, but it is actually
-  * clean (all its buffers are clean).  This happens if the
-  * buffers were written out directly, with submit_bh(). ext3
-  * will do this, as well as the blockdev mapping.
-  * try_to_release_page() will discover that cleanness and will
-  * drop the buffers and mark the page clean - it can be freed.
-  *
-  * Rarely, pages can have buffers and no ->mapping.  These are
-  * the pages which were not successfully invalidated in
-  * truncate_complete_page().  We try to drop those buffers here
-  * and if that worked, and the page is no longer mapped into
-  * process address space (page_count == 1) it can be freed.
-  * Otherwise, leave the page on the LRU so it is swappable.
-  */
- if (page_has_private(page)) {
- if (!try_to_release_page(page, sc->gfp_mask))
- return PG_ACTIVATE_LOCKED;
- if (!mapping && page_count(page) == 1) {
- unlock_page(page);
- if (put_page_testzero(page))
- return PG_FREE;
- else {
  /*
-  * rare race with speculative reference.
-  * the speculative reference will free
-  * this page shortly, so we may
-  * increment nr_reclaimed (and
-  * leave it off the LRU).
+  * A synchronous write - probably a ramdisk.  Go
+  * ahead and try to reclaim the page.
   */
- return PG_SPECULATIVE_REF;
+ if (!trylock_page(page)) {
+ ret[i] = PG_KEEP;
+ continue;
+ }
+ if (PageDirty(page) || PageWriteback(page)) {
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ }
+ mapping = page_mapping(page);
+ case PAGE_CLEAN:
+ ; /* try to free the page below */
  }
  }
- }
 
+ /*
+  * If the page has buffers, try to free the buffer mappings
+  * associated with this page. If we succeed we try to free
+  * the page as well.
+  *
+  * We do this even if the page is PageDirty().
+  * try_to_release_page() does not perform I/O, but it is
+  * possible for a page to have PageDirty set, but it is actually
+  * clean (all its buffers are clean).  This happens if the
+  * buffers were written out directly, with submit_bh(). ext3
+  * will do this, as well as the blockdev mapping.
+  * try_to_release_page() will discover that cleanness and will
+  * drop the buffers and mark the page clean - it can be freed.
+  *
+  * Rarely, pages can have buffers and no ->mapping.  These are
+  * the pages which were not successfully invalidated in
+  * truncate_complete_page().  We try to drop those buffers here
+  * and if that worked, and the page is no longer mapped into
+  * process address space (page_count == 1) it can be freed.
+  * Otherwise, leave the page on the LRU so it is swappable.
+  */
+ if (page_has_private(page)) {
+ if (!try_to_release_page(page, sc->gfp_mask)) {
+ ret[i] = PG_ACTIVATE_LOCKED;
+ continue;
+ }
+ if (!mapping && page_count(page) == 1) {
+ unlock_page(page);
+ if (put_page_testzero(page)) {
+ ret[i] = PG_FREE;
+ continue;
+ } else {
+ /*
+  * rare race with speculative reference.
+  * the speculative reference will free
+  * this page shortly, so we may
+  * increment nr_reclaimed (and
+  * leave it off the LRU).
+  */
+ ret[i] = PG_SPECULATIVE_REF;
+ continue;
+ }
+ }
+ }
 lazyfree:
- if (!mapping || !__remove_mapping(mapping, page, true))
- return PG_KEEP_LOCKED;
+ if (!mapping || !__remove_mapping(mapping, page, true)) {
+ ret[i] = PG_KEEP_LOCKED;
+ continue;
+ }
+
+ /*
+  * At this point, we have no other references and there is
+  * no way to pick any more up (removed from LRU, removed
+  * from pagecache). Can use non-atomic bitops now (and
+  * we obviously don't have to worry about waking up a process
+  * waiting on the page lock, because there are no references.
+  */
+ __ClearPageLocked(page);
+ ret[i] = PG_FREE;
+ }
+}
+
+static enum pg_result handle_pgout(struct list_head *page_list,
+ struct zone *zone,
+ struct scan_control *sc,
+ enum ttu_flags ttu_flags,
+ enum page_references references,
+ bool may_enter_fs,
+ bool lazyfree,
+ int  *swap_ret,
+ struct page *page)
+{
+ struct page *pages[1];
+ int ret[1];
+ int sret[1];
+
+ pages[0] = page;
 
  /*
-  * At this point, we have no other references and there is
-  * no way to pick any more up (removed from LRU, removed
-  * from pagecache). Can use non-atomic bitops now (and
-  * we obviously don't have to worry about waking up a process
-  * waiting on the page lock, because there are no references.
+  * page is in swap cache or page cache, indicate that
+  * by setting ret[0] to 1
   */
- __ClearPageLocked(page);
- return PG_FREE;
+ ret[0] = 1;
+ handle_pgout_batch(page_list, zone, sc, ttu_flags, references,
+ may_enter_fs, lazyfree, pages, sret, ret, 1);
+ *swap_ret = sret[0];
+ return ret[0];
 }
 
 static void pg_finish(struct page *page,
@@ -1095,14 +1159,13 @@ static unsigned long shrink_anon_page_list(struct list_head *page_list,
  bool clean)
 {
  unsigned long nr_reclaimed = 0;
- enum pg_result pg_dispose;
  swp_entry_t swp_entries[SWAP_BATCH];
  struct page *pages[SWAP_BATCH];
  int m, i, k, ret[SWAP_BATCH];
  struct page *page;
 
  while (n > 0) {
- int swap_ret = SWAP_SUCCESS;
+ int swap_ret[SWAP_BATCH];
 
  m = get_swap_pages(n, swp_entries);
  if (!m)
@@ -1127,28 +1190,19 @@ static unsigned long shrink_anon_page_list(struct list_head *page_list,
  */
  add_to_swap_batch(pages, page_list, swp_entries, ret, m);
 
- for (i = 0; i < m; ++i) {
- page = pages[i];
-
- if (!ret[i]) {
- pg_finish(page, PG_ACTIVATE_LOCKED, swap_ret,
- &nr_reclaimed, pgactivate,
- ret_pages, free_pages);
- continue;
- }
-
- if (clean)
- pg_dispose = handle_pgout(page_list, zone, sc,
- ttu_flags, PAGEREF_RECLAIM_CLEAN,
- true, true, &swap_ret, page);
- else
- pg_dispose = handle_pgout(page_list, zone, sc,
- ttu_flags, PAGEREF_RECLAIM,
- true, true, &swap_ret, page);
-
- pg_finish(page, pg_dispose, swap_ret, &nr_reclaimed,
- pgactivate, ret_pages, free_pages);
- }
+ if (clean)
+ handle_pgout_batch(page_list, zone, sc, ttu_flags,
+ PAGEREF_RECLAIM_CLEAN, true, true,
+ pages, swap_ret, ret, m);
+ else
+ handle_pgout_batch(page_list, zone, sc, ttu_flags,
+ PAGEREF_RECLAIM, true, true,
+ pages, swap_ret, ret, m);
+
+ for (i = 0; i < m; ++i)
+ pg_finish(pages[i], ret[i], swap_ret[i],
+ &nr_reclaimed, pgactivate,
+ ret_pages, free_pages);
  }
  return nr_reclaimed;
 
-- 
2.5.5
