Subject: Re: [PATCH next] mm/vmscan: __isolate_lru_page_prepare clean up


On 2020/11/22 8:35 PM, Matthew Wilcox wrote:
> On Sun, Nov 22, 2020 at 08:00:19PM +0800, Alex Shi wrote:
>> mm/compaction.c | 2 +-
>> mm/vmscan.c | 69 +++++++++++++++++++++++--------------------------
>> 2 files changed, 34 insertions(+), 37 deletions(-)
>
> How is it possible you're changing the signature of a function without
> touching a header file? Surely __isolate_lru_page_prepare() must be declared
> in mm/internal.h ?
>
>> +++ b/mm/vmscan.c
>> @@ -1536,19 +1536,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
>> * page: page to consider
>> * mode: one of the LRU isolation modes defined above
>> *
>> - * returns 0 on success, -ve errno on failure.
>> + * returns ture on success, false on failure.
>
> "true".
>
>> @@ -1674,35 +1672,34 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
>> * only when the page is being freed somewhere else.
>> */
>> scan += nr_pages;
>> - switch (__isolate_lru_page_prepare(page, mode)) {
>> - case 0:
>> + if (!__isolate_lru_page_prepare(page, mode)) {
>> + /* else it is being freed elsewhere */
>
> I don't think the word "else" helps here. Just
> /* It is being freed elsewhere */
>
>> + if (!TestClearPageLRU(page)) {
>> /*
>> + * This page may in other isolation path,
>> + * but we still hold lru_lock.
>> */
>
> I don't think this comment helps me understand what's going on here.
> Maybe:
>
> /* Another thread is already isolating this page */
>
>> + put_page(page);
>> list_move(&page->lru, src);
>> + continue;
>> }

Hi Matthew,

Thanks a lot for all the comments. I've picked them all up, and here is v3:

From 167131dd106a96fd08af725df850e0da6ec899af Mon Sep 17 00:00:00 2001
From: Alex Shi <alex.shi@linux.alibaba.com>
Date: Fri, 20 Nov 2020 14:49:16 +0800
Subject: [PATCH v3 next] mm/vmscan: __isolate_lru_page_prepare clean up

The function only returns two results, so using a 'switch' to handle its
return value is unnecessary. Simplify it to a bool function, as Vlastimil
suggested.

Also remove the 'goto' by reusing list_move(), and take Matthew Wilcox's
suggestion to update the comments in the function.

Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Yu Zhao <yuzhao@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Michal Hocko <mhocko@suse.com>
Cc: linux-mm@kvack.org
Cc: linux-kernel@vger.kernel.org
---
include/linux/swap.h | 2 +-
mm/compaction.c | 2 +-
mm/vmscan.c | 68 ++++++++++++++++++++------------------------
3 files changed, 33 insertions(+), 39 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 596bc2f4d9b0..5bba15ac5a2e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -356,7 +356,7 @@ extern void lru_cache_add_inactive_or_unevictable(struct page *page,
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
+extern bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode);
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
diff --git a/mm/compaction.c b/mm/compaction.c
index b68931854253..8d71ffebe6cb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -988,7 +988,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
if (unlikely(!get_page_unless_zero(page)))
goto isolate_fail;

- if (__isolate_lru_page_prepare(page, isolate_mode) != 0)
+ if (!__isolate_lru_page_prepare(page, isolate_mode))
goto isolate_fail_put;

/* Try isolate the page */
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c6f94e55c3fe..4d2703c43310 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1536,19 +1536,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
* page: page to consider
* mode: one of the LRU isolation modes defined above
*
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
*/
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
{
- int ret = -EBUSY;
-
/* Only take pages on the LRU. */
if (!PageLRU(page))
- return ret;
+ return false;

/* Compaction should not handle unevictable pages but CMA can do so */
if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
- return ret;
+ return false;

/*
* To minimise LRU disruption, the caller can indicate that it only
@@ -1561,7 +1559,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
if (mode & ISOLATE_ASYNC_MIGRATE) {
/* All the caller can do on PageWriteback is block */
if (PageWriteback(page))
- return ret;
+ return false;

if (PageDirty(page)) {
struct address_space *mapping;
@@ -1577,20 +1575,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
* from the page cache.
*/
if (!trylock_page(page))
- return ret;
+ return false;

mapping = page_mapping(page);
migrate_dirty = !mapping || mapping->a_ops->migratepage;
unlock_page(page);
if (!migrate_dirty)
- return ret;
+ return false;
}
}

if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
- return ret;
+ return false;

- return 0;
+ return true;
}

/*
@@ -1674,35 +1672,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* only when the page is being freed somewhere else.
*/
scan += nr_pages;
- switch (__isolate_lru_page_prepare(page, mode)) {
- case 0:
- /*
- * Be careful not to clear PageLRU until after we're
- * sure the page is not being freed elsewhere -- the
- * page release code relies on it.
- */
- if (unlikely(!get_page_unless_zero(page)))
- goto busy;
-
- if (!TestClearPageLRU(page)) {
- /*
- * This page may in other isolation path,
- * but we still hold lru_lock.
- */
- put_page(page);
- goto busy;
- }
-
- nr_taken += nr_pages;
- nr_zone_taken[page_zonenum(page)] += nr_pages;
- list_move(&page->lru, dst);
- break;
+ if (!__isolate_lru_page_prepare(page, mode)) {
+ /* It is being freed elsewhere */
+ list_move(&page->lru, src);
+ continue;
+ }
+ /*
+ * Be careful not to clear PageLRU until after we're
+ * sure the page is not being freed elsewhere -- the
+ * page release code relies on it.
+ */
+ if (unlikely(!get_page_unless_zero(page))) {
+ list_move(&page->lru, src);
+ continue;
+ }

- default:
-busy:
- /* else it is being freed elsewhere */
+ if (!TestClearPageLRU(page)) {
+ /* Another thread is already isolating this page */
+ put_page(page);
list_move(&page->lru, src);
+ continue;
}
+
+ nr_taken += nr_pages;
+ nr_zone_taken[page_zonenum(page)] += nr_pages;
+ list_move(&page->lru, dst);
}

/*
--
2.29.GIT
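
Outside the kernel tree, the shape of this refactoring may be easier to see in
a tiny standalone C sketch. This is an illustration only, not kernel code: the
names (struct item, try_prepare, get_ref_unless_zero) are made up, and the
single-threaded refcount stand-in glosses over the atomics and lru_lock that
the real get_page_unless_zero()/TestClearPageLRU() path depends on.

	/*
	 * Userspace sketch of the pattern in this patch: a predicate that
	 * used to return 0 / -EBUSY and be consumed by a switch becomes a
	 * bool, and the caller's goto-based busy path becomes an early
	 * continue. All names here are illustrative.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct item {
		int busy;	/* stands in for the PageXxx() checks */
		int refs;	/* stands in for the page refcount */
	};

	/* Was: int-returning, 0 on success, -EBUSY on failure. */
	static bool try_prepare(const struct item *it)
	{
		if (it->busy)
			return false;	/* was: return -EBUSY */
		return true;		/* was: return 0 */
	}

	/* Stand-in for get_page_unless_zero(): take a ref only if live. */
	static bool get_ref_unless_zero(struct item *it)
	{
		if (it->refs == 0)
			return false;
		it->refs++;
		return true;
	}

	int main(void)
	{
		struct item items[] = { {0, 1}, {1, 1}, {0, 0} };
		int taken = 0;

		for (unsigned i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
			struct item *it = &items[i];

			/* Early continue replaces the old "goto busy" label. */
			if (!try_prepare(it))
				continue;

			if (!get_ref_unless_zero(it))
				continue;

			taken++;
		}

		printf("isolated %d of %zu items\n", taken,
		       sizeof(items) / sizeof(items[0]));
		return 0;
	}

The point is only the calling convention: the 0/-EBUSY switch collapses into a
bool test, and the busy label becomes an early continue, which is what keeps
the loop body in isolate_lru_pages() flat in v3.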