    From: Alex Shi <alex.shi@linux.alibaba.com>
    Subject: [PATCH v16 19/22] mm/lru: introduce the relock_page_lruvec function

    Use this new function to replace the repeated open-coded relock sequence; no functional change.

    Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
    Cc: Matthew Wilcox <willy@infradead.org>
    Cc: Mel Gorman <mgorman@techsingularity.net>
    Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: Tejun Heo <tj@kernel.org>
    Cc: linux-kernel@vger.kernel.org
    Cc: cgroups@vger.kernel.org
    Cc: linux-mm@kvack.org
    ---
    mm/mlock.c  |  9 +--------
    mm/swap.c   | 33 +++++++--------------------------
    mm/vmscan.c |  8 +-------
    3 files changed, 9 insertions(+), 41 deletions(-)
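
    The helper itself is not defined in the hunks below; it is introduced elsewhere
    in the series (presumably in include/linux/memcontrol.h). The following is a
    minimal sketch of what relock_page_lruvec_irq and relock_page_lruvec_irqsave
    plausibly look like, inferred only from the open-coded pattern they replace;
    the bodies and placement here are assumptions, not the series' actual code.

    	/*
    	 * Sketch only: retake the lru lock only when @page belongs to a
    	 * different lruvec than the one the caller already holds.
    	 * @locked_lruvec may be NULL on the first iteration of a loop.
    	 */
    	static inline struct lruvec *relock_page_lruvec_irq(struct page *page,
    			struct lruvec *locked_lruvec)
    	{
    		struct lruvec *lruvec;

    		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    		if (likely(locked_lruvec == lruvec))
    			return locked_lruvec;	/* already holding the right lock */

    		if (locked_lruvec)
    			unlock_page_lruvec_irq(locked_lruvec);

    		return lock_page_lruvec_irq(page);
    	}

    	/* irqsave flavor: identical logic, but saves/restores IRQ flags */
    	static inline struct lruvec *relock_page_lruvec_irqsave(struct page *page,
    			struct lruvec *locked_lruvec, unsigned long *flags)
    	{
    		struct lruvec *lruvec;

    		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    		if (likely(locked_lruvec == lruvec))
    			return locked_lruvec;

    		if (locked_lruvec)
    			unlock_page_lruvec_irqrestore(locked_lruvec, *flags);

    		return lock_page_lruvec_irqsave(page, flags);
    	}

    As the hunks show, callers start with lruvec = NULL, call the relock helper once
    per page inside the loop, and drop whatever lock is still held after the loop,
    e.g. "if (lruvec) unlock_page_lruvec_irqrestore(lruvec, flags);".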

    diff --git a/mm/mlock.c b/mm/mlock.c
    index cb23a0c2cfbf..4f40fc091cf9 100644
    --- a/mm/mlock.c
    +++ b/mm/mlock.c
    @@ -289,17 +289,10 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
    /* Phase 1: page isolation */
    for (i = 0; i < nr; i++) {
    struct page *page = pvec->pages[i];
    - struct lruvec *new_lruvec;
    bool clearlru;

    clearlru = TestClearPageLRU(page);
    -
    - new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    - if (new_lruvec != lruvec) {
    - if (lruvec)
    - unlock_page_lruvec_irq(lruvec);
    - lruvec = lock_page_lruvec_irq(page);
    - }
    + lruvec = relock_page_lruvec_irq(page, lruvec);

    if (!TestClearPageMlocked(page)) {
    delta_munlocked++;
    diff --git a/mm/swap.c b/mm/swap.c
    index 129c532357a4..9fb906fbaed5 100644
    --- a/mm/swap.c
    +++ b/mm/swap.c
    @@ -209,19 +209,12 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,

    for (i = 0; i < pagevec_count(pvec); i++) {
    struct page *page = pvec->pages[i];
    - struct lruvec *new_lruvec;
    -
    - new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    - if (lruvec != new_lruvec) {
    - if (lruvec)
    - unlock_page_lruvec_irqrestore(lruvec, flags);
    - lruvec = lock_page_lruvec_irqsave(page, &flags);
    - }

    /* block memcg migration during page moving between lru */
    if (!TestClearPageLRU(page))
    continue;

    + lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
    (*move_fn)(page, lruvec);

    SetPageLRU(page);
    @@ -866,17 +859,12 @@ void release_pages(struct page **pages, int nr)
    }

    if (PageLRU(page)) {
    - struct lruvec *new_lruvec;
    -
    - new_lruvec = mem_cgroup_page_lruvec(page,
    - page_pgdat(page));
    - if (new_lruvec != lruvec) {
    - if (lruvec)
    - unlock_page_lruvec_irqrestore(lruvec,
    - flags);
    + struct lruvec *pre_lruvec = lruvec;
    +
    + lruvec = relock_page_lruvec_irqsave(page, lruvec,
    + &flags);
    + if (pre_lruvec != lruvec)
    lock_batch = 0;
    - lruvec = lock_page_lruvec_irqsave(page, &flags);
    - }

    __ClearPageLRU(page);
    del_page_from_lru_list(page, lruvec, page_off_lru(page));
    @@ -982,15 +970,8 @@ void __pagevec_lru_add(struct pagevec *pvec)

    for (i = 0; i < pagevec_count(pvec); i++) {
    struct page *page = pvec->pages[i];
    - struct lruvec *new_lruvec;
    -
    - new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    - if (lruvec != new_lruvec) {
    - if (lruvec)
    - unlock_page_lruvec_irqrestore(lruvec, flags);
    - lruvec = lock_page_lruvec_irqsave(page, &flags);
    - }

    + lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
    __pagevec_lru_add_fn(page, lruvec);
    }
    if (lruvec)
    diff --git a/mm/vmscan.c b/mm/vmscan.c
    index 168c1659e430..bdb53a678e7e 100644
    --- a/mm/vmscan.c
    +++ b/mm/vmscan.c
    @@ -4292,15 +4292,9 @@ void check_move_unevictable_pages(struct pagevec *pvec)

    for (i = 0; i < pvec->nr; i++) {
    struct page *page = pvec->pages[i];
    - struct lruvec *new_lruvec;

    pgscanned++;
    - new_lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    - if (lruvec != new_lruvec) {
    - if (lruvec)
    - unlock_page_lruvec_irq(lruvec);
    - lruvec = lock_page_lruvec_irq(page);
    - }
    + lruvec = relock_page_lruvec_irq(page, lruvec);

    if (!PageLRU(page) || !PageUnevictable(page))
    continue;
    --
    1.8.3.1