    Subject: [PATCH v10 08/15] mm/lru: introduce TestClearPageLRU
    Combine the PageLRU check and ClearPageLRU into one operation with the
    newly introduced function TestClearPageLRU. This function will be used
    as the page isolation precondition, preventing racing isolations
    elsewhere.

    There may then be pages on an lru list that no longer have PageLRU
    set, so the BUG checks asserting PageLRU need to be removed
    accordingly.
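
    For illustration, a minimal sketch of the isolation pattern this
    enables (not part of the diff below; it uses the helpers this patch
    already touches, with the lru_lock handling elided):

	/*
	 * TestClearPageLRU() tests and clears PG_lru atomically, so of
	 * several racing isolators only one can see the flag set and
	 * proceed; the lru_lock is still needed for the list removal.
	 */
	if (TestClearPageLRU(page)) {
		struct lruvec *lruvec;

		/* We won the race and own the isolation. */
		lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
		del_page_from_lru_list(page, lruvec, page_lru(page));
	} else {
		/* Someone else has already isolated this page. */
	}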

    As Andrew Morton mentioned, this change dirties the cacheline even
    for a page that isn't on the LRU. But per the report from Rong Chen
    <rong.a.chen@intel.com>, the cost is acceptable:
    https://lkml.org/lkml/2020/3/4/173

    Suggested-by: Johannes Weiner <hannes@cmpxchg.org>
    Signed-off-by: Alex Shi <alex.shi@linux.alibaba.com>
    Cc: Johannes Weiner <hannes@cmpxchg.org>
    Cc: Michal Hocko <mhocko@kernel.org>
    Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
    Cc: Andrew Morton <akpm@linux-foundation.org>
    Cc: linux-kernel@vger.kernel.org
    Cc: cgroups@vger.kernel.org
    Cc: linux-mm@kvack.org
    ---
    include/linux/page-flags.h |  1 +
    mm/mlock.c                 |  3 +--
    mm/swap.c                  |  8 ++------
    mm/vmscan.c                | 29 +++++++++++++----------------
    4 files changed, 17 insertions(+), 24 deletions(-)

    diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
    index 222f6f7b2bb3..45a576631a94 100644
    --- a/include/linux/page-flags.h
    +++ b/include/linux/page-flags.h
    @@ -326,6 +326,7 @@ static inline void page_init_poison(struct page *page, size_t size)
    PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD)
    __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD)
    PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD)
    + TESTCLEARFLAG(LRU, lru, PF_HEAD)
    PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD)
    TESTCLEARFLAG(Active, active, PF_HEAD)
    PAGEFLAG(Workingset, workingset, PF_HEAD)
    diff --git a/mm/mlock.c b/mm/mlock.c
    index a72c1eeded77..03b3a5d99ad7 100644
    --- a/mm/mlock.c
    +++ b/mm/mlock.c
    @@ -108,13 +108,12 @@ void mlock_vma_page(struct page *page)
    */
    static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
    {
    - if (PageLRU(page)) {
    + if (TestClearPageLRU(page)) {
    struct lruvec *lruvec;

    lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
    if (getpage)
    get_page(page);
    - ClearPageLRU(page);
    del_page_from_lru_list(page, lruvec, page_lru(page));
    return true;
    }
    diff --git a/mm/swap.c b/mm/swap.c
    index 6203e35652f4..73f3a46eb160 100644
    --- a/mm/swap.c
    +++ b/mm/swap.c
    @@ -59,15 +59,13 @@
    */
    static void __page_cache_release(struct page *page)
    {
    - if (PageLRU(page)) {
    + if (TestClearPageLRU(page)) {
    pg_data_t *pgdat = page_pgdat(page);
    struct lruvec *lruvec;
    unsigned long flags;

    spin_lock_irqsave(&pgdat->lru_lock, flags);
    lruvec = mem_cgroup_page_lruvec(page, pgdat);
    - VM_BUG_ON_PAGE(!PageLRU(page), page);
    - __ClearPageLRU(page);
    del_page_from_lru_list(page, lruvec, page_off_lru(page));
    spin_unlock_irqrestore(&pgdat->lru_lock, flags);
    }
    @@ -831,7 +829,7 @@ void release_pages(struct page **pages, int nr)
    continue;
    }

    - if (PageLRU(page)) {
    + if (TestClearPageLRU(page)) {
    struct pglist_data *pgdat = page_pgdat(page);

    if (pgdat != locked_pgdat) {
    @@ -844,8 +842,6 @@ void release_pages(struct page **pages, int nr)
    }

    lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
    - VM_BUG_ON_PAGE(!PageLRU(page), page);
    - __ClearPageLRU(page);
    del_page_from_lru_list(page, lruvec, page_off_lru(page));
    }

    diff --git a/mm/vmscan.c b/mm/vmscan.c
    index 3910277da39e..50566056b95a 100644
    --- a/mm/vmscan.c
    +++ b/mm/vmscan.c
    @@ -1525,16 +1525,16 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
    {
    int ret = -EINVAL;

    - /* Only take pages on the LRU. */
    - if (!PageLRU(page))
    - return ret;
    -
    /* Compaction should not handle unevictable pages but CMA can do so */
    if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
    return ret;

    ret = -EBUSY;

    + /* Only take pages on the LRU. */
    + if (!PageLRU(page))
    + return ret;
    +
    /*
    * To minimise LRU disruption, the caller can indicate that it only
    * wants to isolate pages it will be able to operate on without
    @@ -1652,8 +1652,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
    page = lru_to_page(src);
    prefetchw_prev_lru_page(page, src, flags);

    - VM_BUG_ON_PAGE(!PageLRU(page), page);
    -
    nr_pages = compound_nr(page);
    total_scan += nr_pages;

    @@ -1750,21 +1748,20 @@ int isolate_lru_page(struct page *page)
    VM_BUG_ON_PAGE(!page_count(page), page);
    WARN_RATELIMIT(PageTail(page), "trying to isolate tail page");

    - if (PageLRU(page)) {
    + get_page(page);
    + if (TestClearPageLRU(page)) {
    pg_data_t *pgdat = page_pgdat(page);
    struct lruvec *lruvec;
    + int lru = page_lru(page);

    - spin_lock_irq(&pgdat->lru_lock);
    lruvec = mem_cgroup_page_lruvec(page, pgdat);
    - if (PageLRU(page)) {
    - int lru = page_lru(page);
    - get_page(page);
    - ClearPageLRU(page);
    - del_page_from_lru_list(page, lruvec, lru);
    - ret = 0;
    - }
    + spin_lock_irq(&pgdat->lru_lock);
    + del_page_from_lru_list(page, lruvec, lru);
    spin_unlock_irq(&pgdat->lru_lock);
    - }
    + ret = 0;
    + } else
    + put_page(page);
    +
    return ret;
    }

    --
    1.8.3.1