    Subject: [PATCH 4.19 007/139] mm/khugepaged: minor reorderings in collapse_shmem()
    4.19-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    commit 042a30824871fa3149b0127009074b75cc25863c upstream.

    Several cleanups in collapse_shmem(), most of which probably do not
    really matter beyond doing things in a more familiar and reassuring
    order.  Simplify the failure gotos in the main loop, and on success
    update stats while interrupts are still disabled from the last
    iteration.
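
    In outline, the reordering gives the main loop a single failure exit:
    every error path now jumps to one label that unwinds in a fixed
    order, and each goto names the lock state it expects (tree_locked
    vs. tree_unlocked); the THP statistics are then bumped while the
    i_pages lock is still held with interrupts disabled, so the separate
    local_irq_save()/local_irq_restore() section can be deleted.  A
    minimal standalone sketch of that shape, with a pthread mutex
    standing in for the xa_lock and hypothetical helpers process_item()
    and stats_update() standing in for the per-page work:

        #include <stdbool.h>
        #include <pthread.h>

        static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;

        /* Hypothetical stand-in for the per-page work in the loop. */
        static bool process_item(int i)
        {
                return i != 3;          /* pretend item 3 fails */
        }

        /* Hypothetical stand-in for the NR_SHMEM_THPS etc. updates. */
        static void stats_update(int n)
        {
                (void)n;
        }

        static int collapse_like(int nitems)
        {
                int err = 0;
                int i;

                pthread_mutex_lock(&tree_lock);
                for (i = 0; i < nitems; i++) {
                        if (!process_item(i)) {
                                err = -1;
                                goto tree_locked;  /* lock still held */
                        }
                }
                /*
                 * Success: update stats while the lock is still held
                 * from the last iteration, rather than opening a second
                 * critical section afterwards.
                 */
                stats_update(nitems);
        tree_locked:
                pthread_mutex_unlock(&tree_lock);
                return err;
        }

    The point of the single exit is that every path re-establishes the
    same invariant (lock taken once, dropped once), which is what allows
    the out_lru and out_isolate_failed labels to be folded into
    out_unlock.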

    Link: http://lkml.kernel.org/r/alpine.LSU.2.11.1811261526400.2275@eggly.anvils
    Fixes: f3f0e1d2150b2 ("khugepaged: add support of collapse for tmpfs/shmem pages")
    Signed-off-by: Hugh Dickins <hughd@google.com>
    Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Cc: Jerome Glisse <jglisse@redhat.com>
    Cc: Konstantin Khlebnikov <khlebnikov@yandex-team.ru>
    Cc: Matthew Wilcox <willy@infradead.org>
    Cc: <stable@vger.kernel.org> [4.8+]
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    mm/khugepaged.c | 73 ++++++++++++++++++++-----------------------------
    1 file changed, 30 insertions(+), 43 deletions(-)

    diff --git a/mm/khugepaged.c b/mm/khugepaged.c
    index 068868763b78..d0a347e6fd08 100644
    --- a/mm/khugepaged.c
    +++ b/mm/khugepaged.c
    @@ -1330,13 +1330,12 @@ static void collapse_shmem(struct mm_struct *mm,
                     goto out;
             }
     
    +        __SetPageLocked(new_page);
    +        __SetPageSwapBacked(new_page);
             new_page->index = start;
             new_page->mapping = mapping;
    -        __SetPageSwapBacked(new_page);
    -        __SetPageLocked(new_page);
             BUG_ON(!page_ref_freeze(new_page, 1));
     
    -
             /*
              * At this point the new_page is 'frozen' (page_count() is zero), locked
              * and not up-to-date. It's safe to insert it into radix tree, because
    @@ -1365,13 +1364,13 @@ static void collapse_shmem(struct mm_struct *mm,
                      */
                     if (n && !shmem_charge(mapping->host, n)) {
                             result = SCAN_FAIL;
    -                        break;
    +                        goto tree_locked;
                     }
    -                nr_none += n;
                     for (; index < min(iter.index, end); index++) {
                             radix_tree_insert(&mapping->i_pages, index,
                                             new_page + (index % HPAGE_PMD_NR));
                     }
    +                nr_none += n;
     
                     /* We are done. */
                     if (index >= end)
    @@ -1387,12 +1386,12 @@ static void collapse_shmem(struct mm_struct *mm,
                                     result = SCAN_FAIL;
                                     goto tree_unlocked;
                             }
    -                        xa_lock_irq(&mapping->i_pages);
                     } else if (trylock_page(page)) {
                             get_page(page);
    +                        xa_unlock_irq(&mapping->i_pages);
                     } else {
                             result = SCAN_PAGE_LOCK;
    -                        break;
    +                        goto tree_locked;
                     }
     
                     /*
    @@ -1407,11 +1406,10 @@ static void collapse_shmem(struct mm_struct *mm,
                             result = SCAN_TRUNCATED;
                             goto out_unlock;
                     }
    -                xa_unlock_irq(&mapping->i_pages);
     
                     if (isolate_lru_page(page)) {
                             result = SCAN_DEL_PAGE_LRU;
    -                        goto out_isolate_failed;
    +                        goto out_unlock;
                     }
     
                     if (page_mapped(page))
    @@ -1432,7 +1430,9 @@ static void collapse_shmem(struct mm_struct *mm,
                      */
                     if (!page_ref_freeze(page, 3)) {
                             result = SCAN_PAGE_COUNT;
    -                        goto out_lru;
    +                        xa_unlock_irq(&mapping->i_pages);
    +                        putback_lru_page(page);
    +                        goto out_unlock;
                     }
     
                     /*
    @@ -1448,17 +1448,10 @@ static void collapse_shmem(struct mm_struct *mm,
                     slot = radix_tree_iter_resume(slot, &iter);
                     index++;
                     continue;
    -out_lru:
    -                xa_unlock_irq(&mapping->i_pages);
    -                putback_lru_page(page);
    -out_isolate_failed:
    -                unlock_page(page);
    -                put_page(page);
    -                goto tree_unlocked;
     out_unlock:
                     unlock_page(page);
                     put_page(page);
    -                break;
    +                goto tree_unlocked;
             }
     
             /*
    @@ -1466,7 +1459,7 @@ static void collapse_shmem(struct mm_struct *mm,
              * This code only triggers if there's nothing in radix tree
              * beyond 'end'.
              */
    -        if (result == SCAN_SUCCEED && index < end) {
    +        if (index < end) {
                     int n = end - index;
     
                     /* Stop if extent has been truncated, and is now empty */
    @@ -1478,7 +1471,6 @@ static void collapse_shmem(struct mm_struct *mm,
                             result = SCAN_FAIL;
                             goto tree_locked;
                     }
    -
                     for (; index < end; index++) {
                             radix_tree_insert(&mapping->i_pages, index,
                                             new_page + (index % HPAGE_PMD_NR));
    @@ -1486,14 +1478,19 @@ static void collapse_shmem(struct mm_struct *mm,
                     nr_none += n;
             }
     
    +        __inc_node_page_state(new_page, NR_SHMEM_THPS);
    +        if (nr_none) {
    +                struct zone *zone = page_zone(new_page);
    +
    +                __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
    +                __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
    +        }
    +
     tree_locked:
             xa_unlock_irq(&mapping->i_pages);
     tree_unlocked:
     
             if (result == SCAN_SUCCEED) {
    -                unsigned long flags;
    -                struct zone *zone = page_zone(new_page);
    -
                     /*
                      * Replacing old pages with new one has succeed, now we need to
                      * copy the content and free old pages.
    @@ -1507,11 +1504,11 @@ static void collapse_shmem(struct mm_struct *mm,
                             copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
                                             page);
                             list_del(&page->lru);
    -                        unlock_page(page);
    -                        page_ref_unfreeze(page, 1);
                             page->mapping = NULL;
    +                        page_ref_unfreeze(page, 1);
                             ClearPageActive(page);
                             ClearPageUnevictable(page);
    +                        unlock_page(page);
                             put_page(page);
                             index++;
                     }
    @@ -1520,28 +1517,17 @@ static void collapse_shmem(struct mm_struct *mm,
                             index++;
                     }
     
    -                local_irq_save(flags);
    -                __inc_node_page_state(new_page, NR_SHMEM_THPS);
    -                if (nr_none) {
    -                        __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
    -                        __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
    -                }
    -                local_irq_restore(flags);
    -
    -                /*
    -                 * Remove pte page tables, so we can re-faulti
    -                 * the page as huge.
    -                 */
    -                retract_page_tables(mapping, start);
    -
                     /* Everything is ready, let's unfreeze the new_page */
    -                set_page_dirty(new_page);
                     SetPageUptodate(new_page);
                     page_ref_unfreeze(new_page, HPAGE_PMD_NR);
    +                set_page_dirty(new_page);
                     mem_cgroup_commit_charge(new_page, memcg, false, true);
                     lru_cache_add_anon(new_page);
    -                unlock_page(new_page);
     
    +                /*
    +                 * Remove pte page tables, so we can re-fault the page as huge.
    +                 */
    +                retract_page_tables(mapping, start);
                     *hpage = NULL;
     
                     khugepaged_pages_collapsed++;
    @@ -1573,8 +1559,8 @@ static void collapse_shmem(struct mm_struct *mm,
                             radix_tree_replace_slot(&mapping->i_pages, slot, page);
                             slot = radix_tree_iter_resume(slot, &iter);
                             xa_unlock_irq(&mapping->i_pages);
    -                        putback_lru_page(page);
                             unlock_page(page);
    +                        putback_lru_page(page);
                             xa_lock_irq(&mapping->i_pages);
                     }
                     VM_BUG_ON(nr_none);
    @@ -1583,9 +1569,10 @@ static void collapse_shmem(struct mm_struct *mm,
                     /* Unfreeze new_page, caller would take care about freeing it */
                     page_ref_unfreeze(new_page, 1);
                     mem_cgroup_cancel_charge(new_page, memcg, true);
    -                unlock_page(new_page);
                     new_page->mapping = NULL;
             }
    +
    +        unlock_page(new_page);
     out:
             VM_BUG_ON(!list_empty(&pagelist));
             /* TODO: tracepoints */
    --
    2.17.1

