 
From: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Subject: [PATCH v5 09/11] mm: mempolicy: mbind and migrate_pages support thp migration
Date: Thu, 20 Apr 2017

This patch enables thp migration for mbind(2) and migrate_pages(2).
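
Not part of this patch, but for illustration, below is a minimal user-space
sketch of how both syscalls can be exercised on a THP-backed region once this
support is in place. The two-node layout (nodes 0 and 1), the 2MB region size,
and the libnuma environment (<numaif.h>, link with -lnuma) are assumptions
about the test setup, not something this patch provides.

/*
 * Illustrative user-space sketch, not part of this patch.
 * Assumes a two-node system with THP enabled; build with -lnuma.
 */
#include <numaif.h>
#include <sys/mman.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	size_t len = 2UL << 20;			/* one PMD-sized (2MB) region */
	unsigned long node1 = 1UL << 1;		/* nodemask = {1} */
	unsigned long node0 = 1UL << 0;		/* nodemask = {0} */
	void *p;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	madvise(p, len, MADV_HUGEPAGE);		/* request THP backing */
	memset(p, 0x5a, len);			/* fault the region in */

	/* mbind(2) with MPOL_MF_MOVE: with this patch a pmd-mapped thp is
	 * queued and migrated as a whole instead of being split first. */
	if (mbind(p, len, MPOL_BIND, &node1, 8 * sizeof(node1),
		  MPOL_MF_MOVE | MPOL_MF_STRICT))
		perror("mbind");

	/* migrate_pages(2): move this task's pages from node 1 to node 0;
	 * a thp is likewise migrated without splitting when supported. */
	if (migrate_pages(0, 8 * sizeof(node0), &node1, &node0) < 0)
		perror("migrate_pages");

	munmap(p, len);
	return 0;
}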

Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
---
ChangeLog v1 -> v2:
- support pte-mapped and doubly-mapped thp
---
 mm/mempolicy.c | 108 +++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 79 insertions(+), 29 deletions(-)

diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index fb18ce891586..c2550e7307bb 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -94,6 +94,7 @@
 #include <linux/mm_inline.h>
 #include <linux/mmu_notifier.h>
 #include <linux/printk.h>
+#include <linux/swapops.h>
 
 #include <asm/tlbflush.h>
 #include <linux/uaccess.h>
@@ -486,6 +487,49 @@ static inline bool queue_pages_node_check(struct page *page,
 	return node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT);
 }
 
+static int queue_pages_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+	int ret = 0;
+	struct page *page;
+	struct queue_pages *qp = walk->private;
+	unsigned long flags;
+
+	if (unlikely(is_pmd_migration_entry(*pmd))) {
+		ret = 1;
+		goto unlock;
+	}
+	page = pmd_page(*pmd);
+	if (is_huge_zero_page(page)) {
+		spin_unlock(ptl);
+		__split_huge_pmd(walk->vma, pmd, addr, false, NULL);
+		goto out;
+	}
+	if (!thp_migration_supported()) {
+		get_page(page);
+		spin_unlock(ptl);
+		lock_page(page);
+		ret = split_huge_page(page);
+		unlock_page(page);
+		put_page(page);
+		goto out;
+	}
+	if (queue_pages_node_check(page, qp)) {
+		ret = 1;
+		goto unlock;
+	}
+
+	ret = 1;
+	flags = qp->flags;
+	/* go to thp migration */
+	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
+		migrate_page_add(page, qp->pagelist, flags);
+unlock:
+	spin_unlock(ptl);
+out:
+	return ret;
+}
+
 /*
  * Scan through pages checking if pages follow certain conditions,
  * and move them to the pagelist if they do.
@@ -497,30 +541,15 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	struct page *page;
 	struct queue_pages *qp = walk->private;
 	unsigned long flags = qp->flags;
-	int nid, ret;
+	int ret;
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(walk->mm, pmd);
-		if (pmd_trans_huge(*pmd)) {
-			page = pmd_page(*pmd);
-			if (is_huge_zero_page(page)) {
-				spin_unlock(ptl);
-				__split_huge_pmd(vma, pmd, addr, false, NULL);
-			} else {
-				get_page(page);
-				spin_unlock(ptl);
-				lock_page(page);
-				ret = split_huge_page(page);
-				unlock_page(page);
-				put_page(page);
-				if (ret)
-					return 0;
-			}
-		} else {
-			spin_unlock(ptl);
-		}
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
+		ret = queue_pages_pmd(pmd, ptl, addr, end, walk);
+		if (ret)
+			return 0;
 	}
 
 	if (pmd_trans_unstable(pmd))
@@ -541,7 +570,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 			continue;
 		if (queue_pages_node_check(page, qp))
 			continue;
-		if (PageTransCompound(page)) {
+		if (PageTransCompound(page) && !thp_migration_supported()) {
 			get_page(page);
 			pte_unmap_unlock(pte, ptl);
 			lock_page(page);
@@ -959,19 +988,21 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 
 #ifdef CONFIG_MIGRATION
 /*
- * page migration
+ * page migration, thp tail pages can be passed.
  */
 static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
 {
+	struct page *head = compound_head(page);
	/*
	 * Avoid migrating a page that is shared with others.
	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
-		if (!isolate_lru_page(page)) {
-			list_add_tail(&page->lru, pagelist);
-			inc_node_page_state(page, NR_ISOLATED_ANON +
-					page_is_file_cache(page));
+	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
+		if (!isolate_lru_page(head)) {
+			list_add_tail(&head->lru, pagelist);
+			mod_node_page_state(page_pgdat(head),
+				NR_ISOLATED_ANON + page_is_file_cache(head),
+				hpage_nr_pages(head));
 		}
 	}
 }
@@ -981,7 +1012,17 @@ static struct page *new_node_page(struct page *page, unsigned long node, int **x)
 	if (PageHuge(page))
 		return alloc_huge_page_node(page_hstate(compound_head(page)),
					node);
-	else
+	else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_pages_node(node,
+			(GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_RECLAIM,
+			HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
+	} else
 		return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
						__GFP_THISNODE, 0);
 }
@@ -1147,6 +1188,15 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
 	if (PageHuge(page)) {
 		BUG_ON(!vma);
 		return alloc_huge_page_noerr(vma, address, 1);
+	} else if (thp_migration_supported() && PageTransHuge(page)) {
+		struct page *thp;
+
+		thp = alloc_hugepage_vma(GFP_TRANSHUGE, vma, address,
+					 HPAGE_PMD_ORDER);
+		if (!thp)
+			return NULL;
+		prep_transhuge_page(thp);
+		return thp;
 	}
 	/*
	 * if !vma, alloc_page_vma() will use task or system default policy
-- 
2.11.0