From: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Subject: [PATCHv9 30/36] thp: add option to setup migration entries during PMD split
Date: 2015-07-20
We are going to use migration PTE entries to stabilize page counts.
If the page is mapped with PMDs we need to split the PMD and set up
migration entries. It's reasonable to combine these operations to avoid
double-scanning over the page table.
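
For context (not part of this patch): a migration entry is a swap-type
PTE, so a fault on one of the frozen PTEs takes the swap path, where
do_swap_page() already knows how to wait for it. Simplified sketch of
that existing check (see <linux/swapops.h>):

	swp_entry_t entry = pte_to_swp_entry(orig_pte);

	if (unlikely(non_swap_entry(entry))) {
		if (is_migration_entry(entry))
			/* Sleep until the migration PTE is removed. */
			migration_entry_wait(mm, pmd, address);
	}

While the PTEs are frozen this way, racing faults block instead of
taking new references through this mapping, which is what makes the
page counts stable.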

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
---
 mm/huge_memory.c | 23 +++++++++++++++--------
 1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1f7a7288ffa3..103fa12cf3a4 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -25,6 +25,7 @@
 #include <linux/migrate.h>
 #include <linux/hashtable.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/swapops.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -2628,7 +2629,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 }
 
 static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long haddr)
+		unsigned long haddr, bool freeze)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
@@ -2670,12 +2671,18 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		 * transferred to avoid any possibility of altering
 		 * permissions across VMAs.
 		 */
-		entry = mk_pte(page + i, vma->vm_page_prot);
-		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-		if (!write)
-			entry = pte_wrprotect(entry);
-		if (!young)
-			entry = pte_mkold(entry);
+		if (freeze) {
+			swp_entry_t swp_entry;
+			swp_entry = make_migration_entry(page + i, write);
+			entry = swp_entry_to_pte(swp_entry);
+		} else {
+			entry = mk_pte(page + i, vma->vm_page_prot);
+			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+			if (!write)
+				entry = pte_wrprotect(entry);
+			if (!young)
+				entry = pte_mkold(entry);
+		}
 		pte = pte_offset_map(&_pmd, haddr);
 		BUG_ON(!pte_none(*pte));
 		set_pte_at(mm, haddr, pte, entry);
@@ -2716,7 +2723,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
 	ptl = pmd_lock(mm, pmd);
 	if (likely(pmd_trans_huge(*pmd)))
-		__split_huge_pmd_locked(vma, pmd, haddr);
+		__split_huge_pmd_locked(vma, pmd, haddr, false);
 	spin_unlock(ptl);
 	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
 }
-- 
2.1.4

