From: Zi Yan <ziy@nvidia.com>
Subject: [PATCH v3 01/14] mm: thp: make __split_huge_pmd_locked visible.

Make __split_huge_pmd_locked() visible so that a huge pmd can be split
while the caller is already holding the pmd lock. This is in
preparation for its future use in zap_pmd_range().

Signed-off-by: Zi Yan <zi.yan@cs.rutgers.edu>
---
 include/linux/huge_mm.h |  2 ++
 mm/huge_memory.c        | 22 ++++++++++++----------
 2 files changed, 14 insertions(+), 10 deletions(-)
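
A note for reviewers (not part of the commit message): the sketch below
illustrates the call pattern this change enables. A path that already
holds the pmd lock, as zap_pmd_range() will in a later patch of this
series, can split the huge pmd in place instead of going through
__split_huge_pmd(), which takes the lock itself. The function
example_split_under_lock() is a made-up name used only for
illustration, assuming the usual mm/huge_mm headers are in scope.

static void example_split_under_lock(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr)
{
	spinlock_t *ptl;

	ptl = pmd_lock(vma->vm_mm, pmd);
	/*
	 * The pmd lock is already held, so call the _locked variant
	 * directly; with this patch it computes haddr from the unaligned
	 * address and issues the mmu notifier calls itself.
	 */
	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		__split_huge_pmd_locked(vma, pmd, addr, false);
	spin_unlock(ptl);
}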

diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a3762d49ba39..2036f69c8284 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -120,6 +120,8 @@ static inline int split_huge_page(struct page *page)
 }
 void deferred_split_huge_page(struct page *page);
 
+void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long haddr, bool freeze);
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		unsigned long address, bool freeze, struct page *page);
 
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 03e4566fc226..cd66532ef667 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1877,8 +1877,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	pmd_populate(mm, pmd, pgtable);
 }
 
-static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long haddr, bool freeze)
+void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
+		unsigned long address, bool freeze)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct page *page;
@@ -1887,6 +1887,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 	bool young, write, dirty, soft_dirty;
 	unsigned long addr;
 	int i;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
 
 	VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
 	VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
@@ -1895,6 +1896,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 
 	count_vm_event(THP_SPLIT_PMD);
 
+	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
+
 	if (!vma_is_anonymous(vma)) {
 		_pmd = pmdp_huge_clear_flush_notify(vma, haddr, pmd);
 		/*
@@ -1904,16 +1907,17 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 		if (arch_needs_pgtable_deposit())
 			zap_deposited_table(mm, pmd);
 		if (vma_is_dax(vma))
-			return;
+			goto out;
 		page = pmd_page(_pmd);
 		if (!PageReferenced(page) && pmd_young(_pmd))
 			SetPageReferenced(page);
 		page_remove_rmap(page, true);
 		put_page(page);
 		add_mm_counter(mm, MM_FILEPAGES, -HPAGE_PMD_NR);
-		return;
+		goto out;
 	} else if (is_huge_zero_pmd(*pmd)) {
-		return __split_huge_zero_page_pmd(vma, haddr, pmd);
+		__split_huge_zero_page_pmd(vma, haddr, pmd);
+		goto out;
 	}
 
 	page = pmd_page(*pmd);
@@ -2010,6 +2014,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
 			put_page(page + i);
 		}
 	}
+out:
+	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
 }
 
 void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
@@ -2017,11 +2023,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	spinlock_t *ptl;
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long haddr = address & HPAGE_PMD_MASK;
 
-	mmu_notifier_invalidate_range_start(mm, haddr, haddr + HPAGE_PMD_SIZE);
 	ptl = pmd_lock(mm, pmd);
-
 	/*
 	 * If caller asks to setup a migration entries, we need a page to check
 	 * pmd against. Otherwise we can end up replacing wrong page.
@@ -2036,10 +2039,9 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			clear_page_mlock(page);
 	} else if (!pmd_devmap(*pmd))
 		goto out;
-	__split_huge_pmd_locked(vma, pmd, haddr, freeze);
+	__split_huge_pmd_locked(vma, pmd, address, freeze);
 out:
 	spin_unlock(ptl);
-	mmu_notifier_invalidate_range_end(mm, haddr, haddr + HPAGE_PMD_SIZE);
 }
 
 void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
--
2.11.0