From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCH, RFC 7/9] thp: implement splitting pmd for huge zero page
Date: Thu, 9 Aug 2012

We can't split the huge zero page itself: it is a single global read-only
page, not an ordinary anonymous THP. But we can split a pmd which points
to it.

On splitting such a pmd, we create a page table with all ptes set to the
normal (4k) zero page.
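
As an illustration (hypothetical, not part of the patch), here is a
minimal userspace sketch of the path this enables: a read fault on
untouched anonymous memory can be backed by the huge zero page, and
mprotect() on a 4k sub-range then forces the pmd split implemented below.
Constants and the madvise() hint are illustrative; whether the huge zero
page is actually used depends on the THP configuration:

/* hypothetical demo, not part of this patch */
#include <stdlib.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4UL << 20;		/* 4M: spans at least one 2M pmd */
	size_t hsz = 2UL << 20;		/* HPAGE_PMD_SIZE on x86-64 */
	char *p, *h;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;
	madvise(p, len, MADV_HUGEPAGE);	/* hint: back this range with THP */

	/* first 2M-aligned address inside the mapping */
	h = (char *)(((unsigned long)p + hsz - 1) & ~(hsz - 1));

	/* read fault: with this series the kernel can install a pmd
	 * pointing to the huge zero page */
	(void)*(volatile char *)h;

	/* a 4k sub-range protection change can't be expressed by one
	 * huge pmd, so the kernel has to split it; with this patch the
	 * result is a page table full of normal zero-page ptes */
	if (mprotect(h, 4096, PROT_READ))
		return 1;

	munmap(p, len);
	return 0;
}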

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
mm/huge_memory.c | 36 ++++++++++++++++++++++++++++++++++++
1 files changed, 36 insertions(+), 0 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c8948d6..4df5841 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1599,6 +1599,7 @@ int split_huge_page(struct page *page)
 	struct anon_vma *anon_vma;
 	int ret = 1;
 
+	BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
 	BUG_ON(!PageAnon(page));
 	anon_vma = page_lock_anon_vma(page);
 	if (!anon_vma)
@@ -2503,6 +2504,36 @@ static int khugepaged(void *none)
 	return 0;
 }
 
+static void __split_huge_zero_page_pmd(struct mm_struct *mm, pmd_t *pmd,
+		unsigned long address)
+{
+	pgtable_t pgtable;
+	pmd_t _pmd;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
+	struct vm_area_struct *vma;
+	int i;
+
+	vma = find_vma(mm, address);
+	VM_BUG_ON(vma == NULL);
+	pmdp_clear_flush_notify(vma, haddr, pmd);
+	/* leave pmd empty until pte is filled */
+
+	pgtable = get_pmd_huge_pte(mm);
+	pmd_populate(mm, &_pmd, pgtable);
+
+	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+		pte_t *pte, entry;
+		entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
+		entry = pte_mkspecial(entry);
+		pte = pte_offset_map(&_pmd, haddr);
+		VM_BUG_ON(!pte_none(*pte));
+		set_pte_at(mm, haddr, pte, entry);
+		pte_unmap(pte);
+	}
+	smp_wmb(); /* make pte visible before pmd */
+	pmd_populate(mm, pmd, pgtable);
+}
+
 void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd,
 		unsigned long address)
 {
@@ -2513,6 +2544,11 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd,
 		spin_unlock(&mm->page_table_lock);
 		return;
 	}
+	if (is_huge_zero_pmd(*pmd)) {
+		__split_huge_zero_page_pmd(mm, pmd, address);
+		spin_unlock(&mm->page_table_lock);
+		return;
+	}
 	page = pmd_page(*pmd);
 	VM_BUG_ON(!page_count(page));
 	get_page(page);
--
1.7.7.6
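
Two notes on the code above. First, is_huge_zero_pfn() and
is_huge_zero_pmd() come from earlier patches in this series and are not
part of this diff; roughly, they compare against the pfn of the single
global huge zero page, along the lines of this sketch (illustrative; the
exact implementation lives in the earlier patches):

/* sketch of helpers from earlier in this series, not in this patch */
static unsigned long huge_zero_pfn __read_mostly;

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	return huge_zero_pfn && pfn == huge_zero_pfn;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return is_huge_zero_pfn(pmd_pfn(pmd));
}

Second, the ordering in __split_huge_zero_page_pmd() matters: all
HPAGE_PMD_NR zero-page ptes are written into the still-detached table
first, and the smp_wmb() ensures they are visible before pmd_populate()
makes the table reachable, so a concurrent lockless walker (e.g.
gup_fast) never sees half-initialized entries.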

