From: Johannes Weiner <hannes@cmpxchg.org>
Subject: [rfc 5/5] mincore: transparent huge page support
Handle transparent huge page pmd entries natively instead of splitting
them into subpages.
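
For reference, a minimal userspace sketch (not part of the patch) that
exercises this path: it faults in one pmd-sized anonymous region and
probes it with mincore(). MADV_HUGEPAGE is only a hint, so whether the
region really ends up backed by a huge pmd depends on the kernel and
its THP configuration; when it does, the whole vector is filled by a
single memset() in the new mincore_huge_pmd() helper below.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
        size_t len = 2UL * 1024 * 1024;         /* one huge pmd on x86-64 */
        long page = sysconf(_SC_PAGESIZE);
        unsigned char *vec;
        size_t i, resident = 0;
        char *buf;

        buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED)
                return 1;
        madvise(buf, len, MADV_HUGEPAGE);       /* hint only */
        memset(buf, 0, len);                    /* fault the range in */

        vec = malloc(len / page);
        if (!vec || mincore(buf, len, vec))
                return 1;
        for (i = 0; i < len / page; i++)
                resident += vec[i];
        printf("%zu of %zu pages resident\n", resident, len / page);
        return 0;
}

Build with e.g. gcc -O2 demo.c (file name arbitrary); with an intact
huge pmd, all 512 subpages report resident in one go.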

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
---
mm/mincore.c | 37 ++++++++++++++++++++++++++++++++++---
1 files changed, 34 insertions(+), 3 deletions(-)

diff --git a/mm/mincore.c b/mm/mincore.c
index 28cab9d..d4cddc1 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -15,6 +15,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/hugetlb.h>
+#include <linux/rmap.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -144,6 +145,35 @@ static void mincore_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 	pte_unmap_unlock(ptep - 1, ptl);
 }
 
+static int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
+			    unsigned long addr, unsigned long end,
+			    unsigned char *vec)
+{
+	int huge = 0;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+	spin_lock(&vma->vm_mm->page_table_lock);
+	if (likely(pmd_trans_huge(*pmd))) {
+		huge = !pmd_trans_splitting(*pmd);
+		spin_unlock(&vma->vm_mm->page_table_lock);
+		/*
+		 * If we have an intact huge pmd entry, all pages in
+		 * the range are present in the mincore() sense of
+		 * things.
+		 *
+		 * But if the entry is currently being split into
+		 * normal page mappings, wait for it to finish and
+		 * signal the fallback to ptes.
+		 */
+		if (huge)
+			memset(vec, 1, (end - addr) >> PAGE_SHIFT);
+		else
+			wait_split_huge_page(vma->anon_vma, pmd);
+	} else
+		spin_unlock(&vma->vm_mm->page_table_lock);
+#endif
+	return huge;
+}
+
 static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
			      unsigned long addr, unsigned long end,
			      unsigned char *vec)
@@ -152,12 +182,13 @@ static void mincore_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd_t *pmd;
 
 	pmd = pmd_offset(pud, addr);
-	split_huge_page_vma(vma, pmd);
 	do {
 		next = pmd_addr_end(addr, end);
-		if (pmd_none_or_clear_bad(pmd))
+		/* XXX: pmd_none_or_clear_bad() triggers on _PAGE_PSE */
+		if (pmd_none(*pmd))
 			mincore_unmapped_range(vma, addr, next, vec);
-		else
+		else if (!pmd_trans_huge(*pmd) ||
+			 !mincore_huge_pmd(vma, pmd, addr, next, vec))
 			mincore_pte_range(vma, pmd, addr, next, vec);
 		vec += (next - addr) >> PAGE_SHIFT;
 	} while (pmd++, addr = next, addr != end);
--
1.7.0.2
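
As a sanity check for the demo above: THP-enabled kernels account
huge-page-backed anonymous memory as "AnonHugePages" in
/proc/<pid>/smaps, so a hypothetical helper (again not part of this
patch) can sum those fields to confirm the mapping really got a huge
pmd rather than ordinary ptes:

#include <stdio.h>

static long anon_huge_kb(void)
{
        char line[256];
        long total = 0, kb;
        FILE *f = fopen("/proc/self/smaps", "r");

        if (!f)
                return -1;
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "AnonHugePages: %ld kB", &kb) == 1)
                        total += kb;
        fclose(f);
        return total;   /* kB of THP-backed anonymous memory */
}

A nonzero return after the madvise()/memset() in the demo means the
huge-pmd path in mincore_huge_pmd() is the one being exercised.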

