From: Jérôme Glisse <jglisse@redhat.com>
Subject: [PATCH v12 14/29] HMM: Add support for hugetlb.
Date: Tue, 8 Mar 2016
Support hugetlb VMAs almost like other VMAs. The exception is that
migration of hugetlb memory is not supported.

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
---
mm/hmm.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 61 insertions(+), 1 deletion(-)
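
One point worth spelling out for reviewers: the HMM mirror page table has
no huge entries, so a single hugetlb page is filled in as
HPAGE_SIZE/PAGE_SIZE consecutive PAGE_SIZE entries holding consecutive
pfns (see the loop in the new helper below). A minimal standalone sketch
of that arithmetic (plain userspace C; the address and pfn values are
made up for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define HPAGE_SIZE	(1UL << 21)	/* assume 2MB huge pages */

	int main(void)
	{
		unsigned long addr = 0x40000000UL;	/* hypothetical hugetlb va */
		unsigned long end  = addr + HPAGE_SIZE;
		unsigned long pfn  = 0x12345UL;		/* hypothetical head pfn */

		/* One mirror entry per PAGE_SIZE chunk; pfns are consecutive. */
		for (; addr != end; addr += PAGE_SIZE, ++pfn)
			printf("va %#lx -> pfn %#lx\n", addr, pfn);
		return 0;
	}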

diff --git a/mm/hmm.c b/mm/hmm.c
index 7cab6cb..ad44325 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -813,6 +813,65 @@ static int hmm_mirror_fault_pmd(pmd_t *pmdp,
 	return ret;
 }
 
+static int hmm_mirror_fault_hugetlb_entry(pte_t *ptep,
+					  unsigned long hmask,
+					  unsigned long addr,
+					  unsigned long end,
+					  struct mm_walk *walk)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+	struct hmm_mirror_fault *mirror_fault = walk->private;
+	struct hmm_event *event = mirror_fault->event;
+	struct hmm_pt_iter *iter = mirror_fault->iter;
+	bool write = (event->etype == HMM_DEVICE_WFAULT);
+	unsigned long pfn, next;
+	dma_addr_t *hmm_pte;
+	pte_t pte;
+
+	/*
+	 * Huge pages of a user process are always in RAM and never
+	 * swapped out, but in theory this still needs to be checked.
+	 */
+	if (!ptep)
+		return -ENOENT;
+
+	pte = huge_ptep_get(ptep);
+	pfn = pte_pfn(pte);
+	if (huge_pte_none(pte) || (write && !huge_pte_write(pte)))
+		return -ENOENT;
+
+	hmm_pte = hmm_pt_iter_populate(iter, addr, &next);
+	if (!hmm_pte)
+		return -ENOMEM;
+	hmm_pt_iter_directory_lock(iter);
+	for (; addr != end; addr += PAGE_SIZE, ++pfn, ++hmm_pte) {
+		/* Switch to another HMM page table directory? */
+		if (addr == next) {
+			hmm_pt_iter_directory_unlock(iter);
+			hmm_pte = hmm_pt_iter_populate(iter, addr, &next);
+			if (!hmm_pte)
+				return -ENOMEM;
+			hmm_pt_iter_directory_lock(iter);
+		}
+
+		if (hmm_pte_test_valid_dma(hmm_pte))
+			continue;
+
+		if (!hmm_pte_test_valid_pfn(hmm_pte)) {
+			*hmm_pte = hmm_pte_from_pfn(pfn);
+			hmm_pt_iter_directory_ref(iter);
+		}
+		BUG_ON(hmm_pte_pfn(*hmm_pte) != pfn);
+		if (write)
+			hmm_pte_set_write(hmm_pte);
+	}
+	hmm_pt_iter_directory_unlock(iter);
+#else
+	BUG();
+#endif
+	return 0;
+}
+
 static int hmm_mirror_dma_map(struct hmm_mirror *mirror,
 			      struct hmm_pt_iter *iter,
 			      unsigned long start,
@@ -920,6 +979,7 @@ static int hmm_mirror_handle_fault(struct hmm_mirror *mirror,
 	walk.mm = mirror->hmm->mm;
 	walk.private = &mirror_fault;
 	walk.pmd_entry = hmm_mirror_fault_pmd;
+	walk.hugetlb_entry = hmm_mirror_fault_hugetlb_entry;
 	walk.pte_hole = hmm_pte_hole;
 	ret = walk_page_range(addr, event->end, &walk);
 	if (ret)
@@ -1006,7 +1066,7 @@ retry:
 		goto out;
 	}
 	event->end = min(event->end, vma->vm_end) & PAGE_MASK;
-	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP | VM_HUGETLB))) {
+	if ((vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))) {
 		ret = -EFAULT;
 		goto out;
 	}
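
As context for dropping VM_HUGETLB from the rejected flags above: the
generic walker already routes hugetlb VMAs to the hugetlb_entry callback,
one call per huge page, and ptep may be NULL (hence the check at the top
of the new helper). Roughly what mm/pagewalk.c of this era does (a
condensed sketch, not verbatim):

	/* Inside __walk_page_range(): hugetlb VMAs take a separate path. */
	if (vma && is_vm_hugetlb_page(vma)) {
		if (walk->hugetlb_entry)
			/* Invokes ->hugetlb_entry(ptep, hmask, addr, next,
			 * walk) once per huge page in [start, end). */
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);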
--
2.4.3