Subject: [PATCH 1/4] mm/hmm: make full use of walk_page_range()

hmm_range_fault() calls find_vma() and walk_page_range() in a loop.
This is unnecessary duplication since walk_page_range() already calls
find_vma() in a loop of its own.

Simplify hmm_range_fault() by defining a test_walk() callback function
to filter unhandled vmas.

This also fixes a bug where hmm_range_fault() was not checking that
start >= vma->vm_start before checking vma->vm_flags, so it could
return an error based on the wrong vma for the requested range.
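
For context, here is a trimmed sketch of the main loop inside
walk_page_range() (adapted from mm/pagewalk.c of this era; locking
asserts and some error handling elided, comments mine). It shows both
the internal find_vma() loop and where the test_walk() callback hooks
in; the start < vma->vm_start test here is exactly the check the
open-coded loop in hmm_range_fault() was missing:

	vma = find_vma(walk.mm, start);
	do {
		if (!vma || start < vma->vm_start) {
			/* No vma backs this part of the range: a hole. */
			walk.vma = NULL;
			next = vma ? min(end, vma->vm_start) : end;
		} else {
			/*
			 * Only here, once start >= vma->vm_start is known,
			 * does ops->test_walk() get to see the vma.
			 */
			walk.vma = vma;
			next = min(end, vma->vm_end);
			vma = vma->vm_next;

			err = walk_page_test(start, next, &walk);
			if (err > 0) {	/* positive: skip vma, no error */
				err = 0;
				continue;
			}
			if (err < 0)	/* negative: abort the whole walk */
				break;
		}
		if (walk.vma || walk.ops->pte_hole)
			err = __walk_page_range(start, next, &walk);
		if (err)
			break;
	} while (start = next, start < end);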

Signed-off-by: Ralph Campbell <rcampbell@nvidia.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
---
 mm/hmm.c | 113 ++++++++++++++++++++++++-------------------------------
 1 file changed, 50 insertions(+), 63 deletions(-)
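
A note on the test_walk() return convention relied on below: the core
walker treats a positive return as "skip this vma without failing the
walk" and a negative return as "abort and propagate the error", so the
-EBUSY/-EFAULT/-EPERM returns from hmm_vma_walk_test() surface directly
as the return value of hmm_range_fault(). A rough paraphrase of the
dispatch in mm/pagewalk.c at the time of this series (comments mine):

	static int walk_page_test(unsigned long start, unsigned long end,
				  struct mm_walk *walk)
	{
		const struct mm_walk_ops *ops = walk->ops;

		if (ops->test_walk)
			return ops->test_walk(start, end, walk);

		/*
		 * Default: VM_PFNMAP vmas have no struct page backing,
		 * so treat them as holes rather than walking them.
		 */
		if (walk->vma->vm_flags & VM_PFNMAP) {
			int err = 1;

			if (ops->pte_hole)
				err = ops->pte_hole(start, end, walk);
			return err ? err : 1;
		}
		return 0;
	}

Because hmm_walk_ops now supplies .test_walk, that default VM_PFNMAP
handling is bypassed, which is why hmm_vma_walk_test() must check
VM_IO/VM_PFNMAP/VM_MIXEDMAP itself.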

diff --git a/mm/hmm.c b/mm/hmm.c
index 902f5fa6bf93..06041d4399ff 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -252,18 +252,17 @@ static int hmm_vma_do_fault(struct mm_walk *walk, unsigned long addr,
 	return -EFAULT;
 }
 
-static int hmm_pfns_bad(unsigned long addr,
-			unsigned long end,
-			struct mm_walk *walk)
+static int hmm_pfns_fill(unsigned long addr,
+			 unsigned long end,
+			 struct hmm_range *range,
+			 enum hmm_pfn_value_e value)
 {
-	struct hmm_vma_walk *hmm_vma_walk = walk->private;
-	struct hmm_range *range = hmm_vma_walk->range;
 	uint64_t *pfns = range->pfns;
 	unsigned long i;
 
 	i = (addr - range->start) >> PAGE_SHIFT;
 	for (; addr < end; addr += PAGE_SIZE, i++)
-		pfns[i] = range->values[HMM_PFN_ERROR];
+		pfns[i] = range->values[value];
 
 	return 0;
 }
@@ -584,7 +583,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 		}
 		return 0;
 	} else if (!pmd_present(pmd))
-		return hmm_pfns_bad(start, end, walk);
+		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 
 	if (pmd_devmap(pmd) || pmd_trans_huge(pmd)) {
 		/*
@@ -612,7 +611,7 @@ static int hmm_vma_walk_pmd(pmd_t *pmdp,
 	 * recover.
 	 */
 	if (pmd_bad(pmd))
-		return hmm_pfns_bad(start, end, walk);
+		return hmm_pfns_fill(start, end, range, HMM_PFN_ERROR);
 
 	ptep = pte_offset_map(pmdp, addr);
 	i = (addr - range->start) >> PAGE_SHIFT;
@@ -770,13 +769,36 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
 #define hmm_vma_walk_hugetlb_entry NULL
 #endif /* CONFIG_HUGETLB_PAGE */
 
-static void hmm_pfns_clear(struct hmm_range *range,
-			   uint64_t *pfns,
-			   unsigned long addr,
-			   unsigned long end)
+static int hmm_vma_walk_test(unsigned long start,
+			     unsigned long end,
+			     struct mm_walk *walk)
 {
-	for (; addr < end; addr += PAGE_SIZE, pfns++)
-		*pfns = range->values[HMM_PFN_NONE];
+	struct hmm_vma_walk *hmm_vma_walk = walk->private;
+	struct hmm_range *range = hmm_vma_walk->range;
+	struct vm_area_struct *vma = walk->vma;
+
+	/* If range is no longer valid, force retry. */
+	if (!range->valid)
+		return -EBUSY;
+
+	/*
+	 * Skip vma ranges that don't have struct page backing them or
+	 * map I/O devices directly.
+	 */
+	if (vma->vm_flags & (VM_IO | VM_PFNMAP | VM_MIXEDMAP))
+		return -EFAULT;
+
+	/*
+	 * If the vma does not allow read access, then assume that it does not
+	 * allow write access either. HMM does not support architectures
+	 * that allow write without read.
+	 */
+	if (!(vma->vm_flags & VM_READ)) {
+		(void) hmm_pfns_fill(start, end, range, HMM_PFN_NONE);
+		return -EPERM;
+	}
+
+	return 0;
 }
 
 /*
@@ -857,6 +879,7 @@ static const struct mm_walk_ops hmm_walk_ops = {
 	.pmd_entry	= hmm_vma_walk_pmd,
 	.pte_hole	= hmm_vma_walk_hole,
 	.hugetlb_entry	= hmm_vma_walk_hugetlb_entry,
+	.test_walk	= hmm_vma_walk_test,
 };
 
 /**
@@ -889,63 +912,27 @@ static const struct mm_walk_ops hmm_walk_ops = {
  */
 long hmm_range_fault(struct hmm_range *range, unsigned int flags)
 {
-	const unsigned long device_vma = VM_IO | VM_PFNMAP | VM_MIXEDMAP;
-	unsigned long start = range->start, end;
-	struct hmm_vma_walk hmm_vma_walk;
+	unsigned long start = range->start;
+	struct hmm_vma_walk hmm_vma_walk = {
+		.range = range,
+		.last = start,
+		.flags = flags,
+	};
 	struct hmm *hmm = range->hmm;
-	struct vm_area_struct *vma;
 	int ret;
 
 	lockdep_assert_held(&hmm->mmu_notifier.mm->mmap_sem);
 
 	do {
-		/* If range is no longer valid force retry. */
-		if (!range->valid)
-			return -EBUSY;
-
-		vma = find_vma(hmm->mmu_notifier.mm, start);
-		if (vma == NULL || (vma->vm_flags & device_vma))
-			return -EFAULT;
-
-		if (!(vma->vm_flags & VM_READ)) {
-			/*
-			 * If vma do not allow read access, then assume that it
-			 * does not allow write access, either. HMM does not
-			 * support architecture that allow write without read.
-			 */
-			hmm_pfns_clear(range, range->pfns,
-				       range->start, range->end);
-			return -EPERM;
-		}
+		ret = walk_page_range(hmm->mmu_notifier.mm, start, range->end,
+				      &hmm_walk_ops, &hmm_vma_walk);
+		start = hmm_vma_walk.last;
 
-		hmm_vma_walk.pgmap = NULL;
-		hmm_vma_walk.last = start;
-		hmm_vma_walk.flags = flags;
-		hmm_vma_walk.range = range;
-		end = min(range->end, vma->vm_end);
+		/* Keep trying while the range is valid. */
+	} while (ret == -EBUSY && range->valid);
 
-		walk_page_range(vma->vm_mm, start, end, &hmm_walk_ops,
-				&hmm_vma_walk);
-
-		do {
-			ret = walk_page_range(vma->vm_mm, start, end,
-					      &hmm_walk_ops, &hmm_vma_walk);
-			start = hmm_vma_walk.last;
-
-			/* Keep trying while the range is valid. */
-		} while (ret == -EBUSY && range->valid);
-
-		if (ret) {
-			unsigned long i;
-
-			i = (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
-			hmm_pfns_clear(range, &range->pfns[i],
-				       hmm_vma_walk.last, range->end);
-			return ret;
-		}
-		start = end;
-
-	} while (start < range->end);
+	if (ret)
+		return ret;
 
 	return (hmm_vma_walk.last - range->start) >> PAGE_SHIFT;
 }
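
For callers the contract is unchanged: hmm_range_fault() still returns
the number of pages it handled on success, -EBUSY when the range was
invalidated mid-walk (retry once the range becomes valid again), or a
hard error such as -EFAULT/-EPERM. A condensed sketch of the
driver-side retry loop, paraphrased from Documentation/vm/hmm.rst of
this era; driver->mirror stands in for the driver's struct hmm_mirror,
and the range fields are assumed to be set up beforehand:

	struct hmm_range range;
	long ret;

	/* range.start/end/pfns/flags/values are filled in by the driver. */
	hmm_range_register(&range, &driver->mirror);
	hmm_range_wait_until_valid(&range, HMM_RANGE_DEFAULT_TIMEOUT);

again:
	down_read(&mm->mmap_sem);
	ret = hmm_range_fault(&range, 0);
	if (ret < 0) {
		up_read(&mm->mmap_sem);
		if (ret == -EBUSY) {
			/* The range was invalidated; wait and retry. */
			hmm_range_wait_until_valid(&range,
						   HMM_RANGE_DEFAULT_TIMEOUT);
			goto again;
		}
		hmm_range_unregister(&range);
		return ret;
	}
	/* ret is the number of pages snapshotted or faulted in. */
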
--
2.20.1