Subject: [PATCH 5.19 101/155] mm: pagewalk: Fix race between unmap and page walker
From: Steven Price <steven.price@arm.com>

[ Upstream commit 8782fb61cc848364e1e1599d76d3c9dd58a1cc06 ]

The mmap lock protects the page walker from changes to the page tables
during the walk. However, a read lock is insufficient to protect those
areas which don't have a VMA as munmap() detaches the VMAs before
downgrading to a read lock and actually tearing down PTEs/page tables.
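
For illustration only (not part of the patch), the interleaving this
closes looks roughly like the following; a simplified sketch based on
the description above and on the behaviour introduced by dd2283f2605e:

	/*
	 * munmap() thread                     page walker thread
	 * ---------------                     ------------------
	 * mmap_write_lock()
	 * detach the VMAs
	 * mmap_write_downgrade()              mmap_read_lock()
	 * unmap PTEs / free page tables  <->  walks the now VMA-less range
	 * mmap_read_unlock()                  and touches freed page tables
	 */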

For users of walk_page_range() the solution is to simply call pte_hole()
immediately without checking the actual page tables when a VMA is not
present. We now never call __walk_page_range() without a valid vma.
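
As a hedged sketch (the names are made up, not from this patch): a
caller's pte_hole callback now simply sees the whole VMA-less range,
reported at depth -1, and the page tables underneath are never
dereferenced:

	/* Hypothetical callback for illustration only. */
	static int example_pte_hole(unsigned long addr, unsigned long next,
				    int depth, struct mm_walk *walk)
	{
		/* depth == -1: addr..next is not covered by any VMA. */
		return 0;
	}

	static const struct mm_walk_ops example_ops = {
		.pte_hole	= example_pte_hole,
	};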

For walk_page_range_novma() the locking requirements are tightened to
require the mmap write lock to be taken, and then walking the pgd
directly with 'no_vma' set.
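
A minimal sketch of what the tightened rule means for callers (mirroring
the riscv __set_memory() and ptdump_walk_pgd() hunks below; the function
name and the ops/priv arguments here are placeholders):

	static int example_novma_walk(struct mm_struct *mm, unsigned long start,
				      unsigned long end,
				      const struct mm_walk_ops *ops, void *priv)
	{
		int ret;

		mmap_write_lock(mm);	/* a read lock is no longer enough */
		ret = walk_page_range_novma(mm, start, end, ops, NULL, priv);
		mmap_write_unlock(mm);

		return ret;
	}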

This in turn means that all page walkers either have a valid vma, or
it's that special 'novma' case for page table debugging. As a result,
all the odd '(!walk->vma && !walk->no_vma)' tests can be removed.

Fixes: dd2283f2605e ("mm: mmap: zap pages with read mmap_sem in munmap")
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: Steven Price <steven.price@arm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Thomas Hellström <thomas.hellstrom@linux.intel.com>
Cc: Konstantin Khlebnikov <koct9i@gmail.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/riscv/mm/pageattr.c |  4 ++--
 mm/pagewalk.c            | 21 ++++++++++++---------
 mm/ptdump.c              |  4 ++--
 3 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 5e49e4b4a4ccc..86c56616e5dea 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -118,10 +118,10 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
 	if (!numpages)
 		return 0;
 
-	mmap_read_lock(&init_mm);
+	mmap_write_lock(&init_mm);
 	ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
				     &masks);
-	mmap_read_unlock(&init_mm);
+	mmap_write_unlock(&init_mm);
 
 	flush_tlb_kernel_range(start, end);

diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 9b3db11a4d1db..fa7a3d21a7518 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -110,7 +110,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*pmd) || (!walk->vma && !walk->no_vma)) {
+		if (pmd_none(*pmd)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
@@ -171,7 +171,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
 	do {
 again:
 		next = pud_addr_end(addr, end);
-		if (pud_none(*pud) || (!walk->vma && !walk->no_vma)) {
+		if (pud_none(*pud)) {
 			if (ops->pte_hole)
 				err = ops->pte_hole(addr, next, depth, walk);
 			if (err)
@@ -366,19 +366,19 @@ static int __walk_page_range(unsigned long start, unsigned long end,
 	struct vm_area_struct *vma = walk->vma;
 	const struct mm_walk_ops *ops = walk->ops;
 
-	if (vma && ops->pre_vma) {
+	if (ops->pre_vma) {
 		err = ops->pre_vma(start, end, walk);
 		if (err)
 			return err;
 	}
 
-	if (vma && is_vm_hugetlb_page(vma)) {
+	if (is_vm_hugetlb_page(vma)) {
 		if (ops->hugetlb_entry)
 			err = walk_hugetlb_range(start, end, walk);
 	} else
 		err = walk_pgd_range(start, end, walk);
 
-	if (vma && ops->post_vma)
+	if (ops->post_vma)
 		ops->post_vma(walk);
 
 	return err;
@@ -450,9 +450,13 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 		if (!vma) { /* after the last vma */
 			walk.vma = NULL;
 			next = end;
+			if (ops->pte_hole)
+				err = ops->pte_hole(start, next, -1, &walk);
 		} else if (start < vma->vm_start) { /* outside vma */
 			walk.vma = NULL;
 			next = min(end, vma->vm_start);
+			if (ops->pte_hole)
+				err = ops->pte_hole(start, next, -1, &walk);
 		} else { /* inside vma */
 			walk.vma = vma;
 			next = min(end, vma->vm_end);
@@ -470,9 +474,8 @@ int walk_page_range(struct mm_struct *mm, unsigned long start,
 			}
 			if (err < 0)
 				break;
-		}
-		if (walk.vma || walk.ops->pte_hole)
 			err = __walk_page_range(start, next, &walk);
+		}
 		if (err)
 			break;
 	} while (start = next, start < end);
@@ -501,9 +504,9 @@ int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
 	if (start >= end || !walk.mm)
 		return -EINVAL;
 
-	mmap_assert_locked(walk.mm);
+	mmap_assert_write_locked(walk.mm);
 
-	return __walk_page_range(start, end, &walk);
+	return walk_pgd_range(start, end, &walk);
 }
 
 int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
diff --git a/mm/ptdump.c b/mm/ptdump.c
index eea3d28d173c2..8adab455a68b3 100644
--- a/mm/ptdump.c
+++ b/mm/ptdump.c
@@ -152,13 +152,13 @@ void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd)
 {
 	const struct ptdump_range *range = st->range;
 
-	mmap_read_lock(mm);
+	mmap_write_lock(mm);
 	while (range->start != range->end) {
 		walk_page_range_novma(mm, range->start, range->end,
				      &ptdump_ops, pgd, st);
 		range++;
 	}
-	mmap_read_unlock(mm);
+	mmap_write_unlock(mm);
 
 	/* Flush out the last page */
 	st->note_page(st, 0, -1, 0);
--
2.35.1

