From: David Hildenbrand <david@redhat.com>
Date: 15 Mar 2022
Subject: [PATCH v2 02/15] mm/hugetlb: take src_mm->write_protect_seq in copy_hugetlb_page_range()
Let's do it just like copy_page_range(), taking the seqlock and making
sure the mmap_lock is held in write mode.
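
For reference, the pattern in copy_page_range() that this mirrors looks
roughly like the following (simplified from mm/memory.c; exact context
varies by kernel version):

	if (is_cow) {
		mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
					0, src_vma, src_mm, addr, end);
		mmu_notifier_invalidate_range_start(&range);
		/*
		 * The read side (GUP-fast) doesn't spin on the seqcount;
		 * it bails out to the slow path instead, so the raw write
		 * variants are sufficient. The mmap_lock held in write
		 * mode excludes concurrent writers.
		 */
		mmap_assert_write_locked(src_mm);
		raw_write_seqcount_begin(&src_mm->write_protect_seq);
	}

	/* ... copy the page tables, write-protecting COW pages ... */

	if (is_cow) {
		raw_write_seqcount_end(&src_mm->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	}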

This allows adding a VM_BUG_ON() to page_needs_cow_for_dma() and
properly synchronizes concurrent fork() with GUP-fast of hugetlb pages,
which will be relevant for further changes.
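
The reader side of this handshake is GUP-fast: it samples the seqcount
before walking the page tables locklessly and falls back to the slow
path if fork() ran concurrently. An odd seqcount value means a writer
is inside the critical section, which is exactly what the VM_BUG_ON
added below asserts. Roughly (simplified from lockless_pages_from_mm()
in mm/gup.c):

	if (gup_flags & FOLL_PIN) {
		seq = raw_read_seqcount(&current->mm->write_protect_seq);
		/* Odd means fork() is write-protecting right now: bail. */
		if (seq & 1)
			return 0;
	}

	/* ... lockless page table walk, pinning pages ... */

	if (gup_flags & FOLL_PIN) {
		/* Raced with fork()? Unpin and take the slow path. */
		if (read_seqcount_retry(&current->mm->write_protect_seq, seq)) {
			unpin_user_pages(pages, nr_pinned);
			return 0;
		}
	}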

Signed-off-by: David Hildenbrand <david@redhat.com>
---
 include/linux/mm.h | 4 ++++
 mm/hugetlb.c       | 8 ++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 9530f950f156..391b950e919d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1323,6 +1323,8 @@ static inline bool is_cow_mapping(vm_flags_t flags)
 /*
  * This should most likely only be called during fork() to see whether we
  * should break the cow immediately for a page on the src mm.
+ *
+ * The caller has to hold the PT lock and the vma->vm_mm->write_protect_seq.
  */
 static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
 					  struct page *page)
@@ -1330,6 +1332,8 @@ static inline bool page_needs_cow_for_dma(struct vm_area_struct *vma,
 	if (!is_cow_mapping(vma->vm_flags))
 		return false;
 
+	VM_BUG_ON(!(raw_read_seqcount(&vma->vm_mm->write_protect_seq) & 1));
+
 	if (!test_bit(MMF_HAS_PINNED, &vma->vm_mm->flags))
 		return false;

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f294db835f4b..d3ce89697855 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4710,6 +4710,8 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 					vma->vm_start,
 					vma->vm_end);
 		mmu_notifier_invalidate_range_start(&range);
+		mmap_assert_write_locked(src);
+		raw_write_seqcount_begin(&src->write_protect_seq);
 	} else {
 		/*
 		 * For shared mappings i_mmap_rwsem must be held to call
@@ -4842,10 +4844,12 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 		spin_unlock(dst_ptl);
 	}
 
-	if (cow)
+	if (cow) {
+		raw_write_seqcount_end(&src->write_protect_seq);
 		mmu_notifier_invalidate_range_end(&range);
-	else
+	} else {
 		i_mmap_unlock_read(mapping);
+	}
 
 	return ret;
 }
-- 
2.35.1