Subject: [PATCH 3.19.y-ckt 068/196] mm: replace vma_lock_anon_vma with anon_vma_lock_read/write
    3.19.8-ckt16 -stable review patch.  If anyone has any objections, please let me know.

    ---8<------------------------------------------------------------

    From: Konstantin Khlebnikov <koct9i@gmail.com>

    commit 12352d3cae2cebe18805a91fab34b534d7444231 upstream.

The sequence vma_lock_anon_vma() - vma_unlock_anon_vma() isn't safe if
the anon_vma appears between the lock and the unlock: the lock is skipped
while vma->anon_vma is still NULL, but the unlock then releases a rwsem
that was never taken. We have to check anon_vma first, or call
anon_vma_prepare() to make sure it is there. There are only a few users
of these legacy helpers left, so let's get rid of them.
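
For illustration only (this sketch is not part of the patch, and a
userspace pthread rwlock stands in for the kernel rwsem), the failing
interleaving boils down to:

/* Minimal userspace sketch of the imbalance; build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stddef.h>

struct anon_vma { pthread_rwlock_t rwsem; };
struct vm_area_struct { struct anon_vma *anon_vma; };

/* Same logic as the removed helpers. */
static void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		pthread_rwlock_wrlock(&vma->anon_vma->rwsem);
}

static void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		pthread_rwlock_unlock(&vma->anon_vma->rwsem);
}

int main(void)
{
	static struct anon_vma av = { .rwsem = PTHREAD_RWLOCK_INITIALIZER };
	struct vm_area_struct vma = { .anon_vma = NULL };

	vma_lock_anon_vma(&vma);   /* anon_vma == NULL: nothing is locked */
	vma.anon_vma = &av;        /* anon_vma "appears" in between, as a
	                            * racing anon_vma_prepare() would do */
	vma_unlock_anon_vma(&vma); /* releases a rwsem that was never taken */
	return 0;
}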

This patch fixes the anon_vma lock imbalance in validate_mm(). A write
lock isn't required here; a read lock is enough.

It also reorders expand_downwards()/expand_upwards(): security_mmap_addr()
and the wrapping-around check don't have to be done under the anon_vma
lock.
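
As a standalone sketch of that wrapping-around check (again not part of
the patch; hypothetical userspace code assuming 4 KiB pages, with
PAGE_ALIGN mirroring the kernel macro):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* The test expand_upwards() now performs before taking any lock: if
 * PAGE_ALIGN(address + 4) wraps past the top of the address space it
 * becomes a small value, the comparison fails, and the growth request
 * is rejected with -ENOMEM. */
static int growth_target_ok(unsigned long address)
{
	return address < PAGE_ALIGN(address + 4);
}

int main(void)
{
	printf("%d\n", growth_target_ok(0x7ffffffde123UL)); /* 1: ok */
	printf("%d\n", growth_target_ok(~0UL - 2));          /* 0: wraps, rejected */
	return 0;
}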

    Link: https://lkml.kernel.org/r/CACT4Y+Y908EjM2z=706dv4rV6dWtxTLK9nFg9_7DhRMLppBo2g@mail.gmail.com
    Signed-off-by: Konstantin Khlebnikov <koct9i@gmail.com>
    Reported-by: Dmitry Vyukov <dvyukov@google.com>
    Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
    Cc: Andrea Arcangeli <aarcange@redhat.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    Signed-off-by: Kamal Mostafa <kamal@canonical.com>
    ---
    include/linux/rmap.h | 14 -------------
    mm/mmap.c | 55 ++++++++++++++++++++++++----------------------------
    2 files changed, 25 insertions(+), 44 deletions(-)

    diff --git a/include/linux/rmap.h b/include/linux/rmap.h
    index d9d7e7e..89195a45 100644
    --- a/include/linux/rmap.h
    +++ b/include/linux/rmap.h
@@ -113,20 +113,6 @@ static inline struct anon_vma *page_anon_vma(struct page *page)
 	return page_rmapping(page);
 }
 
-static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		down_write(&anon_vma->root->rwsem);
-}
-
-static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
-{
-	struct anon_vma *anon_vma = vma->anon_vma;
-	if (anon_vma)
-		up_write(&anon_vma->root->rwsem);
-}
-
 static inline void anon_vma_lock_write(struct anon_vma *anon_vma)
 {
 	down_write(&anon_vma->root->rwsem);
    diff --git a/mm/mmap.c b/mm/mmap.c
    index 0bc66f1..5d1c930 100644
    --- a/mm/mmap.c
    +++ b/mm/mmap.c
@@ -443,12 +443,16 @@ static void validate_mm(struct mm_struct *mm)
 	struct vm_area_struct *vma = mm->mmap;
 
 	while (vma) {
+		struct anon_vma *anon_vma = vma->anon_vma;
 		struct anon_vma_chain *avc;
 
-		vma_lock_anon_vma(vma);
-		list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
-			anon_vma_interval_tree_verify(avc);
-		vma_unlock_anon_vma(vma);
+		if (anon_vma) {
+			anon_vma_lock_read(anon_vma);
+			list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
+				anon_vma_interval_tree_verify(avc);
+			anon_vma_unlock_read(anon_vma);
+		}
+
 		highest_address = vma->vm_end;
 		vma = vma->vm_next;
 		i++;
@@ -2150,32 +2154,27 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
  */
 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 {
-	int error;
+	int error = 0;
 
 	if (!(vma->vm_flags & VM_GROWSUP))
 		return -EFAULT;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
+	/* Guard against wrapping around to address 0. */
+	if (address < PAGE_ALIGN(address+4))
+		address = PAGE_ALIGN(address+4);
+	else
+		return -ENOMEM;
+
+	/* We must make sure the anon_vma is allocated. */
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
-	vma_lock_anon_vma(vma);
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
-	 * Also guard against wrapping around to address 0.
 	 */
-	if (address < PAGE_ALIGN(address+4))
-		address = PAGE_ALIGN(address+4);
-	else {
-		vma_unlock_anon_vma(vma);
-		return -ENOMEM;
-	}
-	error = 0;
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address > vma->vm_end) {
@@ -2193,7 +2192,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2213,7 +2212,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
 	return error;
@@ -2228,25 +2227,21 @@ int expand_downwards(struct vm_area_struct *vma,
 {
 	int error;
 
-	/*
-	 * We must make sure the anon_vma is allocated
-	 * so that the anon_vma locking is not a noop.
-	 */
-	if (unlikely(anon_vma_prepare(vma)))
-		return -ENOMEM;
-
 	address &= PAGE_MASK;
 	error = security_mmap_addr(address);
 	if (error)
 		return error;
 
-	vma_lock_anon_vma(vma);
+	/* We must make sure the anon_vma is allocated. */
+	if (unlikely(anon_vma_prepare(vma)))
+		return -ENOMEM;
 
 	/*
 	 * vma->vm_start/vm_end cannot change under us because the caller
 	 * is required to hold the mmap_sem in read mode. We need the
 	 * anon_vma lock to serialize against concurrent expand_stacks.
 	 */
+	anon_vma_lock_write(vma->anon_vma);
 
 	/* Somebody else might have raced and expanded it already */
 	if (address < vma->vm_start) {
@@ -2264,7 +2259,7 @@ int expand_downwards(struct vm_area_struct *vma,
 				 * updates, but we only hold a shared mmap_sem
 				 * lock here, so we need to protect against
 				 * concurrent vma expansions.
-				 * vma_lock_anon_vma() doesn't help here, as
+				 * anon_vma_lock_write() doesn't help here, as
 				 * we don't guarantee that all growable vmas
 				 * in a mm share the same root anon vma.
 				 * So, we reuse mm->page_table_lock to guard
@@ -2282,7 +2277,7 @@ int expand_downwards(struct vm_area_struct *vma,
 			}
 		}
 	}
-	vma_unlock_anon_vma(vma);
+	anon_vma_unlock_write(vma->anon_vma);
 	khugepaged_enter_vma_merge(vma, vma->vm_flags);
 	validate_mm(vma->vm_mm);
 	return error;
    --
    2.7.0