    From: Arjun Roy <arjunroy@google.com>
    Subject: [PATCH 5.7 239/244] mm/memory.c: properly pte_offset_map_lock/unlock in vm_insert_pages()

    commit 7f70c2a68a51496289df163f6969d4db7c383f30 upstream.

    Calls to pte_offset_map() in vm_insert_pages() are erroneously not
    matched with a corresponding pte_unmap(). This would cause problems
    on architectures where pte_unmap() is not a no-op, e.g. CONFIG_HIGHPTE
    configurations, where pte_offset_map() kmaps the PTE page and the
    mapping must be released again.
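
    For background, a minimal sketch of the pairing convention (an
    illustrative kernel-style fragment, not taken from this patch;
    sketch_pte_access() is a hypothetical helper):

	#include <linux/mm.h>

	/*
	 * Illustrative only: each pte_offset_map() must be paired with a
	 * pte_unmap(). On CONFIG_HIGHPTE kernels pte_offset_map() kmaps
	 * the PTE page, so skipping pte_unmap() leaks that mapping.
	 */
	static void sketch_pte_access(struct mm_struct *mm, pmd_t *pmd,
				      unsigned long addr)
	{
		pte_t *pte = pte_offset_map(pmd, addr);

		/* ... inspect or update *pte under the appropriate lock ... */

		pte_unmap(pte);		/* never skip this on the way out */
	}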

    This patch does away with the non-traditional locking in the existing
    code and instead uses pte_offset_map_lock()/pte_unmap_unlock() as
    usual, incrementing the PTE pointer as necessary. The pointer stays
    within bounds since the number of pages written per PMD is clamped
    with PTRS_PER_PTE.
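
    The pattern the patch switches to looks roughly like the following
    (a sketch assuming a range within a single PMD; sketch_pte_batch()
    is hypothetical and not part of the patch):

	#include <linux/mm.h>

	/*
	 * Sketch of the pte_offset_map_lock()/pte_unmap_unlock() idiom:
	 * map and lock the PTE page once, walk consecutive PTEs by
	 * incrementing the pointer, then unmap/unlock from the PTE the
	 * mapping started at.
	 */
	static void sketch_pte_batch(struct mm_struct *mm, pmd_t *pmd,
				     unsigned long addr, int nr)
	{
		spinlock_t *ptl;
		pte_t *start_pte, *pte;
		int i;

		start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		for (pte = start_pte, i = 0; i < nr; ++pte, ++i, addr += PAGE_SIZE) {
			/* ... operate on *pte ... */
		}
		pte_unmap_unlock(start_pte, ptl);
	}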

    Link: http://lkml.kernel.org/r/20200618220446.20284-1-arjunroy.kdev@gmail.com
    Fixes: 8cd3984d81d5 ("mm/memory.c: add vm_insert_pages()")
    Signed-off-by: Arjun Roy <arjunroy@google.com>
    Acked-by: David Rientjes <rientjes@google.com>
    Cc: Eric Dumazet <edumazet@google.com>
    Cc: Hugh Dickins <hughd@google.com>
    Cc: Soheil Hassas Yeganeh <soheil@google.com>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    mm/memory.c | 21 +++++++++++----------
    1 file changed, 11 insertions(+), 10 deletions(-)

    --- a/mm/memory.c
    +++ b/mm/memory.c
    @@ -1501,7 +1501,7 @@ out:
     }
     
     #ifdef pte_index
    -static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
    +static int insert_page_in_batch_locked(struct mm_struct *mm, pte_t *pte,
     			unsigned long addr, struct page *page, pgprot_t prot)
     {
     	int err;
    @@ -1509,8 +1509,9 @@ static int insert_page_in_batch_locked(s
     	if (!page_count(page))
     		return -EINVAL;
     	err = validate_page_before_insert(page);
    -	return err ? err : insert_page_into_pte_locked(
    -		mm, pte_offset_map(pmd, addr), addr, page, prot);
    +	if (err)
    +		return err;
    +	return insert_page_into_pte_locked(mm, pte, addr, page, prot);
     }
     
     /* insert_pages() amortizes the cost of spinlock operations
    @@ -1520,7 +1521,8 @@ static int insert_pages(struct vm_area_s
     		struct page **pages, unsigned long *num, pgprot_t prot)
     {
     	pmd_t *pmd = NULL;
    -	spinlock_t *pte_lock = NULL;
    +	pte_t *start_pte, *pte;
    +	spinlock_t *pte_lock;
     	struct mm_struct *const mm = vma->vm_mm;
     	unsigned long curr_page_idx = 0;
     	unsigned long remaining_pages_total = *num;
    @@ -1539,18 +1541,17 @@ more:
     	ret = -ENOMEM;
     	if (pte_alloc(mm, pmd))
     		goto out;
    -	pte_lock = pte_lockptr(mm, pmd);
     
     	while (pages_to_write_in_pmd) {
     		int pte_idx = 0;
     		const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
     
    -		spin_lock(pte_lock);
    -		for (; pte_idx < batch_size; ++pte_idx) {
    -			int err = insert_page_in_batch_locked(mm, pmd,
    +		start_pte = pte_offset_map_lock(mm, pmd, addr, &pte_lock);
    +		for (pte = start_pte; pte_idx < batch_size; ++pte, ++pte_idx) {
    +			int err = insert_page_in_batch_locked(mm, pte,
     					addr, pages[curr_page_idx], prot);
     			if (unlikely(err)) {
    -				spin_unlock(pte_lock);
    +				pte_unmap_unlock(start_pte, pte_lock);
     				ret = err;
     				remaining_pages_total -= pte_idx;
     				goto out;
    @@ -1558,7 +1559,7 @@ more:
     			addr += PAGE_SIZE;
     			++curr_page_idx;
     		}
    -		spin_unlock(pte_lock);
    +		pte_unmap_unlock(start_pte, pte_lock);
     		pages_to_write_in_pmd -= batch_size;
     		remaining_pages_total -= batch_size;
     	}
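
    For reference, the exported API this function backs is used roughly
    as follows (a hypothetical driver fragment; the names
    sketch_mmap_pages, my_pages and my_nr are assumptions, not taken
    from the patch):

	#include <linux/mm.h>
	#include <linux/printk.h>

	/*
	 * Hypothetical mmap helper: batch-insert an array of pages into a
	 * VMA. On return from vm_insert_pages(), num holds how many pages
	 * were NOT inserted.
	 */
	static int sketch_mmap_pages(struct vm_area_struct *vma,
				     struct page **my_pages, unsigned long my_nr)
	{
		unsigned long num = my_nr;
		int err = vm_insert_pages(vma, vma->vm_start, my_pages, &num);

		if (err)
			pr_warn("vm_insert_pages: %lu page(s) not inserted\n", num);
		return err;
	}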
