From: Matthew Wilcox (Oracle) <willy@infradead.org>
Subject: [PATCH v4 14/25] mm/filemap: Add lock_folio_killable
Date: 4 Mar 2021

This is like lock_page_killable() but for use by callers who
know they have a folio.  Convert __lock_page_killable() to be
__lock_folio_killable().  This saves one call to compound_head() per
contended call to lock_page_killable().

__lock_folio_killable() is 20 bytes smaller than __lock_page_killable()
was.  lock_page_maybe_drop_mmap() shrinks by 68 bytes and
__lock_page_or_retry() shrinks by 66 bytes.  That's a total of 154 bytes
of text saved.
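
For illustration only (not part of this patch), a caller that already
has a folio can take the lock directly and skip the compound_head()
lookup hidden inside lock_page_killable().  The function name
example_touch_folio() is made up, and unlock_folio() is assumed from
earlier patches in this series:

/*
 * Hypothetical caller: it already holds a folio reference, so it can
 * use lock_folio_killable() rather than the page-based wrapper.
 */
static int example_touch_folio(struct folio *folio)
{
	int err;

	/* Returns 0 with the folio locked, or -EINTR if fatally signalled. */
	err = lock_folio_killable(folio);
	if (err)
		return err;

	/* ... operate on the locked folio ... */

	unlock_folio(folio);	/* assumed from earlier in this series */
	return 0;
}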

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
 include/linux/pagemap.h | 15 ++++++++++-----
 mm/filemap.c            | 17 +++++++++--------
 2 files changed, 19 insertions(+), 13 deletions(-)

diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 034e41256340..0fa1a0338e54 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -636,7 +636,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
 }
 
 void __lock_folio(struct folio *folio);
-extern int __lock_page_killable(struct page *page);
+int __lock_folio_killable(struct folio *folio);
 extern int __lock_page_async(struct page *page, struct wait_page_queue *wait);
 extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 				unsigned int flags);
@@ -676,6 +676,14 @@ static inline void lock_page(struct page *page)
 		__lock_folio(folio);
 }
 
+static inline int lock_folio_killable(struct folio *folio)
+{
+	might_sleep();
+	if (!trylock_folio(folio))
+		return __lock_folio_killable(folio);
+	return 0;
+}
+
 /*
  * lock_page_killable is like lock_page but can be interrupted by fatal
  * signals.  It returns 0 if it locked the page and -EINTR if it was
@@ -683,10 +691,7 @@ static inline void lock_page(struct page *page)
  */
 static inline int lock_page_killable(struct page *page)
 {
-	might_sleep();
-	if (!trylock_page(page))
-		return __lock_page_killable(page);
-	return 0;
+	return lock_folio_killable(page_folio(page));
 }
 
 /*
diff --git a/mm/filemap.c b/mm/filemap.c
index 3e3e3c666b94..5acadffed25d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1499,14 +1499,13 @@ void __lock_folio(struct folio *folio)
 }
 EXPORT_SYMBOL(__lock_folio);
 
-int __lock_page_killable(struct page *__page)
+int __lock_folio_killable(struct folio *folio)
 {
-	struct page *page = compound_head(__page);
-	wait_queue_head_t *q = page_waitqueue(page);
-	return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE,
+	wait_queue_head_t *q = page_waitqueue(&folio->page);
+	return wait_on_page_bit_common(q, &folio->page, PG_locked, TASK_KILLABLE,
 					EXCLUSIVE);
 }
-EXPORT_SYMBOL_GPL(__lock_page_killable);
+EXPORT_SYMBOL_GPL(__lock_folio_killable);
 
 int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 {
@@ -1548,6 +1547,8 @@ int __lock_page_async(struct page *page, struct wait_page_queue *wait)
 int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 			 unsigned int flags)
 {
+	struct folio *folio = page_folio(page);
+
 	if (fault_flag_allow_retry_first(flags)) {
 		/*
 		 * CAUTION!  In this case, mmap_lock is not released
@@ -1566,13 +1567,13 @@ int __lock_page_or_retry(struct page *page, struct mm_struct *mm,
 	if (flags & FAULT_FLAG_KILLABLE) {
 		int ret;
 
-		ret = __lock_page_killable(page);
+		ret = __lock_folio_killable(folio);
 		if (ret) {
 			mmap_read_unlock(mm);
 			return 0;
 		}
 	} else {
-		__lock_folio(page_folio(page));
+		__lock_folio(folio);
 	}
 
 	return 1;
@@ -2734,7 +2735,7 @@ static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page,
 
 	*fpin = maybe_unlock_mmap_for_io(vmf, *fpin);
 	if (vmf->flags & FAULT_FLAG_KILLABLE) {
-		if (__lock_page_killable(&folio->page)) {
+		if (__lock_folio_killable(folio)) {
 			/*
 			 * We didn't have the right flags to drop the mmap_lock,
 			 * but all fault_handlers only check for fatal signals
--
2.30.0