Subject: [PATCH v7 01/24] mm: Move readahead prototypes from mm.h
    From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

    The readahead code is part of the page cache so should be found in the
    pagemap.h file. force_page_cache_readahead is only used within mm,
    so move it to mm/internal.h instead. Remove the parameter names where
    they add no value, and rename the ones which were actively misleading.

    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
    ---
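As an aside for reviewers (illustration only, not part of the patch): callers
that kick off readahead now get the prototypes from <linux/pagemap.h> rather
than <linux/mm.h>. A minimal sketch of a hypothetical caller, with the
function name invented for the example:

/*
 * Hypothetical caller, not part of this patch: the readahead
 * prototypes now come from pagemap.h instead of mm.h.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>	/* page_cache_sync_readahead() lives here now */

static void example_start_readahead(struct file *file, pgoff_t index,
				    unsigned long req_count)
{
	struct address_space *mapping = file->f_mapping;

	/* Start readahead of req_count pages beginning at index. */
	page_cache_sync_readahead(mapping, &file->f_ra, file, index, req_count);
}
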
 block/blk-core.c        |  1 +
 include/linux/mm.h      | 19 -------------------
 include/linux/pagemap.h |  8 ++++++++
 mm/fadvise.c            |  2 ++
 mm/internal.h           |  2 ++
 5 files changed, 13 insertions(+), 19 deletions(-)

    diff --git a/block/blk-core.c b/block/blk-core.c
    index 089e890ab208..41417bb93634 100644
    --- a/block/blk-core.c
    +++ b/block/blk-core.c
    @@ -20,6 +20,7 @@
 #include <linux/blk-mq.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
+#include <linux/pagemap.h>
 #include <linux/kernel_stat.h>
 #include <linux/string.h>
 #include <linux/init.h>
    diff --git a/include/linux/mm.h b/include/linux/mm.h
    index 52269e56c514..68dcda9a2112 100644
    --- a/include/linux/mm.h
    +++ b/include/linux/mm.h
    @@ -2401,25 +2401,6 @@ extern vm_fault_t filemap_page_mkwrite(struct vm_fault *vmf);
 int __must_check write_one_page(struct page *page);
 void task_dirty_inc(struct task_struct *tsk);
 
-/* readahead.c */
-#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
-
-int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
-			pgoff_t offset, unsigned long nr_to_read);
-
-void page_cache_sync_readahead(struct address_space *mapping,
-			       struct file_ra_state *ra,
-			       struct file *filp,
-			       pgoff_t offset,
-			       unsigned long size);
-
-void page_cache_async_readahead(struct address_space *mapping,
-				struct file_ra_state *ra,
-				struct file *filp,
-				struct page *pg,
-				pgoff_t offset,
-				unsigned long size);
-
 extern unsigned long stack_guard_gap;
 /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
 extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
    diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
    index ccb14b6a16b5..24894b9b90c9 100644
    --- a/include/linux/pagemap.h
    +++ b/include/linux/pagemap.h
    @@ -614,6 +614,14 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask);
 void delete_from_page_cache_batch(struct address_space *mapping,
 				  struct pagevec *pvec);
 
+#define VM_READAHEAD_PAGES	(SZ_128K / PAGE_SIZE)
+
+void page_cache_sync_readahead(struct address_space *, struct file_ra_state *,
+		struct file *, pgoff_t index, unsigned long req_count);
+void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
+		struct file *, struct page *, pgoff_t index,
+		unsigned long req_count);
+
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
  * the page is new, so we can just run __SetPageLocked() against it.
    diff --git a/mm/fadvise.c b/mm/fadvise.c
    index 4f17c83db575..3efebfb9952c 100644
    --- a/mm/fadvise.c
    +++ b/mm/fadvise.c
    @@ -22,6 +22,8 @@

 #include <asm/unistd.h>
 
+#include "internal.h"
+
 /*
  * POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
  * deactivate the pages and clear PG_Referenced.
    diff --git a/mm/internal.h b/mm/internal.h
    index 3cf20ab3ca01..83f353e74654 100644
    --- a/mm/internal.h
    +++ b/mm/internal.h
    @@ -49,6 +49,8 @@ void unmap_page_range(struct mmu_gather *tlb,
 			     unsigned long addr, unsigned long end,
 			     struct zap_details *details);
 
+int force_page_cache_readahead(struct address_space *, struct file *,
+		pgoff_t index, unsigned long nr_to_read);
 extern unsigned int __do_page_cache_readahead(struct address_space *mapping,
 		struct file *filp, pgoff_t offset, unsigned long nr_to_read,
 		unsigned long lookahead_size);
    --
    2.25.0