From:    Minchan Kim <minchan.kim@gmail.com>
Subject: [RFC 1/8] Only isolate page we can handle
Date:    2011-04-26

There are several places that isolate LRU pages, and I believe
the number of isolate_lru_page users will keep growing.
Each caller has a different purpose, so some of the isolated pages
have to be put back onto the LRU again.

The problem is that when we put a page back onto the LRU,
we lose its LRU ordering: the page is inserted at the head of the list.
That causes unnecessary LRU churning, so the VM can end up evicting
working-set pages rather than idle pages.
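
To illustrate the churn (a toy userspace sketch, not kernel code;
reclaim takes its victims from the tail, new pages enter at the head):

#include <stdio.h>

int main(void)
{
	/* inactive LRU: index 0 = head (newest), index 4 = tail (oldest) */
	int lru[5] = { 5, 4, 3, 2, 1 };
	int victim = lru[4];	/* page 1 is isolated from the tail... */
	int i;

	/* ...but cannot be handled (say it is dirty), so it is put
	 * back at the head, as if it had just been referenced. */
	for (i = 4; i > 0; i--)
		lru[i] = lru[i - 1];
	lru[0] = victim;

	for (i = 0; i < 5; i++)
		printf("%d ", lru[i]);
	printf("\n");	/* 1 5 4 3 2: page 2 is the next victim now,
			 * although it may well be working set */
	return 0;
}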

This patch adds new filter arguments used when we isolate pages
from the LRU, so that we do not isolate pages we cannot handle
in the first place. That should reduce LRU churning
(see the caller sketch below).
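
For example, a hypothetical caller (not part of this patch) that can
handle neither dirty nor mapped pages would pass the new flags like
this, skipping such pages instead of isolating them and putting them
back:

	/* Hypothetical caller sketch: take only pages we can handle,
	 * i.e. not dirty, not under writeback and not mapped. */
	if (__isolate_lru_page(page, ISOLATE_BOTH, 0,
				1 /* not_dirty */, 1 /* not_mapped */) != 0)
		continue;	/* the page keeps its place in the LRU */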

This patch shouldn't change the old behavior;
the new filters are only used by the next patches in the series.

    Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
    Cc: Mel Gorman <mgorman@suse.de>
    Cc: Rik van Riel <riel@redhat.com>
    Cc: Andrea Arcangeli <aarcange@redhat.com>
    Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
    ---
 include/linux/swap.h |    3 ++-
 mm/compaction.c      |    2 +-
 mm/memcontrol.c      |    2 +-
 mm/vmscan.c          |   26 ++++++++++++++++++++------
 4 files changed, 24 insertions(+), 9 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 384eb5f..baef4ad 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -259,7 +259,8 @@ extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						unsigned int swappiness,
 						struct zone *zone,
 						unsigned long *nr_scanned);
-extern int __isolate_lru_page(struct page *page, int mode, int file);
+extern int __isolate_lru_page(struct page *page, int mode, int file,
+				int not_dirty, int not_mapped);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/mm/compaction.c b/mm/compaction.c
index 021a296..dea32e3 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -335,7 +335,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 		}

 		/* Try isolate the page */
-		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
+		if (__isolate_lru_page(page, ISOLATE_BOTH, 0, 0, 0) != 0)
 			continue;

 		VM_BUG_ON(PageTransCompound(page));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c2776f1..471e7fd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1193,7 +1193,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 			continue;

 		scan++;
-		ret = __isolate_lru_page(page, mode, file);
+		ret = __isolate_lru_page(page, mode, file, 0, 0);
 		switch (ret) {
 		case 0:
 			list_move(&page->lru, dst);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b3a569f..71d2da9 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -954,10 +954,13 @@ keep_lumpy:
  *
  * page:	page to consider
  * mode:	one of the LRU isolation modes defined above
- *
+ * file:	page should be on a file LRU
+ * not_dirty:	page should not be dirty or under writeback
+ * not_mapped:	page should not be mapped
  * returns 0 on success, -ve errno on failure.
  */
-int __isolate_lru_page(struct page *page, int mode, int file)
+int __isolate_lru_page(struct page *page, int mode, int file,
+			int not_dirty, int not_mapped)
 {
 	int ret = -EINVAL;

@@ -976,6 +979,12 @@ int __isolate_lru_page(struct page *page, int mode, int file)
 	if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
 		return ret;

+	if (not_dirty)
+		if (PageDirty(page) || PageWriteback(page))
+			return ret;
+	if (not_mapped)
+		if (page_mapped(page))
+			return ret;
 	/*
 	 * When this function is being called for lumpy reclaim, we
 	 * initially look into all LRU pages, active, inactive and
@@ -1016,12 +1025,15 @@ int __isolate_lru_page(struct page *page, int mode, int file)
  * @order:	The caller's attempted allocation order
  * @mode:	One of the LRU isolation modes
  * @file:	True [1] if isolating file [!anon] pages
+ * @not_dirty:	True [1] if isolating clean [!dirty] pages
+ * @not_mapped:	True [1] if isolating unmapped [!mapped] pages
  *
  * returns how many pages were moved onto *@dst.
  */
 static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 		struct list_head *src, struct list_head *dst,
-		unsigned long *scanned, int order, int mode, int file)
+		unsigned long *scanned, int order, int mode, int file,
+		int not_dirty, int not_mapped)
 {
 	unsigned long nr_taken = 0;
 	unsigned long nr_lumpy_taken = 0;
@@ -1041,7 +1053,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,

 		VM_BUG_ON(!PageLRU(page));

-		switch (__isolate_lru_page(page, mode, file)) {
+		switch (__isolate_lru_page(page, mode, file,
+						not_dirty, not_mapped)) {
 		case 0:
 			list_move(&page->lru, dst);
 			mem_cgroup_del_lru(page);
@@ -1100,7 +1113,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 				!PageSwapCache(cursor_page))
 				break;

-			if (__isolate_lru_page(cursor_page, mode, file) == 0) {
+			if (__isolate_lru_page(cursor_page, mode, file,
+						not_dirty, not_mapped) == 0) {
 				list_move(&cursor_page->lru, dst);
 				mem_cgroup_del_lru(cursor_page);
 				nr_taken += hpage_nr_pages(page);
@@ -1143,7 +1157,7 @@ static unsigned long isolate_pages_global(unsigned long nr,
 	if (file)
 		lru += LRU_FILE;
 	return isolate_lru_pages(nr, &z->lru[lru].list, dst, scanned, order,
-						mode, file);
+						mode, file, 0, 0);
 }

 /*
    --
    1.7.1

