From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Subject: [PATCH 5/5] add shmem vmstat
Date: 2009-07-09
ChangeLog
Since v1:
 - Fixed a misaccounting bug on page migration

    ========================
    Subject: [PATCH] add shmem vmstat

Recently, we have faced several OOM problems caused by a large amount
of GEM cache; in general, a large amount of Shmem/Tmpfs memory can
cause memory shortage problems.

We often use the following calculation to estimate the number of shmem
pages:

	shmem = NR_ACTIVE_ANON + NR_INACTIVE_ANON - NR_ANON_PAGES

but this expression is wrong: it does not account for isolated pages
or mlocked pages.
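
As an aside, the error is easy to see from userspace. The following
minimal sketch (illustrative only, not part of this patch) computes
the old approximation from /proc/vmstat; vmstat_read() is a
hypothetical helper, and the counter names are the ones exported via
vmstat_text[]:

#include <stdio.h>
#include <string.h>

/* Hypothetical helper: look up a single counter in /proc/vmstat. */
static unsigned long vmstat_read(const char *name)
{
	char key[64];
	unsigned long val, ret = 0;
	FILE *f = fopen("/proc/vmstat", "r");

	if (!f)
		return 0;
	while (fscanf(f, "%63s %lu", key, &val) == 2) {
		if (!strcmp(key, name)) {
			ret = val;
			break;
		}
	}
	fclose(f);
	return ret;
}

int main(void)
{
	/*
	 * The old approximation.  It drifts (and the unsigned
	 * subtraction can even wrap) whenever anon pages sit on the
	 * isolated or unevictable (mlocked) lists: such pages are
	 * counted in nr_anon_pages but are not on the active/inactive
	 * anon LRU lists.
	 */
	unsigned long est = vmstat_read("nr_active_anon")
			  + vmstat_read("nr_inactive_anon")
			  - vmstat_read("nr_anon_pages");

	printf("approximate shmem pages: %lu\n", est);
	return 0;
}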

This patch therefore adds explicit Shmem/Tmpfs accounting to the vm
statistics.
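
With this patch applied, no approximation is needed: the value can be
read directly from the new "Shmem:" field in /proc/meminfo, the
nr_shmem counter in /proc/vmstat, or the per-node "Node <N> Shmem:"
line in sysfs.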


    Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
    ---
 drivers/base/node.c    |    2 ++
 fs/proc/meminfo.c      |    2 ++
 include/linux/mmzone.h |    1 +
 mm/filemap.c           |    4 ++++
 mm/migrate.c           |    4 ++++
 mm/page_alloc.c        |    5 ++++-
 mm/vmstat.c            |    1 +
 7 files changed, 18 insertions(+), 1 deletion(-)

    Index: b/drivers/base/node.c
    ===================================================================
    --- a/drivers/base/node.c
    +++ b/drivers/base/node.c
@@ -87,6 +87,7 @@ static ssize_t node_read_meminfo(struct
 		       "Node %d FilePages:      %8lu kB\n"
 		       "Node %d Mapped:         %8lu kB\n"
 		       "Node %d AnonPages:      %8lu kB\n"
+		       "Node %d Shmem:          %8lu kB\n"
 		       "Node %d KernelStack:    %8lu kB\n"
 		       "Node %d PageTables:     %8lu kB\n"
 		       "Node %d NFS_Unstable:   %8lu kB\n"
@@ -121,6 +122,7 @@ static ssize_t node_read_meminfo(struct
 		       nid, K(node_page_state(nid, NR_FILE_PAGES)),
 		       nid, K(node_page_state(nid, NR_FILE_MAPPED)),
 		       nid, K(node_page_state(nid, NR_ANON_PAGES)),
+		       nid, K(node_page_state(nid, NR_SHMEM)),
 		       nid, node_page_state(nid, NR_KERNEL_STACK) *
 				THREAD_SIZE / 1024,
 		       nid, K(node_page_state(nid, NR_PAGETABLE)),
    Index: b/fs/proc/meminfo.c
    ===================================================================
    --- a/fs/proc/meminfo.c
    +++ b/fs/proc/meminfo.c
@@ -83,6 +83,7 @@ static int meminfo_proc_show(struct seq_
 		"Writeback:      %8lu kB\n"
 		"AnonPages:      %8lu kB\n"
 		"Mapped:         %8lu kB\n"
+		"Shmem:          %8lu kB\n"
 		"Slab:           %8lu kB\n"
 		"SReclaimable:   %8lu kB\n"
 		"SUnreclaim:     %8lu kB\n"
@@ -129,6 +130,7 @@ static int meminfo_proc_show(struct seq_
 		K(global_page_state(NR_WRITEBACK)),
 		K(global_page_state(NR_ANON_PAGES)),
 		K(global_page_state(NR_FILE_MAPPED)),
+		K(global_page_state(NR_SHMEM)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE) +
 				global_page_state(NR_SLAB_UNRECLAIMABLE)),
 		K(global_page_state(NR_SLAB_RECLAIMABLE)),
    Index: b/include/linux/mmzone.h
    ===================================================================
    --- a/include/linux/mmzone.h
    +++ b/include/linux/mmzone.h
@@ -102,6 +102,7 @@ enum zone_stat_item {
 	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
 	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
 	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
+	NR_SHMEM,		/* shmem pages (including tmpfs/GEM pages) */
 #ifdef CONFIG_NUMA
 	NUMA_HIT,		/* allocated in intended node */
 	NUMA_MISS,		/* allocated in non intended node */
    Index: b/mm/filemap.c
    ===================================================================
    --- a/mm/filemap.c
    +++ b/mm/filemap.c
@@ -120,6 +120,8 @@ void __remove_from_page_cache(struct pag
 	page->mapping = NULL;
 	mapping->nrpages--;
 	__dec_zone_page_state(page, NR_FILE_PAGES);
+	if (PageSwapBacked(page))
+		__dec_zone_page_state(page, NR_SHMEM);
 	BUG_ON(page_mapped(page));

 	/*
@@ -476,6 +478,8 @@ int add_to_page_cache_locked(struct page
 	if (likely(!error)) {
 		mapping->nrpages++;
 		__inc_zone_page_state(page, NR_FILE_PAGES);
+		if (PageSwapBacked(page))
+			__inc_zone_page_state(page, NR_SHMEM);
 		spin_unlock_irq(&mapping->tree_lock);
 	} else {
 		page->mapping = NULL;
    Index: b/mm/vmstat.c
    ===================================================================
    --- a/mm/vmstat.c
    +++ b/mm/vmstat.c
@@ -646,6 +646,7 @@ static const char * const vmstat_text[]
 	"nr_writeback_temp",
 	"nr_isolated_anon",
 	"nr_isolated_file",
+	"nr_shmem",
 #ifdef CONFIG_NUMA
 	"numa_hit",
 	"numa_miss",
    Index: b/mm/page_alloc.c
    ===================================================================
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
@@ -2120,7 +2120,7 @@ void show_free_areas(void)
 		" unevictable:%lu\n"
 		" dirty:%lu writeback:%lu unstable:%lu buffer:%lu\n"
 		" free:%lu slab_reclaimable:%lu slab_unreclaimable:%lu\n"
-		" mapped:%lu pagetables:%lu bounce:%lu\n",
+		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n",
 		global_page_state(NR_ACTIVE_ANON),
 		global_page_state(NR_INACTIVE_ANON),
 		global_page_state(NR_ISOLATED_ANON),
@@ -2136,6 +2136,7 @@ void show_free_areas(void)
 		global_page_state(NR_SLAB_RECLAIMABLE),
 		global_page_state(NR_SLAB_UNRECLAIMABLE),
 		global_page_state(NR_FILE_MAPPED),
+		global_page_state(NR_SHMEM),
 		global_page_state(NR_PAGETABLE),
 		global_page_state(NR_BOUNCE));

@@ -2160,6 +2161,7 @@ void show_free_areas(void)
 			" dirty:%lukB"
 			" writeback:%lukB"
 			" mapped:%lukB"
+			" shmem:%lukB"
 			" slab_reclaimable:%lukB"
 			" slab_unreclaimable:%lukB"
 			" kernel_stack:%lukB"
@@ -2187,6 +2189,7 @@ void show_free_areas(void)
 			K(zone_page_state(zone, NR_FILE_DIRTY)),
 			K(zone_page_state(zone, NR_WRITEBACK)),
 			K(zone_page_state(zone, NR_FILE_MAPPED)),
+			K(zone_page_state(zone, NR_SHMEM)),
 			K(zone_page_state(zone, NR_SLAB_RECLAIMABLE)),
 			K(zone_page_state(zone, NR_SLAB_UNRECLAIMABLE)),
 			zone_page_state(zone, NR_KERNEL_STACK) *
    Index: b/mm/migrate.c
    ===================================================================
    --- a/mm/migrate.c
    +++ b/mm/migrate.c
@@ -312,7 +312,11 @@ static int migrate_page_move_mapping(str
 	 */
 	__dec_zone_page_state(page, NR_FILE_PAGES);
 	__inc_zone_page_state(newpage, NR_FILE_PAGES);
+	if (PageSwapBacked(page)) {
+		__dec_zone_page_state(page, NR_SHMEM);
+		__inc_zone_page_state(newpage, NR_SHMEM);
+	}
 	spin_unlock_irq(&mapping->tree_lock);

 	return 0;


