    Subject: [PATCH 29/31] mm: vmstat: account per-zone stalls and pages skipped during reclaim
    The vmstat allocstall counter was fairly useful as a general indicator, but
    node-based LRUs change that. It's important to know whether a stall was for
    an address-limited allocation request, as such a request requires skipping
    pages from other zones. This patch adds pgstall_* counters to replace
    allocstall. The sum of the new counters equals the old allocstall, so the
    old value can be trivially recalculated. A high number of address-limited
    allocation requests may result in a lot of useless LRU scanning for
    suitable pages.
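
    For illustration, a minimal userspace sketch (not part of this patch) of
    that recalculation, assuming only that the new counters appear in
    /proc/vmstat with a "pgstall_" prefix, one per zone, as generated by the
    TEXTS_FOR_ZONES("pgstall") entry below:

    /*
     * Hypothetical example, not part of the patch: sum the per-zone
     * pgstall_* counters from /proc/vmstat to recover the value the
     * old allocstall counter would have reported.
     */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[64];
            unsigned long long val, total = 0;
            FILE *fp = fopen("/proc/vmstat", "r");

            if (!fp) {
                    perror("/proc/vmstat");
                    return 1;
            }

            while (fscanf(fp, "%63s %llu", name, &val) == 2) {
                    if (!strncmp(name, "pgstall_", 8))
                            total += val;
            }
            fclose(fp);

            printf("allocstall (recalculated): %llu\n", total);
            return 0;
    }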

    As address-limited allocations require pages to be skipped, it's important
    to know how much useless LRU scanning took place, so this patch adds
    pgskip_* counters. This yields the following model:

    1. The number of address-limited stalls can be accounted for (pgstall).
    2. The amount of useless work required to reclaim the data is accounted
       for (pgskip).
    3. The total number of scans is available from pgscan_kswapd and
       pgscan_direct, so the ratio of useful to useless scans can be
       calculated from these counters (a rough sketch follows below).
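
    As a rough sketch of point 3 (again hypothetical, assuming only the
    counter names mentioned above: pgskip_*, pgscan_kswapd and pgscan_direct),
    the useless-to-total scan ratio can be derived from /proc/vmstat like this:

    /*
     * Hypothetical example, not part of the patch: ratio of useless
     * (skipped) to total LRU scanning, using the pgskip_* counters added
     * here together with pgscan_kswapd and pgscan_direct.
     */
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char name[64];
            unsigned long long val, skipped = 0, scanned = 0;
            FILE *fp = fopen("/proc/vmstat", "r");

            if (!fp) {
                    perror("/proc/vmstat");
                    return 1;
            }

            while (fscanf(fp, "%63s %llu", name, &val) == 2) {
                    if (!strncmp(name, "pgskip_", 7))
                            skipped += val;
                    else if (!strcmp(name, "pgscan_kswapd") ||
                             !strcmp(name, "pgscan_direct"))
                            scanned += val;
            }
            fclose(fp);

            if (scanned)
                    printf("useless scan ratio: %.2f%%\n",
                           100.0 * skipped / scanned);
            return 0;
    }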

    Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
    Acked-by: Vlastimil Babka <vbabka@suse.cz>
    ---
    include/linux/vm_event_item.h |  4 +++-
    mm/vmscan.c                   | 15 +++++++++++++--
    mm/vmstat.c                   |  3 ++-
    3 files changed, 18 insertions(+), 4 deletions(-)

    diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
    index 1798ff542517..6d47f66f0e9c 100644
    --- a/include/linux/vm_event_item.h
    +++ b/include/linux/vm_event_item.h
    @@ -23,6 +23,8 @@

    enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
    FOR_ALL_ZONES(PGALLOC),
    + FOR_ALL_ZONES(PGSTALL),
    + FOR_ALL_ZONES(PGSCAN_SKIP),
    PGFREE, PGACTIVATE, PGDEACTIVATE,
    PGFAULT, PGMAJFAULT,
    PGLAZYFREED,
    @@ -37,7 +39,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
    #endif
    PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL,
    KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY,
    - PAGEOUTRUN, ALLOCSTALL, PGROTATED,
    + PAGEOUTRUN, PGROTATED,
    DROP_PAGECACHE, DROP_SLAB,
    #ifdef CONFIG_NUMA_BALANCING
    NUMA_PTE_UPDATES,
    diff --git a/mm/vmscan.c b/mm/vmscan.c
    index a687cfa91166..151c30dd27e2 100644
    --- a/mm/vmscan.c
    +++ b/mm/vmscan.c
    @@ -1394,6 +1394,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
    struct list_head *src = &lruvec->lists[lru];
    unsigned long nr_taken = 0;
    unsigned long nr_zone_taken[MAX_NR_ZONES] = { 0 };
    + unsigned long nr_skipped[MAX_NR_ZONES] = { 0, };
    unsigned long scan, nr_pages;
    LIST_HEAD(pages_skipped);

    @@ -1408,6 +1409,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,

    if (page_zonenum(page) > sc->reclaim_idx) {
    list_move(&page->lru, &pages_skipped);
    + nr_skipped[page_zonenum(page)]++;
    continue;
    }

    @@ -1436,8 +1438,17 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
    * scanning would soon rescan the same pages to skip and put the
    * system at risk of premature OOM.
    */
    - if (!list_empty(&pages_skipped))
    + if (!list_empty(&pages_skipped)) {
    + int zid;
    +
    list_splice(&pages_skipped, src);
    + for (zid = 0; zid < MAX_NR_ZONES; zid++) {
    + if (!nr_skipped[zid])
    + continue;
    +
    + __count_zid_vm_events(PGSCAN_SKIP, zid, nr_skipped[zid]);
    + }
    + }
    *nr_scanned = scan;
    trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
    nr_taken, mode, is_file_lru(lru));
    @@ -2676,7 +2687,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
    delayacct_freepages_start();

    if (global_reclaim(sc))
    - count_vm_event(ALLOCSTALL);
    + __count_zid_vm_events(PGSTALL, sc->reclaim_idx, 1);

    do {
    vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup,
    diff --git a/mm/vmstat.c b/mm/vmstat.c
    index 905ea9ae2d5a..b9a9844e3142 100644
    --- a/mm/vmstat.c
    +++ b/mm/vmstat.c
    @@ -970,6 +970,8 @@ const char * const vmstat_text[] = {
    "pswpout",

    TEXTS_FOR_ZONES("pgalloc")
    + TEXTS_FOR_ZONES("pgstall")
    + TEXTS_FOR_ZONES("pgskip")

    "pgfree",
    "pgactivate",
    @@ -995,7 +997,6 @@ const char * const vmstat_text[] = {
    "kswapd_low_wmark_hit_quickly",
    "kswapd_high_wmark_hit_quickly",
    "pageoutrun",
    - "allocstall",

    "pgrotated",

    --
    2.6.4