Date: 2017-02-23
From: Michal Hocko
Subject: Re: [RFC PATCH] mm/vmscan: fix high cpu usage of kswapd if there

On Wed 22-02-17 15:16:57, Johannes Weiner wrote:
[...]
> And a follow-up: once it gives up, when should kswapd return to work?
> We used to reset NR_PAGES_SCANNED whenever a page gets freed. But
> that's a branch in a common allocator path, just to recover kswapd - a
> latency tool, not a necessity for functional correctness - from a
> situation that's exceedingly rare. How about we leave it
> disabled until a direct reclaimer manages to free something?

Hmm, I guess we also want to reset the counter after an OOM invocation,
which might free a lot of memory, and we do not want to wait for
direct reclaim to resurrect kswapd. Something like the following on
top of yours:
---
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ddf27c435225..6be11c18551f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3446,7 +3446,8 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	return page;
 }
 
-static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
+static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac,
+		bool force)
 {
 	struct zoneref *z;
 	struct zone *zone;
@@ -3454,8 +3455,11 @@ static void wake_all_kswapds(unsigned int order, const struct alloc_context *ac)
 
 	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
 					ac->high_zoneidx, ac->nodemask) {
-		if (last_pgdat != zone->zone_pgdat)
+		if (last_pgdat != zone->zone_pgdat) {
+			if (force)
+				zone->zone_pgdat->kswapd_failed_runs = 0;
 			wakeup_kswapd(zone, order, ac->high_zoneidx);
+		}
 		last_pgdat = zone->zone_pgdat;
 	}
 }
@@ -3640,6 +3644,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long alloc_start = jiffies;
 	unsigned int stall_timeout = 10 * HZ;
 	unsigned int cpuset_mems_cookie;
+	bool kick_kswapd = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3685,7 +3690,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		wake_all_kswapds(order, ac);
+		wake_all_kswapds(order, ac, false);
 
 	/*
 	 * The adjusted alloc_flags might result in immediate success, so try
@@ -3738,7 +3743,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 retry:
 	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
 	if (gfp_mask & __GFP_KSWAPD_RECLAIM)
-		wake_all_kswapds(order, ac);
+		wake_all_kswapds(order, ac, kick_kswapd);
+	kick_kswapd = false;
 
 	if (gfp_pfmemalloc_allowed(gfp_mask))
 		alloc_flags = ALLOC_NO_WATERMARKS;
@@ -3833,6 +3839,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	/* Retry as long as the OOM killer is making progress */
 	if (did_some_progress) {
 		no_progress_loops = 0;
+		kick_kswapd = true;
 		goto retry;
 	}
 
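
For illustration only, here is a minimal stand-alone user-space model of the
scheme being discussed: kswapd stops honouring wakeups once it has accumulated
too many runs that reclaimed nothing, and is revived either when a direct
reclaimer actually frees something or when the allocator force-wakes it after
the OOM killer made progress. The struct, helper names and the MAX_KSWAPD_RUNS
threshold are invented for the sketch; this is not the kernel implementation.

/*
 * Illustrative user-space model only -- not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_KSWAPD_RUNS	16		/* hypothetical give-up threshold */

struct pgdat {
	int kswapd_failed_runs;
};

/* kswapd's main loop body: count runs that reclaimed nothing */
static void kswapd_run(struct pgdat *pgdat, unsigned long reclaimed)
{
	if (reclaimed)
		pgdat->kswapd_failed_runs = 0;
	else
		pgdat->kswapd_failed_runs++;
	printf("kswapd ran, reclaimed %lu, failed runs %d\n",
	       reclaimed, pgdat->kswapd_failed_runs);
}

/* allocator side: force mirrors wake_all_kswapds(order, ac, true) */
static void wakeup_kswapd(struct pgdat *pgdat, bool force, unsigned long reclaimed)
{
	if (force)
		pgdat->kswapd_failed_runs = 0;	/* OOM killer made progress */
	if (pgdat->kswapd_failed_runs >= MAX_KSWAPD_RUNS) {
		printf("kswapd backed off, ignoring wakeup\n");
		return;
	}
	kswapd_run(pgdat, reclaimed);
}

/* direct reclaim side: Johannes' proposal, reset only on actual progress */
static void direct_reclaim_done(struct pgdat *pgdat, unsigned long freed)
{
	if (freed)
		pgdat->kswapd_failed_runs = 0;
}

int main(void)
{
	struct pgdat node = { 0 };
	int i;

	/* nothing reclaimable: kswapd eventually gives up */
	for (i = 0; i < MAX_KSWAPD_RUNS + 2; i++)
		wakeup_kswapd(&node, false, 0);

	/* a successful direct reclaim lets ordinary wakeups work again */
	direct_reclaim_done(&node, 32);
	wakeup_kswapd(&node, false, 32);

	/* back off again, then recover via a forced (post-OOM) wakeup */
	for (i = 0; i < MAX_KSWAPD_RUNS + 2; i++)
		wakeup_kswapd(&node, false, 0);
	wakeup_kswapd(&node, true, 64);

	return 0;
}

Compiling and running the sketch shows the counter climbing to the threshold,
wakeups being ignored, and the two recovery paths resetting it.
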
--
Michal Hocko
SUSE Labs