    Subject: [v9 PATCH 10/13] mm: vmscan: use per memcg nr_deferred of shrinker
    Use the per-memcg nr_deferred for memcg-aware shrinkers.  The shrinker's
    own nr_deferred will still be used in the following cases:
    1. Non-memcg-aware shrinkers
    2. !CONFIG_MEMCG
    3. memcg is disabled by a boot parameter

    Acked-by: Roman Gushchin <guro@fb.com>
    Acked-by: Kirill Tkhai <ktkhai@virtuozzo.com>
    Reviewed-by: Shakeel Butt <shakeelb@google.com>
    Signed-off-by: Yang Shi <shy828301@gmail.com>
    ---
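    A note for readers (not part of the commit message): the new
    xchg_nr_deferred()/add_nr_deferred() helpers in the diff below simply pick
    which counter to use: the per-memcg nr_deferred for memcg-aware shrinkers
    during memcg reclaim, otherwise the shrinker's own per-node counter.  A
    minimal standalone sketch of that selection, using plain C11 atomics and
    simplified stand-in structs rather than the real kernel types (struct
    shrinker, struct shrinker_info, struct mem_cgroup and
    shrinker_info_protected() are all modeled here, not the real definitions),
    might look like this:

    /*
     * Simplified userspace model of the nr_deferred dispatch added by this
     * patch.  Only the counter-selection logic is illustrated; the types
     * below are stand-ins, not the kernel's.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    #define SHRINKER_NUMA_AWARE	(1U << 0)
    #define SHRINKER_MEMCG_AWARE	(1U << 1)

    #define NR_NODES	2
    #define NR_SHRINKERS	4

    struct shrinker_info {				/* per-memcg, per-node */
    	atomic_long nr_deferred[NR_SHRINKERS];	/* indexed by shrinker id */
    };

    struct mem_cgroup {
    	struct shrinker_info info[NR_NODES];
    };

    struct shrinker {
    	unsigned int flags;
    	int id;
    	atomic_long nr_deferred[NR_NODES];	/* global per-node counters */
    };

    /*
     * Same selection as xchg_nr_deferred(): use the per-memcg slot for
     * memcg-aware shrinkers under memcg reclaim, otherwise the shrinker's
     * per-node slot (node 0 if the shrinker is not NUMA aware).
     */
    static long model_xchg_nr_deferred(struct shrinker *s,
    				   struct mem_cgroup *memcg, int nid)
    {
    	if (!(s->flags & SHRINKER_NUMA_AWARE))
    		nid = 0;

    	if (memcg && (s->flags & SHRINKER_MEMCG_AWARE))
    		return atomic_exchange(&memcg->info[nid].nr_deferred[s->id], 0);

    	return atomic_exchange(&s->nr_deferred[nid], 0);
    }

    /* Same selection as add_nr_deferred(): add the unused scan count back. */
    static long model_add_nr_deferred(long nr, struct shrinker *s,
    				  struct mem_cgroup *memcg, int nid)
    {
    	if (!(s->flags & SHRINKER_NUMA_AWARE))
    		nid = 0;

    	if (memcg && (s->flags & SHRINKER_MEMCG_AWARE))
    		return atomic_fetch_add(&memcg->info[nid].nr_deferred[s->id], nr) + nr;

    	return atomic_fetch_add(&s->nr_deferred[nid], nr) + nr;
    }

    int main(void)
    {
    	struct mem_cgroup memcg = { 0 };
    	struct shrinker s = {
    		.flags = SHRINKER_MEMCG_AWARE | SHRINKER_NUMA_AWARE,
    		.id = 1,
    	};

    	/* Defer 100 objects on node 1 for this memcg, then drain the count. */
    	printf("new_nr  = %ld\n", model_add_nr_deferred(100, &s, &memcg, 1));
    	printf("drained = %ld\n", model_xchg_nr_deferred(&s, &memcg, 1));
    	return 0;
    }

    In the real patch these helpers live in mm/vmscan.c and operate on
    atomic_long_t via shrinker_info_protected(); the model above only mirrors
    the branch structure.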
    mm/vmscan.c | 78 ++++++++++++++++++++++++++++++++++++++++++++---------
    1 file changed, 66 insertions(+), 12 deletions(-)

    diff --git a/mm/vmscan.c b/mm/vmscan.c
    index ae82afe6cec6..326f0e0c4356 100644
    --- a/mm/vmscan.c
    +++ b/mm/vmscan.c
    @@ -374,6 +374,24 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
     	idr_remove(&shrinker_idr, id);
     }
     
    +static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
    +				   struct mem_cgroup *memcg)
    +{
    +	struct shrinker_info *info;
    +
    +	info = shrinker_info_protected(memcg, nid);
    +	return atomic_long_xchg(&info->nr_deferred[shrinker->id], 0);
    +}
    +
    +static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
    +				  struct mem_cgroup *memcg)
    +{
    +	struct shrinker_info *info;
    +
    +	info = shrinker_info_protected(memcg, nid);
    +	return atomic_long_add_return(nr, &info->nr_deferred[shrinker->id]);
    +}
    +
     static bool cgroup_reclaim(struct scan_control *sc)
     {
     	return sc->target_mem_cgroup;
    @@ -412,6 +430,18 @@ static void unregister_memcg_shrinker(struct shrinker *shrinker)
     {
     }
     
    +static long xchg_nr_deferred_memcg(int nid, struct shrinker *shrinker,
    +				   struct mem_cgroup *memcg)
    +{
    +	return 0;
    +}
    +
    +static long add_nr_deferred_memcg(long nr, int nid, struct shrinker *shrinker,
    +				  struct mem_cgroup *memcg)
    +{
    +	return 0;
    +}
    +
     static bool cgroup_reclaim(struct scan_control *sc)
     {
     	return false;
    @@ -423,6 +453,39 @@ static bool writeback_throttling_sane(struct scan_control *sc)
     }
     #endif
     
    +static long xchg_nr_deferred(struct shrinker *shrinker,
    +			     struct shrink_control *sc)
    +{
    +	int nid = sc->nid;
    +
    +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
    +		nid = 0;
    +
    +	if (sc->memcg &&
    +	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
    +		return xchg_nr_deferred_memcg(nid, shrinker,
    +					      sc->memcg);
    +
    +	return atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
    +}
    +
    +
    +static long add_nr_deferred(long nr, struct shrinker *shrinker,
    +			    struct shrink_control *sc)
    +{
    +	int nid = sc->nid;
    +
    +	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
    +		nid = 0;
    +
    +	if (sc->memcg &&
    +	    (shrinker->flags & SHRINKER_MEMCG_AWARE))
    +		return add_nr_deferred_memcg(nr, nid, shrinker,
    +					     sc->memcg);
    +
    +	return atomic_long_add_return(nr, &shrinker->nr_deferred[nid]);
    +}
    +
     /*
      * This misses isolated pages which are not accounted for to save counters.
      * As the data only determines if reclaim or compaction continues, it is
    @@ -559,14 +622,10 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
     	long freeable;
     	long nr;
     	long new_nr;
    -	int nid = shrinkctl->nid;
     	long batch_size = shrinker->batch ? shrinker->batch
     					  : SHRINK_BATCH;
     	long scanned = 0, next_deferred;
     
    -	if (!(shrinker->flags & SHRINKER_NUMA_AWARE))
    -		nid = 0;
    -
     	freeable = shrinker->count_objects(shrinker, shrinkctl);
     	if (freeable == 0 || freeable == SHRINK_EMPTY)
     		return freeable;
    @@ -576,7 +635,7 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
     	 * and zero it so that other concurrent shrinker invocations
     	 * don't also do this scanning work.
     	 */
    -	nr = atomic_long_xchg(&shrinker->nr_deferred[nid], 0);
    +	nr = xchg_nr_deferred(shrinker, shrinkctl);
     
     	total_scan = nr;
     	if (shrinker->seeks) {
    @@ -667,14 +726,9 @@ static unsigned long do_shrink_slab(struct shrink_control *shrinkctl,
     		next_deferred = 0;
     	/*
     	 * move the unused scan count back into the shrinker in a
    -	 * manner that handles concurrent updates. If we exhausted the
    -	 * scan, there is no need to do an update.
    +	 * manner that handles concurrent updates.
     	 */
    -	if (next_deferred > 0)
    -		new_nr = atomic_long_add_return(next_deferred,
    -						&shrinker->nr_deferred[nid]);
    -	else
    -		new_nr = atomic_long_read(&shrinker->nr_deferred[nid]);
    +	new_nr = add_nr_deferred(next_deferred, shrinker, shrinkctl);
     
     	trace_mm_shrink_slab_end(shrinker, shrinkctl->nid, freed, nr, new_nr, total_scan);
     	return freed;
    --
    2.26.2