Subject: [patch 07/10] mm: vmscan: convert global reclaim to per-memcg LRU lists
The global per-zone LRU lists are about to go away on memcg-enabled
kernels, so global reclaim must be able to find its pages on the
per-memcg LRU lists.

Since the LRU pages of a zone are distributed over all existing memory
cgroups, a scan target for a zone is only complete once all memory
cgroups have been scanned for their proportional share of the zone's
memory.
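
For illustration, the new scanning loop boils down to the following
pattern (a simplified paraphrase of the shrink_zone() hunk below, not
the exact code; root, reclaim and sc come from the scan_control setup
that is omitted here):

	/*
	 * Sketch only: visit every memcg with pages on this zone and
	 * shrink its proportional share.  Limit reclaim still bails
	 * out after the first memcg; global reclaim keeps iterating
	 * until the whole hierarchy has been visited.
	 */
	memcg = mem_cgroup_iter(root, NULL, &reclaim);
	do {
		struct mem_cgroup_zone mz = {
			.mem_cgroup = memcg,
			.zone = zone,
		};

		shrink_mem_cgroup_zone(priority, &mz, sc);

		if (!global_reclaim(sc)) {
			mem_cgroup_iter_break(root, memcg);
			break;
		}
		memcg = mem_cgroup_iter(root, memcg, &reclaim);
	} while (memcg);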

The forced scanning of small scan targets from kswapd is limited to
zones marked unreclaimable; otherwise, kswapd could quickly overreclaim
by force-scanning the LRU lists of multiple memory cgroups.
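
Concretely, the kswapd check in get_scan_count() becomes the condition
shown in the first hunk below, repeated here only to highlight it:

	/* kswapd may only force-scan tiny LRUs on unreclaimable zones */
	if (current_is_kswapd() && mz->zone->all_unreclaimable)
		force_scan = true;
	if (!global_reclaim(sc))
		force_scan = true;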

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
---
 mm/vmscan.c | 39 ++++++++++++++++++++++-----------------
 1 files changed, 22 insertions(+), 17 deletions(-)

diff --git a/mm/vmscan.c b/mm/vmscan.c
index 96acc1a..f411e7f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1887,7 +1887,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
 	 * latencies, so it's better to scan a minimum amount there as
 	 * well.
 	 */
-	if (current_is_kswapd())
+	if (current_is_kswapd() && mz->zone->all_unreclaimable)
 		force_scan = true;
 	if (!global_reclaim(sc))
 		force_scan = true;
@@ -2111,16 +2111,6 @@ static void shrink_zone(int priority, struct zone *zone,
 	};
 	struct mem_cgroup *memcg;
 
-	if (global_reclaim(sc)) {
-		struct mem_cgroup_zone mz = {
-			.mem_cgroup = NULL,
-			.zone = zone,
-		};
-
-		shrink_mem_cgroup_zone(priority, &mz, sc);
-		return;
-	}
-
 	memcg = mem_cgroup_iter(root, NULL, &reclaim);
 	do {
 		struct mem_cgroup_zone mz = {
@@ -2134,6 +2124,10 @@ static void shrink_zone(int priority, struct zone *zone,
 		 * scanned it with decreasing priority levels until
 		 * nr_to_reclaim had been reclaimed. This priority
 		 * cycle is thus over after a single memcg.
+		 *
+		 * Direct reclaim and kswapd, on the other hand, have
+		 * to scan all memory cgroups to fulfill the overall
+		 * scan target for the zone.
 		 */
 		if (!global_reclaim(sc)) {
 			mem_cgroup_iter_break(root, memcg);
@@ -2451,13 +2445,24 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 static void age_active_anon(struct zone *zone, struct scan_control *sc,
 			    int priority)
 {
-	struct mem_cgroup_zone mz = {
-		.mem_cgroup = NULL,
-		.zone = zone,
-	};
+	struct mem_cgroup *memcg;
 
-	if (inactive_anon_is_low(&mz))
-		shrink_active_list(SWAP_CLUSTER_MAX, &mz, sc, priority, 0);
+	if (!total_swap_pages)
+		return;
+
+	memcg = mem_cgroup_iter(NULL, NULL, NULL);
+	do {
+		struct mem_cgroup_zone mz = {
+			.mem_cgroup = memcg,
+			.zone = zone,
+		};
+
+		if (inactive_anon_is_low(&mz))
+			shrink_active_list(SWAP_CLUSTER_MAX, &mz,
+					   sc, priority, 0);
+
+		memcg = mem_cgroup_iter(NULL, memcg, NULL);
+	} while (memcg);
 }
 
 /*
--
1.7.6.2

