Subject: [PATCH v4 03/11] mm/vmscan.c: refactor shrink_node()
    Refactor shrink_node() by moving the preparation of the anon/file scan
    balance into a new helper, prepare_scan_count(). This is pure code
    movement, intended to make the upcoming changes to mm/vmscan.c more
    readable.

    Signed-off-by: Yu Zhao <yuzhao@google.com>
    Tested-by: Konstantin Kharlamov <Hi-Angel@yandex.ru>
    ---
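    A note on the resulting shape of shrink_node(): the ~90 lines that used
    to compute the scan balance inline are now a single call to the new
    helper. The program below is a stand-alone sketch of that call structure
    only; all types and bodies are stubs for illustration, not kernel code:

    #include <stdio.h>

    /* stand-ins for the kernel types; illustration only */
    struct scan_control {
    	int cache_trim_mode;
    	int file_is_tiny;
    };

    typedef struct pglist_data {
    	int node_id;
    } pg_data_t;

    /*
     * Stands in for the new helper: in the real patch it fills in
     * sc->anon_cost, sc->file_cost, sc->may_deactivate, sc->cache_trim_mode
     * and sc->file_is_tiny before any memcg is shrunk.
     */
    static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
    {
    	(void)pgdat;
    	sc->cache_trim_mode = 0;
    	sc->file_is_tiny = 0;
    }

    /* stands in for per-memcg reclaim */
    static void shrink_node_memcgs(pg_data_t *pgdat, struct scan_control *sc)
    {
    	(void)pgdat;
    	(void)sc;
    }

    static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
    {
    	prepare_scan_count(pgdat, sc);	/* previously ~90 inline lines */
    	shrink_node_memcgs(pgdat, sc);
    }

    int main(void)
    {
    	pg_data_t node = { .node_id = 0 };
    	struct scan_control sc = { 0 };

    	shrink_node(&node, &sc);
    	printf("cache_trim_mode=%d file_is_tiny=%d\n",
    	       sc.cache_trim_mode, sc.file_is_tiny);
    	return 0;
    }

    Since the helper writes only into struct scan_control, the move is pure
    code motion as far as shrink_node() is concerned.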
    mm/vmscan.c | 186 +++++++++++++++++++++++++++-------------------------
    1 file changed, 98 insertions(+), 88 deletions(-)
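    The deactivation logic in the first hunk below keys off whether the
    WORKINGSET_ACTIVATE_ANON/FILE counters have moved since the snapshots
    kept in target_lruvec->refaults[]. Below is a stand-alone sketch of that
    set/clear pattern; the flag values mirror mm/vmscan.c's DEACTIVATE_ANON
    and DEACTIVATE_FILE, while all counter values are made up:

    #include <stdio.h>

    #define DEACTIVATE_ANON 1
    #define DEACTIVATE_FILE 2

    int main(void)
    {
    	unsigned int may_deactivate = 0;
    	/* hypothetical counters: current value vs snapshot from last reclaim */
    	unsigned long refaults_anon = 120, snap_anon = 120;	/* unchanged */
    	unsigned long refaults_file = 345, snap_file = 300;	/* moved */
    	int inactive_anon_low = 0, inactive_file_low = 0;

    	/*
    	 * Refaults moved (or the inactive list is low): a new workingset
    	 * is forming, so allow deactivation to drain stale active pages.
    	 * Otherwise clear the bit and leave the active list alone.
    	 */
    	if (refaults_anon != snap_anon || inactive_anon_low)
    		may_deactivate |= DEACTIVATE_ANON;
    	else
    		may_deactivate &= ~DEACTIVATE_ANON;

    	if (refaults_file != snap_file || inactive_file_low)
    		may_deactivate |= DEACTIVATE_FILE;
    	else
    		may_deactivate &= ~DEACTIVATE_FILE;

    	printf("may_deactivate = 0x%x\n", may_deactivate);	/* prints 0x2 */
    	return 0;
    }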

    diff --git a/mm/vmscan.c b/mm/vmscan.c
    index 4620df62f0ff..b6d14880bd76 100644
    --- a/mm/vmscan.c
    +++ b/mm/vmscan.c
    @@ -2437,6 +2437,103 @@ enum scan_balance {
     	SCAN_FILE,
     };

    +static void prepare_scan_count(pg_data_t *pgdat, struct scan_control *sc)
    +{
    +	unsigned long file;
    +	struct lruvec *target_lruvec;
    +
    +	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);
    +
    +	/*
    +	 * Determine the scan balance between anon and file LRUs.
    +	 */
    +	spin_lock_irq(&target_lruvec->lru_lock);
    +	sc->anon_cost = target_lruvec->anon_cost;
    +	sc->file_cost = target_lruvec->file_cost;
    +	spin_unlock_irq(&target_lruvec->lru_lock);
    +
    +	/*
    +	 * Target desirable inactive:active list ratios for the anon
    +	 * and file LRU lists.
    +	 */
    +	if (!sc->force_deactivate) {
    +		unsigned long refaults;
    +
    +		refaults = lruvec_page_state(target_lruvec,
    +				WORKINGSET_ACTIVATE_ANON);
    +		if (refaults != target_lruvec->refaults[0] ||
    +		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
    +			sc->may_deactivate |= DEACTIVATE_ANON;
    +		else
    +			sc->may_deactivate &= ~DEACTIVATE_ANON;
    +
    +		/*
    +		 * When refaults are being observed, it means a new
    +		 * workingset is being established. Deactivate to get
    +		 * rid of any stale active pages quickly.
    +		 */
    +		refaults = lruvec_page_state(target_lruvec,
    +				WORKINGSET_ACTIVATE_FILE);
    +		if (refaults != target_lruvec->refaults[1] ||
    +		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
    +			sc->may_deactivate |= DEACTIVATE_FILE;
    +		else
    +			sc->may_deactivate &= ~DEACTIVATE_FILE;
    +	} else
    +		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
    +
    +	/*
    +	 * If we have plenty of inactive file pages that aren't
    +	 * thrashing, try to reclaim those first before touching
    +	 * anonymous pages.
    +	 */
    +	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
    +	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
    +		sc->cache_trim_mode = 1;
    +	else
    +		sc->cache_trim_mode = 0;
    +
    +	/*
    +	 * Prevent the reclaimer from falling into the cache trap: as
    +	 * cache pages start out inactive, every cache fault will tip
    +	 * the scan balance towards the file LRU. And as the file LRU
    +	 * shrinks, so does the window for rotation from references.
    +	 * This means we have a runaway feedback loop where a tiny
    +	 * thrashing file LRU becomes infinitely more attractive than
    +	 * anon pages. Try to detect this based on file LRU size.
    +	 */
    +	if (!cgroup_reclaim(sc)) {
    +		unsigned long total_high_wmark = 0;
    +		unsigned long free, anon;
    +		int z;
    +
    +		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
    +		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
    +			   node_page_state(pgdat, NR_INACTIVE_FILE);
    +
    +		for (z = 0; z < MAX_NR_ZONES; z++) {
    +			struct zone *zone = &pgdat->node_zones[z];
    +
    +			if (!managed_zone(zone))
    +				continue;
    +
    +			total_high_wmark += high_wmark_pages(zone);
    +		}
    +
    +		/*
    +		 * Consider anon: if that's low too, this isn't a
    +		 * runaway file reclaim problem, but rather just
    +		 * extreme pressure. Reclaim as per usual then.
    +		 */
    +		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
    +
    +		sc->file_is_tiny =
    +			file + free <= total_high_wmark &&
    +			!(sc->may_deactivate & DEACTIVATE_ANON) &&
    +			anon >> sc->priority;
    +	}
    +}
    +
     /*
      * Determine how aggressively the anon and file LRU lists should be
      * scanned. The relative value of each set of LRU lists is determined
    @@ -2882,7 +2979,6 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
     	unsigned long nr_reclaimed, nr_scanned;
     	struct lruvec *target_lruvec;
     	bool reclaimable = false;
    -	unsigned long file;

     	target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat);

    @@ -2892,93 +2988,7 @@ static void shrink_node(pg_data_t *pgdat, struct scan_control *sc)
     	nr_reclaimed = sc->nr_reclaimed;
     	nr_scanned = sc->nr_scanned;

    -	/*
    -	 * Determine the scan balance between anon and file LRUs.
    -	 */
    -	spin_lock_irq(&target_lruvec->lru_lock);
    -	sc->anon_cost = target_lruvec->anon_cost;
    -	sc->file_cost = target_lruvec->file_cost;
    -	spin_unlock_irq(&target_lruvec->lru_lock);
    -
    -	/*
    -	 * Target desirable inactive:active list ratios for the anon
    -	 * and file LRU lists.
    -	 */
    -	if (!sc->force_deactivate) {
    -		unsigned long refaults;
    -
    -		refaults = lruvec_page_state(target_lruvec,
    -				WORKINGSET_ACTIVATE_ANON);
    -		if (refaults != target_lruvec->refaults[0] ||
    -		    inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
    -			sc->may_deactivate |= DEACTIVATE_ANON;
    -		else
    -			sc->may_deactivate &= ~DEACTIVATE_ANON;
    -
    -		/*
    -		 * When refaults are being observed, it means a new
    -		 * workingset is being established. Deactivate to get
    -		 * rid of any stale active pages quickly.
    -		 */
    -		refaults = lruvec_page_state(target_lruvec,
    -				WORKINGSET_ACTIVATE_FILE);
    -		if (refaults != target_lruvec->refaults[1] ||
    -		    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
    -			sc->may_deactivate |= DEACTIVATE_FILE;
    -		else
    -			sc->may_deactivate &= ~DEACTIVATE_FILE;
    -	} else
    -		sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE;
    -
    -	/*
    -	 * If we have plenty of inactive file pages that aren't
    -	 * thrashing, try to reclaim those first before touching
    -	 * anonymous pages.
    -	 */
    -	file = lruvec_page_state(target_lruvec, NR_INACTIVE_FILE);
    -	if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE))
    -		sc->cache_trim_mode = 1;
    -	else
    -		sc->cache_trim_mode = 0;
    -
    -	/*
    -	 * Prevent the reclaimer from falling into the cache trap: as
    -	 * cache pages start out inactive, every cache fault will tip
    -	 * the scan balance towards the file LRU. And as the file LRU
    -	 * shrinks, so does the window for rotation from references.
    -	 * This means we have a runaway feedback loop where a tiny
    -	 * thrashing file LRU becomes infinitely more attractive than
    -	 * anon pages. Try to detect this based on file LRU size.
    -	 */
    -	if (!cgroup_reclaim(sc)) {
    -		unsigned long total_high_wmark = 0;
    -		unsigned long free, anon;
    -		int z;
    -
    -		free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES);
    -		file = node_page_state(pgdat, NR_ACTIVE_FILE) +
    -			   node_page_state(pgdat, NR_INACTIVE_FILE);
    -
    -		for (z = 0; z < MAX_NR_ZONES; z++) {
    -			struct zone *zone = &pgdat->node_zones[z];
    -			if (!managed_zone(zone))
    -				continue;
    -
    -			total_high_wmark += high_wmark_pages(zone);
    -		}
    -
    -		/*
    -		 * Consider anon: if that's low too, this isn't a
    -		 * runaway file reclaim problem, but rather just
    -		 * extreme pressure. Reclaim as per usual then.
    -		 */
    -		anon = node_page_state(pgdat, NR_INACTIVE_ANON);
    -
    -		sc->file_is_tiny =
    -			file + free <= total_high_wmark &&
    -			!(sc->may_deactivate & DEACTIVATE_ANON) &&
    -			anon >> sc->priority;
    -	}
    +	prepare_scan_count(pgdat, sc);

     	shrink_node_memcgs(pgdat, sc);

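    A note on the two heuristics in prepare_scan_count() above:
    "x >> sc->priority" is a cheap test for x >= 2^priority, so at the
    kernel's default priority of 12 cache_trim_mode requires at least 4096
    inactive file pages (16 MiB with 4 KiB pages), and the threshold halves
    each time reclaim escalates. file_is_tiny in turn compares file plus
    free pages against the summed zone high watermarks. A stand-alone
    sketch with made-up numbers:

    #include <stdio.h>

    #define DEF_PRIORITY 12	/* the kernel's default reclaim priority */

    int main(void)
    {
    	int priority = DEF_PRIORITY;
    	/* hypothetical per-node page counts */
    	unsigned long inactive_file = 5000;
    	unsigned long file = 9000;		/* active + inactive file */
    	unsigned long free = 2000;
    	unsigned long anon = 8192;		/* inactive anon */
    	unsigned long total_high_wmark = 12000;

    	/* nonzero iff inactive_file >= (1UL << priority), i.e. >= 4096 pages */
    	printf("inactive_file >> priority = %lu\n", inactive_file >> priority);

    	/*
    	 * file_is_tiny: the whole file LRU plus free memory fits under the
    	 * high watermarks while inactive anon is still plentiful. The real
    	 * check also requires !(sc->may_deactivate & DEACTIVATE_ANON),
    	 * modeled here as a plain flag.
    	 */
    	int anon_not_deactivating = 1;
    	int file_is_tiny = file + free <= total_high_wmark &&
    			   anon_not_deactivating &&
    			   anon >> priority;
    	printf("file_is_tiny = %d\n", file_is_tiny);	/* prints 1 */
    	return 0;
    }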
    --
    2.33.0.rc1.237.g0d66db33f3-goog