From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Subject: [cleanup][PATCH 1/8] vmscan: Make shrink_zone_begin/end helper function
Date: 2009-12-14

The concurrent_reclaimers limitation code made a mess of shrink_zone().
Introduce shrink_zone_begin()/shrink_zone_end() helpers to improve readability.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
---
 mm/vmscan.c |   58 +++++++++++++++++++++++++++++++++++-----------------------
 1 files changed, 35 insertions(+), 23 deletions(-)
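
[Not part of the patch, just a reader's aid: the hunks below move shrink_zone()
around, so the resulting shape is easier to see condensed. What follows is a
userspace sketch of the begin/end guard pattern being factored out.
reclaim_begin()/reclaim_end(), MAX_RECLAIMERS and the -EBUSY return are
illustrative stand-ins only; the real helpers take a zone and a scan_control,
also check kswapd and the gfp flags, return -ERESTARTSYS, and interact with
the zone->reclaim_wait waitqueue.]

/* Userspace sketch of the guard pattern; not kernel code. */
#include <errno.h>
#include <stdatomic.h>
#include <stdio.h>

#define MAX_RECLAIMERS 8	/* stand-in for max_zone_concurrent_reclaimers */

static atomic_int reclaimers;	/* stand-in for zone->concurrent_reclaimers */

/* Mirrors shrink_zone_begin(): admit the caller or tell it to back off. */
static int reclaim_begin(void)
{
	/* Soft limit: the check and the increment are not one atomic step,
	 * matching the kernel code. */
	if (atomic_load(&reclaimers) > MAX_RECLAIMERS)
		return -EBUSY;	/* the patch returns -ERESTARTSYS here */

	atomic_fetch_add(&reclaimers, 1);
	return 0;
}

/* Mirrors shrink_zone_end(): release the slot (the kernel also wakes waiters). */
static void reclaim_end(void)
{
	atomic_fetch_sub(&reclaimers, 1);
}

/* After the patch, shrink_zone() is a guard pair around the scan work. */
static void shrink(void)
{
	if (reclaim_begin())
		return;

	/* ... scan the LRU lists here ... */

	reclaim_end();
}

int main(void)
{
	shrink();
	printf("concurrent reclaimers now: %d\n", atomic_load(&reclaimers));
	return 0;
}

The point of the split is that the throttling policy lives in one place and
the body of shrink_zone() reads as plain scan logic.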

diff --git a/mm/vmscan.c b/mm/vmscan.c
index ecfe28c..74c36fe 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1597,25 +1597,11 @@ static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
 	return nr;
 }
 
-/*
- * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
- */
-static void shrink_zone(int priority, struct zone *zone,
-			struct scan_control *sc)
+static int shrink_zone_begin(struct zone *zone, struct scan_control *sc)
 {
-	unsigned long nr[NR_LRU_LISTS];
-	unsigned long nr_to_scan;
-	unsigned long percent[2];	/* anon @ 0; file @ 1 */
-	enum lru_list l;
-	unsigned long nr_reclaimed = sc->nr_reclaimed;
-	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
-	int noswap = 0;
-
-	if (!current_is_kswapd() && atomic_read(&zone->concurrent_reclaimers) >
-	    max_zone_concurrent_reclaimers &&
-	    (sc->gfp_mask & (__GFP_IO|__GFP_FS)) ==
-	    (__GFP_IO|__GFP_FS)) {
+	if (!current_is_kswapd() &&
+	    atomic_read(&zone->concurrent_reclaimers) > max_zone_concurrent_reclaimers &&
+	    (sc->gfp_mask & (__GFP_IO|__GFP_FS)) == (__GFP_IO|__GFP_FS)) {
 		/*
 		 * Do not add to the lock contention if this zone has
 		 * enough processes doing page reclaim already, since
@@ -1630,12 +1616,40 @@ static void shrink_zone(int priority, struct zone *zone,
 		if (zone_watermark_ok(zone, sc->order, low_wmark_pages(zone),
 				      0, 0)) {
 			wake_up(&zone->reclaim_wait);
-			sc->nr_reclaimed += nr_to_reclaim;
-			return;
+			sc->nr_reclaimed += sc->nr_to_reclaim;
+			return -ERESTARTSYS;
 		}
 	}
 
 	atomic_inc(&zone->concurrent_reclaimers);
+	return 0;
+}
+
+static void shrink_zone_end(struct zone *zone, struct scan_control *sc)
+{
+	atomic_dec(&zone->concurrent_reclaimers);
+	wake_up(&zone->reclaim_wait);
+}
+
+/*
+ * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
+ */
+static void shrink_zone(int priority, struct zone *zone,
+			struct scan_control *sc)
+{
+	unsigned long nr[NR_LRU_LISTS];
+	unsigned long nr_to_scan;
+	unsigned long percent[2];	/* anon @ 0; file @ 1 */
+	enum lru_list l;
+	unsigned long nr_reclaimed = sc->nr_reclaimed;
+	unsigned long nr_to_reclaim = sc->nr_to_reclaim;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
+	int noswap = 0;
+	int ret;
+
+	ret = shrink_zone_begin(zone, sc);
+	if (ret)
+		return;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1692,9 +1706,7 @@ static void shrink_zone(int priority, struct zone *zone,
 	shrink_active_list(SWAP_CLUSTER_MAX, zone, sc, priority, 0);
 
 	throttle_vm_writeout(sc->gfp_mask);
-
-	atomic_dec(&zone->concurrent_reclaimers);
-	wake_up(&zone->reclaim_wait);
+	shrink_zone_end(zone, sc);
 }
 
 /*
    --
    1.6.5.2



