Subject: [REPOST] [PATCH 2/3] Refactor zone_reclaim code (v3)
Changelog v3
1. Renamed zone_reclaim_unmapped_pages to zone_reclaim_pages

Refactor zone_reclaim: move the reusable reclaim loop out of
__zone_reclaim() into a new helper, zone_reclaim_pages(), and make it
modular.

Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com>
---
 mm/vmscan.c |   35 +++++++++++++++++++++++------------
 1 files changed, 23 insertions(+), 12 deletions(-)
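For readers skimming the diff: the change lifts the priority-descending
reclaim loop out of __zone_reclaim() into a reusable helper, leaving the
loop's termination condition untouched, so behaviour stays the same and
only the call site shrinks. A minimal, self-contained user-space sketch
of that pattern follows; the struct layouts, the ZONE_RECLAIM_PRIORITY
value and the shrink_zone() stub are simplified stand-ins for
illustration, not the kernel's definitions.

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4         /* stand-in value for the sketch */

/* stand-in structures; the real ones live in the kernel's mm code */
struct scan_control { unsigned long nr_reclaimed; };
struct zone { unsigned long present_pages; };

/*
 * Stubbed shrink step: pretend a more urgent pass (lower priority
 * number) frees a few more pages than the previous one did.
 */
static void shrink_zone(int priority, struct zone *zone,
                        struct scan_control *sc)
{
        sc->nr_reclaimed += (ZONE_RECLAIM_PRIORITY - priority + 1) * 8;
}

/*
 * The extracted helper: retry at increasing urgency until either the
 * request is satisfied or the highest-urgency pass has run.
 */
static void zone_reclaim_pages(struct zone *zone, struct scan_control *sc,
                               unsigned long nr_pages)
{
        int priority = ZONE_RECLAIM_PRIORITY;

        do {
                shrink_zone(priority, zone, sc);
                priority--;
        } while (priority >= 0 && sc->nr_reclaimed < nr_pages);
}

int main(void)
{
        struct zone zone = { .present_pages = 1024 };
        struct scan_control sc = { .nr_reclaimed = 0 };

        /* the old inline loop at the call site becomes one helper call */
        zone_reclaim_pages(&zone, &sc, 1UL << 5);       /* order-5 request */
        printf("reclaimed %lu pages\n", sc.nr_reclaimed);
        return 0;
}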

diff --git a/mm/vmscan.c b/mm/vmscan.c
index e841cae..3b25423 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2815,6 +2815,27 @@ static long zone_pagecache_reclaimable(struct zone *zone)
 }
 
 /*
+ * Helper function to reclaim unmapped pages, we might add something
+ * similar to this for slab cache as well. Currently this function
+ * is shared with __zone_reclaim()
+ */
+static inline void
+zone_reclaim_pages(struct zone *zone, struct scan_control *sc,
+                        unsigned long nr_pages)
+{
+        int priority;
+        /*
+         * Free memory by calling shrink zone with increasing
+         * priorities until we have enough memory freed.
+         */
+        priority = ZONE_RECLAIM_PRIORITY;
+        do {
+                shrink_zone(priority, zone, sc);
+                priority--;
+        } while (priority >= 0 && sc->nr_reclaimed < nr_pages);
+}
+
+/*
  * Try to free up some pages from this zone through reclaim.
  */
 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
@@ -2823,7 +2844,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         const unsigned long nr_pages = 1 << order;
         struct task_struct *p = current;
         struct reclaim_state reclaim_state;
-        int priority;
         struct scan_control sc = {
                 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
                 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -2847,17 +2867,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
         reclaim_state.reclaimed_slab = 0;
         p->reclaim_state = &reclaim_state;
 
-        if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
-                /*
-                 * Free memory by calling shrink zone with increasing
-                 * priorities until we have enough memory freed.
-                 */
-                priority = ZONE_RECLAIM_PRIORITY;
-                do {
-                        shrink_zone(priority, zone, &sc);
-                        priority--;
-                } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
-        }
+        if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages)
+                zone_reclaim_pages(zone, &sc, nr_pages);
 
         nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
         if (nr_slab_pages0 > zone->min_slab_pages) {

