    Subject: [PATCH 05/12] vmscan: kill prev_priority completely
    From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>

    Since 2.6.28, zone->prev_priority has been unused, so it can be removed
    safely. Removing it also reduces stack usage slightly.

    Now I have to say that I'm sorry. Two years ago I thought prev_priority
    could usefully be integrated again, but four (or more) attempts have not
    produced good performance numbers. Thus I am giving up on that approach.

    Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
    Reviewed-by: Johannes Weiner <hannes@cmpxchg.org>
    Signed-off-by: Mel Gorman <mel@csn.ul.ie>
    ---
    include/linux/mmzone.h | 15 ------------
    mm/page_alloc.c | 2 -
    mm/vmscan.c | 57 ------------------------------------------------
    mm/vmstat.c | 2 -
    4 files changed, 0 insertions(+), 76 deletions(-)

    diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
    index b4d109e..b578eee 100644
    --- a/include/linux/mmzone.h
    +++ b/include/linux/mmzone.h
    @@ -348,21 +348,6 @@ struct zone {
    atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];

    /*
    - * prev_priority holds the scanning priority for this zone. It is
    - * defined as the scanning priority at which we achieved our reclaim
    - * target at the previous try_to_free_pages() or balance_pgdat()
    - * invocation.
    - *
    - * We use prev_priority as a measure of how much stress page reclaim is
    - * under - it drives the swappiness decision: whether to unmap mapped
    - * pages.
    - *
    - * Access to both this field is quite racy even on uniprocessor. But
    - * it is expected to average out OK.
    - */
    - int prev_priority;
    -
    - /*
    * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
    * this zone's LRU. Maintained by the pageout code.
    */
    diff --git a/mm/page_alloc.c b/mm/page_alloc.c
    index 431214b..0b0b629 100644
    --- a/mm/page_alloc.c
    +++ b/mm/page_alloc.c
    @@ -4081,8 +4081,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
    zone_seqlock_init(zone);
    zone->zone_pgdat = pgdat;

    - zone->prev_priority = DEF_PRIORITY;
    -
    zone_pcp_init(zone);
    for_each_lru(l) {
    INIT_LIST_HEAD(&zone->lru[l].list);
    diff --git a/mm/vmscan.c b/mm/vmscan.c
    index 58527c4..29e1ecd 100644
    --- a/mm/vmscan.c
    +++ b/mm/vmscan.c
    @@ -1286,20 +1286,6 @@ done:
    }

    /*
    - * We are about to scan this zone at a certain priority level. If that priority
    - * level is smaller (ie: more urgent) than the previous priority, then note
    - * that priority level within the zone. This is done so that when the next
    - * process comes in to scan this zone, it will immediately start out at this
    - * priority level rather than having to build up its own scanning priority.
    - * Here, this priority affects only the reclaim-mapped threshold.
    - */
    -static inline void note_zone_scanning_priority(struct zone *zone, int priority)
    -{
    - if (priority < zone->prev_priority)
    - zone->prev_priority = priority;
    -}
    -
    -/*
    * This moves pages from the active list to the inactive list.
    *
    * We move them the other way if the page is referenced by one or more
    @@ -1762,17 +1748,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
    if (scanning_global_lru(sc)) {
    if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
    continue;
    - note_zone_scanning_priority(zone, priority);
    -
    if (zone->all_unreclaimable && priority != DEF_PRIORITY)
    continue; /* Let kswapd poll it */
    - } else {
    - /*
    - * Ignore cpuset limitation here. We just want to reduce
    - * # of used pages by us regardless of memory shortage.
    - */
    - mem_cgroup_note_reclaim_priority(sc->mem_cgroup,
    - priority);
    }

    shrink_zone(priority, zone, sc);
    @@ -1878,17 +1855,6 @@ out:
    if (priority < 0)
    priority = 0;

    - if (scanning_global_lru(sc)) {
    - for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
    -
    - if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
    - continue;
    -
    - zone->prev_priority = priority;
    - }
    - } else
    - mem_cgroup_record_reclaim_priority(sc->mem_cgroup, priority);
    -
    delayacct_freepages_end();
    put_mems_allowed();

    @@ -2054,22 +2020,12 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
    .order = order,
    .mem_cgroup = NULL,
    };
    - /*
    - * temp_priority is used to remember the scanning priority at which
    - * this zone was successfully refilled to
    - * free_pages == high_wmark_pages(zone).
    - */
    - int temp_priority[MAX_NR_ZONES];
    -
    loop_again:
    total_scanned = 0;
    sc.nr_reclaimed = 0;
    sc.may_writepage = !laptop_mode;
    count_vm_event(PAGEOUTRUN);

    - for (i = 0; i < pgdat->nr_zones; i++)
    - temp_priority[i] = DEF_PRIORITY;
    -
    for (priority = DEF_PRIORITY; priority >= 0; priority--) {
    int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
    unsigned long lru_pages = 0;
    @@ -2137,9 +2093,7 @@ loop_again:
    if (zone->all_unreclaimable && priority != DEF_PRIORITY)
    continue;

    - temp_priority[i] = priority;
    sc.nr_scanned = 0;
    - note_zone_scanning_priority(zone, priority);

    nid = pgdat->node_id;
    zid = zone_idx(zone);
    @@ -2212,16 +2166,6 @@ loop_again:
    break;
    }
    out:
    - /*
    - * Note within each zone the priority level at which this zone was
    - * brought into a happy state. So that the next thread which scans this
    - * zone will start out at that priority level.
    - */
    - for (i = 0; i < pgdat->nr_zones; i++) {
    - struct zone *zone = pgdat->node_zones + i;
    -
    - zone->prev_priority = temp_priority[i];
    - }
    if (!all_zones_ok) {
    cond_resched();

    @@ -2641,7 +2585,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
    */
    priority = ZONE_RECLAIM_PRIORITY;
    do {
    - note_zone_scanning_priority(zone, priority);
    shrink_zone(priority, zone, &sc);
    priority--;
    } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
    diff --git a/mm/vmstat.c b/mm/vmstat.c
    index 7759941..5c0b1b6 100644
    --- a/mm/vmstat.c
    +++ b/mm/vmstat.c
    @@ -853,11 +853,9 @@ static void zoneinfo_show_print(struct seq_file *m, pg_data_t *pgdat,
    }
    seq_printf(m,
    "\n all_unreclaimable: %u"
    - "\n prev_priority: %i"
    "\n start_pfn: %lu"
    "\n inactive_ratio: %u",
    zone->all_unreclaimable,
    - zone->prev_priority,
    zone->zone_start_pfn,
    zone->inactive_ratio);
    seq_putc(m, '\n');
    --
    1.7.1

