 
From: Wu Fengguang <fengguang.wu@intel.com>
Subject: [PATCH 02/13] writeback: consolidate variable names in balance_dirty_pages()
Lots of lengthy tests... Let's compact the names:

    *_dirty = NR_FILE_DIRTY + NR_WRITEBACK + NR_UNSTABLE_NFS

    balance_dirty_pages() only cares about the above dirty sum except
    in one place -- on starting background writeback.
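    For reference, that one remaining user is the background-writeback kick at the
    end of balance_dirty_pages(), which compares only the reclaimable pages
    (dirty + unstable NFS) against background_thresh. Roughly, from the code this
    series applies to (not part of this patch, shown here only for context):

	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && (nr_reclaimable > background_thresh)))
		bdi_start_background_writeback(bdi);

    That check is why nr_reclaimable stays a separate local even after the
    consolidation.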

    Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
    ---
    mm/page-writeback.c | 26 ++++++++++++--------------
    1 file changed, 12 insertions(+), 14 deletions(-)

--- linux-next.orig/mm/page-writeback.c	2010-11-15 19:50:16.000000000 +0800
+++ linux-next/mm/page-writeback.c	2010-11-15 19:50:27.000000000 +0800
@@ -461,8 +461,8 @@ unsigned long bdi_dirty_limit(struct bac
 static void balance_dirty_pages(struct address_space *mapping,
 				unsigned long pages_dirtied)
 {
-	long nr_reclaimable, bdi_nr_reclaimable;
-	long nr_writeback, bdi_nr_writeback;
+	long nr_reclaimable;
+	long nr_dirty, bdi_dirty;  /* = file_dirty + writeback + unstable_nfs */
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
@@ -480,7 +480,7 @@ static void balance_dirty_pages(struct a
 		 */
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
-		nr_writeback = global_page_state(NR_WRITEBACK);
+		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 
 		global_dirty_limits(&background_thresh, &dirty_thresh);
 
@@ -489,8 +489,7 @@ static void balance_dirty_pages(struct a
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <=
-				(background_thresh + dirty_thresh) / 2)
+		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 			break;
 
 		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -507,21 +506,21 @@ static void balance_dirty_pages(struct a
 		 * deltas.
 		 */
 		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
-			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_stat_sum(bdi, BDI_RECLAIMABLE) +
+				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 		} else {
-			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_stat(bdi, BDI_RECLAIMABLE) +
+				    bdi_stat(bdi, BDI_WRITEBACK);
 		}
 
-		if (bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh) {
+		if (bdi_dirty >= bdi_thresh) {
 			pause = HZ/10;
 			goto pause;
 		}
 
 		bw = 100 << 20;	/* use static 100MB/s for the moment */
 
-		bw = bw * (bdi_thresh - (bdi_nr_reclaimable + bdi_nr_writeback));
+		bw = bw * (bdi_thresh - bdi_dirty);
 		bw = bw / (bdi_thresh / TASK_SOFT_DIRTY_LIMIT + 1);
 
 		pause = HZ * (pages_dirtied << PAGE_CACHE_SHIFT) / (bw + 1);
@@ -537,9 +536,8 @@ pause:
 		 * bdi or process from holding back light ones; The latter is
 		 * the last resort safeguard.
 		 */
-		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
-			|| (nr_reclaimable + nr_writeback > dirty_thresh);
+		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+				 (nr_dirty > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;
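
    For a rough feel of the pause computation in the hunks above, here is a
    small, self-contained userspace sketch of the same arithmetic. All inputs
    are made up for illustration, and HZ, PAGE_CACHE_SHIFT and
    TASK_SOFT_DIRTY_LIMIT are assumed values, not taken from this patch:

	/*
	 * Sketch of the throttle math from balance_dirty_pages(), with
	 * made-up numbers.  HZ, PAGE_CACHE_SHIFT and TASK_SOFT_DIRTY_LIMIT
	 * below are assumptions for the example only.
	 */
	#include <stdio.h>

	#define HZ			1000	/* assumed */
	#define PAGE_CACHE_SHIFT	12	/* assumed 4KB pages */
	#define TASK_SOFT_DIRTY_LIMIT	16	/* assumed */

	int main(void)
	{
		unsigned long long bdi_thresh = 25600;	/* bdi dirty limit, in pages */
		unsigned long long bdi_dirty = 25344;	/* 256 pages below the limit */
		unsigned long long pages_dirtied = 32;	/* pages dirtied by this task */
		unsigned long long bw, pause;

		bw = 100 << 20;			/* static 100MB/s, in bytes/s */
		bw = bw * (bdi_thresh - bdi_dirty);
		bw = bw / (bdi_thresh / TASK_SOFT_DIRTY_LIMIT + 1);

		pause = HZ * (pages_dirtied << PAGE_CACHE_SHIFT) / (bw + 1);

		printf("throttle bandwidth ~%llu bytes/s, pause %llu jiffies\n",
		       bw, pause);
		return 0;
	}

    With these numbers a task that dirtied 32 pages while its bdi sits 256 pages
    below its limit is paused for a handful of jiffies; as bdi_dirty approaches
    bdi_thresh the computed bandwidth shrinks and the pause grows.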


