From: Christoph Lameter <clameter@sgi.com>
Date: 13 Jun 2006
Subject: [PATCH 21/21] zoned vm counters: remove useless writeback structure

Remove writeback state

Now that these statistics are directly available via global_page_state(), we
can remove the functions that were only needed to calculate the page state
for writeback control.
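
Every call site converts the same way; a minimal before/after sketch of the
pattern (identifiers taken from the diff below, not a verbatim excerpt):

	/* Before: snapshot all counters into a struct writeback_state */
	struct writeback_state wbs;

	get_writeback_state(&wbs);
	nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;

	/* After: read the global counters directly where they are needed */
	nr_reclaimable = global_page_state(NR_DIRTY) +
				global_page_state(NR_UNSTABLE);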

    Signed-off-by: Christoph Lameter <clameter@sgi.com>
    Signed-off-by: Andrew Morton <akpm@osdl.org>
Index: linux-2.6.17-rc6-cl/mm/page-writeback.c
===================================================================
--- linux-2.6.17-rc6-cl.orig/mm/page-writeback.c	2006-06-13 17:16:41.187504717 -0700
+++ linux-2.6.17-rc6-cl/mm/page-writeback.c	2006-06-13 17:18:26.555990446 -0700
@@ -99,23 +99,6 @@ EXPORT_SYMBOL(laptop_mode);
 
 static void background_writeout(unsigned long _min_pages);
 
-struct writeback_state
-{
-	unsigned long nr_dirty;
-	unsigned long nr_unstable;
-	unsigned long nr_mapped;
-	unsigned long nr_writeback;
-};
-
-static void get_writeback_state(struct writeback_state *wbs)
-{
-	wbs->nr_dirty = global_page_state(NR_DIRTY);
-	wbs->nr_unstable = global_page_state(NR_UNSTABLE);
-	wbs->nr_mapped = global_page_state(NR_MAPPED) +
-			global_page_state(NR_ANON);
-	wbs->nr_writeback = global_page_state(NR_WRITEBACK);
-}
-
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
@@ -134,8 +117,8 @@ static void get_writeback_state(struct w
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
-		struct address_space *mapping)
+get_dirty_limits(long *pbackground, long *pdirty,
+		struct address_space *mapping)
 {
 	int background_ratio;		/* Percentages */
 	int dirty_ratio;
@@ -145,8 +128,6 @@ get_dirty_limits(struct writeback_state
 	unsigned long available_memory = total_pages;
 	struct task_struct *tsk;
 
-	get_writeback_state(wbs);
-
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * If this mapping can only allocate from low memory,
@@ -157,7 +138,9 @@ get_dirty_limits(struct writeback_state
 #endif
 
 
-	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
+	unmapped_ratio = 100 - ((global_page_state(NR_MAPPED) +
+				global_page_state(NR_ANON)) * 100) /
+					total_pages;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
@@ -190,7 +173,6 @@ get_dirty_limits(struct writeback_state
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-	struct writeback_state wbs;
 	long nr_reclaimable;
 	long background_thresh;
 	long dirty_thresh;
@@ -208,11 +190,12 @@ static void balance_dirty_pages(struct a
 			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&wbs, &background_thresh,
-					&dirty_thresh, mapping);
-		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-			break;
+		get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+		nr_reclaimable = global_page_state(NR_DIRTY) +
+					global_page_state(NR_UNSTABLE);
+		if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+				dirty_thresh)
+			break;
 
 		if (!dirty_exceeded)
 			dirty_exceeded = 1;
@@ -225,11 +208,14 @@ static void balance_dirty_pages(struct a
 		 */
 		if (nr_reclaimable) {
 			writeback_inodes(&wbc);
-			get_dirty_limits(&wbs, &background_thresh,
-					&dirty_thresh, mapping);
-			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-				break;
+			get_dirty_limits(&background_thresh,
+					&dirty_thresh, mapping);
+			nr_reclaimable = global_page_state(NR_DIRTY) +
+					global_page_state(NR_UNSTABLE);
+			if (nr_reclaimable +
+					global_page_state(NR_WRITEBACK)
+					<= dirty_thresh)
+				break;
 			pages_written += write_chunk - wbc.nr_to_write;
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
@@ -237,8 +223,9 @@ static void balance_dirty_pages(struct a
 		blk_congestion_wait(WRITE, HZ/10);
 	}
 
-	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
-		dirty_exceeded = 0;
+	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
+		<= dirty_thresh && dirty_exceeded)
+			dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
 		return;		/* pdflush is already working this queue */
@@ -300,12 +287,11 @@ EXPORT_SYMBOL(balance_dirty_pages_rateli
 
 void throttle_vm_writeout(void)
 {
-	struct writeback_state wbs;
 	long background_thresh;
 	long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
@@ -313,8 +299,9 @@ void throttle_vm_writeout(void)
 		 */
 		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
 
-		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
-			break;
+		if (global_page_state(NR_UNSTABLE) +
+			global_page_state(NR_WRITEBACK) <= dirty_thresh)
+				break;
 		blk_congestion_wait(WRITE, HZ/10);
 	}
 }
@@ -337,12 +324,12 @@ static void background_writeout(unsigned
 	};
 
 	for ( ; ; ) {
-		struct writeback_state wbs;
 		long background_thresh;
 		long dirty_thresh;
 
-		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
-		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
+		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+		if (global_page_state(NR_DIRTY) +
+			global_page_state(NR_UNSTABLE) < background_thresh
 				&& min_pages <= 0)
 			break;
 		wbc.encountered_congestion = 0;
@@ -366,12 +353,9 @@ static void background_writeout(unsigned
  */
 int wakeup_pdflush(long nr_pages)
 {
-	if (nr_pages == 0) {
-		struct writeback_state wbs;
-
-		get_writeback_state(&wbs);
-		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
-	}
+	if (nr_pages == 0)
+		nr_pages = global_page_state(NR_DIRTY) +
+				global_page_state(NR_UNSTABLE);
 	return pdflush_operation(background_writeout, nr_pages);
 }
 
@@ -402,7 +386,6 @@ static void wb_kupdate(unsigned long arg
 	unsigned long start_jif;
 	unsigned long next_jif;
 	long nr_to_write;
-	struct writeback_state wbs;
 	struct writeback_control wbc = {
 		.bdi		= NULL,
 		.sync_mode	= WB_SYNC_NONE,
@@ -415,11 +398,11 @@ static void wb_kupdate(unsigned long arg
 
 	sync_supers();
 
-	get_writeback_state(&wbs);
 	oldest_jif = jiffies - dirty_expire_interval;
 	start_jif = jiffies;
 	next_jif = start_jif + dirty_writeback_interval;
-	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
+	nr_to_write = global_page_state(NR_DIRTY) +
+			global_page_state(NR_UNSTABLE) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 	while (nr_to_write > 0) {
 		wbc.encountered_congestion = 0;