    Subject: [PATCH 43/52] percpu_counter: new function percpu_counter_sum_and_set
    Date: 2008-07-05
    From: Mingming Cao <cmm@us.ibm.com>

    Delayed allocation needs to check free blocks on every write.
    percpu_counter_read_positive() is not quite accurate; delayed
    allocation needs more accurate accounting, but calling
    percpu_counter_sum_positive() frequently is quite expensive.

    This patch adds a new function that updates the central counter
    while summing the per-cpu counters, so that subsequent
    percpu_counter_read() calls are more accurate and fewer calls to
    the expensive percpu_counter_sum() are needed.

    Signed-off-by: Mingming Cao <cmm@us.ibm.com>
    Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
    ---
    fs/ext4/balloc.c               |    2 +-
    include/linux/percpu_counter.h |   12 +++++++++---
    lib/percpu_counter.c           |    7 ++++++-
    3 files changed, 16 insertions(+), 5 deletions(-)
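
    As a side note for readers unfamiliar with percpu_counter, the sketch
    below is a minimal user-space model of the sum-and-set idea, not the
    kernel code in this patch. The names (fake_percpu_counter, fake_read,
    fake_sum, NR_FAKE_CPUS) are made up for illustration, and the real
    implementation additionally takes fbc->lock and walks only the online
    CPUs. The point it shows: a plain read returns only the central count,
    which can lag behind the per-cpu deltas, while sum-and-set folds the
    deltas back into the central count so later cheap reads are accurate
    again.

    #include <stdio.h>

    #define NR_FAKE_CPUS 4

    struct fake_percpu_counter {
    	long long count;               /* central counter */
    	int counters[NR_FAKE_CPUS];    /* per-"cpu" deltas */
    };

    /* cheap, possibly stale read: central count only */
    static long long fake_read(struct fake_percpu_counter *fbc)
    {
    	return fbc->count;
    }

    /* accurate sum; if 'set' is nonzero, fold the deltas back into count */
    static long long fake_sum(struct fake_percpu_counter *fbc, int set)
    {
    	long long ret = fbc->count;
    	int cpu;

    	for (cpu = 0; cpu < NR_FAKE_CPUS; cpu++) {
    		ret += fbc->counters[cpu];
    		if (set)
    			fbc->counters[cpu] = 0;
    	}
    	if (set)
    		fbc->count = ret;
    	return ret;
    }

    int main(void)
    {
    	struct fake_percpu_counter fbc = {
    		.count = 100,
    		.counters = { 3, -1, 5, 2 },
    	};

    	printf("read before sum_and_set: %lld\n", fake_read(&fbc));   /* 100 */
    	printf("sum_and_set:             %lld\n", fake_sum(&fbc, 1)); /* 109 */
    	printf("read after sum_and_set:  %lld\n", fake_read(&fbc));   /* 109 */
    	return 0;
    }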

    diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
    index 25f63d8..6369bac 100644
    --- a/fs/ext4/balloc.c
    +++ b/fs/ext4/balloc.c
    @@ -1621,7 +1621,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct ext4_sb_info *sbi,
     #ifdef CONFIG_SMP
     	if (free_blocks - root_blocks < FBC_BATCH)
     		free_blocks =
    -			percpu_counter_sum_positive(&sbi->s_freeblocks_counter);
    +			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
     #endif
     	if (free_blocks - root_blocks < nblocks)
     		return free_blocks - root_blocks;
    diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
    index 9007ccd..2083888 100644
    --- a/include/linux/percpu_counter.h
    +++ b/include/linux/percpu_counter.h
    @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percpu_counter *fbc, s64 amount);
     void percpu_counter_destroy(struct percpu_counter *fbc);
     void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
     void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
    -s64 __percpu_counter_sum(struct percpu_counter *fbc);
    +s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
     
     static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
     {
    @@ -44,13 +44,19 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
     
     static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
     {
    -	s64 ret = __percpu_counter_sum(fbc);
    +	s64 ret = __percpu_counter_sum(fbc, 0);
     	return ret < 0 ? 0 : ret;
     }
     
    +static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
    +{
    +	return __percpu_counter_sum(fbc, 1);
    +}
    +
    +
     static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
     {
    -	return __percpu_counter_sum(fbc);
    +	return __percpu_counter_sum(fbc, 0);
     }
     
     static inline s64 percpu_counter_read(struct percpu_counter *fbc)
    diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
    index 1191744..4a8ba4b 100644
    --- a/lib/percpu_counter.c
    +++ b/lib/percpu_counter.c
    @@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
      * Add up all the per-cpu counts, return the result. This is a more accurate
      * but much slower version of percpu_counter_read_positive()
      */
    -s64 __percpu_counter_sum(struct percpu_counter *fbc)
    +s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
     {
     	s64 ret;
     	int cpu;
    @@ -62,7 +62,12 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
     	for_each_online_cpu(cpu) {
     		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
     		ret += *pcount;
    +		if (set)
    +			*pcount = 0;
     	}
    +	if (set)
    +		fbc->count = ret;
    +
     	spin_unlock(&fbc->lock);
     	return ret;
     }
    --
    1.5.6.rc3.1.g36b7.dirty
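
    For completeness, here is a sketch of the caller-side pattern the
    fs/ext4/balloc.c hunk relies on, building on the fake_percpu_counter
    model above. has_room() and the FBC_BATCH value here are illustrative
    placeholders, not the real ext4_has_free_blocks(): the cheap read is
    used first, and the expensive sum is only taken when the cheap value
    is so close to the threshold that its staleness could change the
    decision; with sum-and-set, that expensive call also refreshes the
    central count for later readers.

    #define FBC_BATCH 32	/* placeholder; the real value is config-dependent */

    /* does the counter leave at least 'nblocks' above the reserved 'root_blocks'? */
    static int has_room(struct fake_percpu_counter *free, long long root_blocks,
    		    long long nblocks)
    {
    	long long free_blocks = fake_read(free);	/* cheap, may lag */

    	if (free_blocks - root_blocks < FBC_BATCH)	/* too close to call */
    		free_blocks = fake_sum(free, 1);	/* accurate, refreshes count */

    	return free_blocks - root_blocks >= nblocks;
    }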

