    Date: 24 May 2010
    Subject: [03/24] percpu counter: clean up percpu_counter_sum_and_set()
    2.6.27-stable review patch.  If anyone has any objections, please let us know.

    ------------------


    From: Mingming Cao <cmm@us.ibm.com>

    commit 1f7c14c62ce63805f9574664a6c6de3633d4a354 upstream.

    percpu_counter_sum_and_set() and percpu_counter_sum() are the same except
    that the former updates the global counter after accounting. Since we
    already take fbc->lock to calculate the precise value of the counter in
    percpu_counter_sum() anyway, it should simply set fbc->count too, as
    percpu_counter_sum_and_set() does.

    This patch merges these two interfaces into one.
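    In caller terms, a minimal before/after sketch (the counter variable
    here is hypothetical, for illustration only; it is not from the patch):

        struct percpu_counter fbc;

        percpu_counter_init(&fbc, 0);
        percpu_counter_add(&fbc, 1);    /* fast per-cpu path, no lock taken */

        /* Before: two precise readers with the same locking cost */
        s64 a = percpu_counter_sum(&fbc);          /* summed, left fbc.count stale */
        s64 b = percpu_counter_sum_and_set(&fbc);  /* summed and refreshed fbc.count */

        /* After: one interface; every precise sum now refreshes fbc.count */
        s64 c = percpu_counter_sum(&fbc);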

    Signed-off-by: Mingming Cao <cmm@us.ibm.com>
    Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: <linux-ext4@vger.kernel.org>
    Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
    Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
    Signed-off-by: Jayson R. King <dev@jaysonking.com>
    Signed-off-by: Theodore Ts'o <tytso@mit.edu>

    ---
    fs/ext4/balloc.c               |    2 +-
    include/linux/percpu_counter.h |   12 +++---------
    lib/percpu_counter.c           |    8 +++-----
    3 files changed, 7 insertions(+), 15 deletions(-)

    --- a/fs/ext4/balloc.c
    +++ b/fs/ext4/balloc.c
    @@ -1778,7 +1778,7 @@ ext4_fsblk_t ext4_has_free_blocks(struct
     #ifdef CONFIG_SMP
     	if (free_blocks - root_blocks < FBC_BATCH)
     		free_blocks =
    -			percpu_counter_sum_and_set(&sbi->s_freeblocks_counter);
    +			percpu_counter_sum(&sbi->s_freeblocks_counter);
     #endif
     	if (free_blocks <= root_blocks)
     		/* we don't have free space */
    --- a/include/linux/percpu_counter.h
    +++ b/include/linux/percpu_counter.h
    @@ -35,7 +35,7 @@ int percpu_counter_init_irq(struct percp
     void percpu_counter_destroy(struct percpu_counter *fbc);
     void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
     void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
    -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set);
    +s64 __percpu_counter_sum(struct percpu_counter *fbc);
    
     static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
     {
    @@ -44,19 +44,13 @@ static inline void percpu_counter_add(st

     static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
     {
    -	s64 ret = __percpu_counter_sum(fbc, 0);
    +	s64 ret = __percpu_counter_sum(fbc);
     	return ret < 0 ? 0 : ret;
     }
    
    -static inline s64 percpu_counter_sum_and_set(struct percpu_counter *fbc)
    -{
    -	return __percpu_counter_sum(fbc, 1);
    -}
    -
    -
     static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
     {
    -	return __percpu_counter_sum(fbc, 0);
    +	return __percpu_counter_sum(fbc);
     }
    
     static inline s64 percpu_counter_read(struct percpu_counter *fbc)
    --- a/lib/percpu_counter.c
    +++ b/lib/percpu_counter.c
    @@ -52,7 +52,7 @@ EXPORT_SYMBOL(__percpu_counter_add);
      * Add up all the per-cpu counts, return the result. This is a more accurate
      * but much slower version of percpu_counter_read_positive()
      */
    -s64 __percpu_counter_sum(struct percpu_counter *fbc, int set)
    +s64 __percpu_counter_sum(struct percpu_counter *fbc)
     {
     	s64 ret;
     	int cpu;
    @@ -62,11 +62,9 @@ s64 __percpu_counter_sum(struct percpu_c
     	for_each_online_cpu(cpu) {
     		s32 *pcount = per_cpu_ptr(fbc->counters, cpu);
     		ret += *pcount;
    -		if (set)
    -			*pcount = 0;
    +		*pcount = 0;
     	}
    -	if (set)
    -		fbc->count = ret;
    +	fbc->count = ret;
    
     	spin_unlock(&fbc->lock);
     	return ret;
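
    For context on the ext4 hunk above: ext4_has_free_blocks() normally uses
    the cheap, approximate per-cpu read and only pays for the precise locked
    sum when the count gets within FBC_BATCH of the root reserve. A condensed
    sketch of that pattern (the read_positive line is assumed from the
    unpatched surrounding code, which the hunk does not show):

        /* cheap approximate read; can be off by roughly FBC_BATCH per CPU */
        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        if (free_blocks - root_blocks < FBC_BATCH)
                /* too close to the reserve to trust: take the precise sum */
                free_blocks = percpu_counter_sum(&sbi->s_freeblocks_counter);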


