Date: Fri, 18 Dec 2009
From: Christoph Lameter <cl@linux-foundation.org>
Subject: [this_cpu_xx V8 16/16] x86 support for this_cpu_add_return
    x86 has the xadd instruction, which can be used to implement a per-cpu
    atomic add-and-return operation. xadd adds a register to a memory
    operand and leaves the old memory value in the register, so a single
    segment-prefixed instruction yields the new value of the per-cpu
    variable without a lock prefix. Because it is a single instruction it
    cannot be split by an interrupt on the local CPU, so the same helper
    also backs the irqsafe_cpu_add_return variants.
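
    For reference, a caller would look roughly like the sketch below. This
    assumes the generic this_cpu_add_return() wrapper introduced earlier in
    this series; nr_events and count_event() are made-up names, not part of
    this patch:

	#include <linux/percpu.h>

	/* Hypothetical per-cpu event counter, not part of this patch */
	static DEFINE_PER_CPU(int, nr_events);

	static int count_event(void)
	{
		/*
		 * Add 1 to this CPU's instance of nr_events and return
		 * the new value.  With this patch the add-and-return is
		 * performed by a single segment-prefixed xadd instead of
		 * a separate load/add/store sequence.
		 */
		return this_cpu_add_return(nr_events, 1);
	}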

    Signed-off-by: Christoph Lameter <cl@linux-foundation.org>

    ---
    arch/x86/include/asm/percpu.h | 35 +++++++++++++++++++++++++++++++++++
    1 file changed, 35 insertions(+)

    Index: linux-2.6/arch/x86/include/asm/percpu.h
    ===================================================================
    --- linux-2.6.orig/arch/x86/include/asm/percpu.h 2009-12-18 15:55:07.000000000 -0600
    +++ linux-2.6/arch/x86/include/asm/percpu.h 2009-12-18 16:06:33.000000000 -0600
    @@ -165,6 +165,35 @@ do { \
    __tmp_old; \
    })

    +#define this_cpu_add_return_x86(var, val)			\
    +({								\
    +	typeof(var) pfo_ret__ = (val);				\
    +	switch (sizeof(var)) {					\
    +	case 1:							\
    +		asm("xaddb %0,"__percpu_arg(1)			\
    +		    : "+q" (pfo_ret__), "+m" (var)		\
    +		    : : "memory");				\
    +		break;						\
    +	case 2:							\
    +		asm("xaddw %0,"__percpu_arg(1)			\
    +		    : "+r" (pfo_ret__), "+m" (var)		\
    +		    : : "memory");				\
    +		break;						\
    +	case 4:							\
    +		asm("xaddl %0,"__percpu_arg(1)			\
    +		    : "+r" (pfo_ret__), "+m" (var)		\
    +		    : : "memory");				\
    +		break;						\
    +	case 8:							\
    +		asm("xaddq %0,"__percpu_arg(1)			\
    +		    : "+r" (pfo_ret__), "+m" (var)		\
    +		    : : "memory");				\
    +		break;						\
    +	default: __bad_percpu_size();				\
    +	}							\
    +	pfo_ret__ + (val);	/* xadd left the old value in pfo_ret__ */ \
    +})
    +
    /*
    * percpu_read() makes gcc load the percpu variable every time it is
    * accessed while percpu_read_stable() allows the value to be cached.
    @@ -216,6 +245,9 @@ do { \
    #define __this_cpu_xchg_1(pcp, new) this_cpu_xchg_x86((pcp), new)
    #define __this_cpu_xchg_2(pcp, new) this_cpu_xchg_x86((pcp), new)
    #define __this_cpu_xchg_4(pcp, new) this_cpu_xchg_x86((pcp), new)
    +#define __this_cpu_add_return_1(pcp, val) this_cpu_add_return_x86((pcp), val)
    +#define __this_cpu_add_return_2(pcp, val) this_cpu_add_return_x86((pcp), val)
    +#define __this_cpu_add_return_4(pcp, val) this_cpu_add_return_x86((pcp), val)

    #define this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
    #define this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
    @@ -272,6 +304,9 @@ do { \
    #define irqsafe_cpu_xchg_1(pcp, new) this_cpu_xchg_x86((pcp), new)
    #define irqsafe_cpu_xchg_2(pcp, new) this_cpu_xchg_x86((pcp), new)
    #define irqsafe_cpu_xchg_4(pcp, new) this_cpu_xchg_x86((pcp), new)
    +#define irqsafe_cpu_add_return_1(pcp, val) this_cpu_add_return_x86((pcp), val)
    +#define irqsafe_cpu_add_return_2(pcp, val) this_cpu_add_return_x86((pcp), val)
    +#define irqsafe_cpu_add_return_4(pcp, val) this_cpu_add_return_x86((pcp), val)

    /*
    * Per cpu atomic 64 bit operations are only available under 64 bit.
    --

