Date: Mon, 6 Dec 2010
From: Christoph Lameter <cl@linux.com>
Subject: [cpuops inc_return V1 2/9] x86: Support for this_cpu_add,sub,dec,inc_return

Supply an implementation for x86 in order to generate more efficient code:
the add and the read back of the new value collapse into a single xadd
instruction instead of separate operations.
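
For illustration, a caller that needs the post-increment value back can
then write the following (the counter name is hypothetical, and this
assumes the generic this_cpu_inc_return() wrapper introduced earlier in
this series):

	#include <linux/percpu.h>

	static DEFINE_PER_CPU(unsigned int, nr_events);

	static unsigned int record_event(void)
	{
		/*
		 * Increment this cpu's instance and hand back the new
		 * value. With this patch the increment and the read
		 * back collapse into a single xadd on the per cpu
		 * address instead of an add followed by a separate
		 * load.
		 */
		return this_cpu_inc_return(nr_events);
	}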

V2->V3:
- Cleanup
- Remove strange type checking from percpu_add_return_op.

Signed-off-by: Christoph Lameter <cl@linux.com>

---
 arch/x86/include/asm/percpu.h |   46 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

Index: linux-2.6/arch/x86/include/asm/percpu.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/percpu.h	2010-11-29 14:29:13.000000000 -0600
+++ linux-2.6/arch/x86/include/asm/percpu.h	2010-11-30 08:42:02.000000000 -0600
@@ -177,6 +177,41 @@ do {							\
 	}						\
 } while (0)
 
+
+/*
+ * Add return operation
+ */
+#define percpu_add_return_op(var, val)			\
+({							\
+	typedef typeof(var) pao_T__;			\
+	typeof(var) ret__ = val;			\
+	switch (sizeof(var)) {				\
+	case 1:						\
+		asm("xaddb %0, "__percpu_arg(1)		\
+			    : "+q" (ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	case 2:						\
+		asm("xaddw %0, "__percpu_arg(1)		\
+			    : "+r" (ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	case 4:						\
+		asm("xaddl %0, "__percpu_arg(1)		\
+			    : "+r" (ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	case 8:						\
+		asm("xaddq %0, "__percpu_arg(1)		\
+			    : "+re" (ret__), "+m" (var)	\
+			    : : "memory");		\
+		break;					\
+	default: __bad_percpu_size();			\
+	}						\
+	ret__ += val;					\
+	ret__;						\
+})
+
 #define percpu_from_op(op, var, constraint)		\
 ({							\
 	typeof(var) pfo_ret__;				\
@@ -300,6 +335,14 @@ do {							\
 #define irqsafe_cpu_xor_2(pcp, val)	percpu_to_op("xor", (pcp), val)
 #define irqsafe_cpu_xor_4(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#ifndef CONFIG_M386
+#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
+#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
+#endif
 /*
  * Per cpu atomic 64 bit operations are only available under 64 bit.
  * 32 bit must fall back to generic operations.
@@ -324,6 +367,9 @@ do {							\
 #define irqsafe_cpu_or_8(pcp, val)	percpu_to_op("or", (pcp), val)
 #define irqsafe_cpu_xor_8(pcp, val)	percpu_to_op("xor", (pcp), val)
 
+#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
+#define this_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
+
 #endif
 
 /* This is not atomic against other CPUs -- CPU preemption needs to be off */
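
For reference, xadd exchanges its register operand with the memory
operand and then stores the sum, so the register ends up holding the
old value of the memory location. That is why percpu_add_return_op()
adds val once more before yielding ret__. A minimal user space sketch
of the same pattern (illustrative only, not kernel code):

	#include <stdio.h>

	static int counter;

	/* Return the value of *var after adding val, via one xadd. */
	static int add_return(int *var, int val)
	{
		int ret = val;

		/* xadd leaves the pre-add value of *var in ret */
		asm("xaddl %0, %1" : "+r" (ret), "+m" (*var) : : "memory");
		return ret + val;	/* old value + val == new value */
	}

	int main(void)
	{
		printf("%d\n", add_return(&counter, 5));	/* prints 5 */
		printf("%d\n", add_return(&counter, 3));	/* prints 8 */
		return 0;
	}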

