From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Subject: [PATCH 05/12] x86: add xadd helper macro
Date: 24 Aug 2011

Add a common xadd implementation.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
---
 arch/x86/include/asm/cmpxchg.h |   43 ++++++++++++++++++++++++++++++++++++++++
 1 files changed, 43 insertions(+), 0 deletions(-)

diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index efe3ec7..0d0d9cd 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -6,6 +6,7 @@
 /* Non-existant functions to indicate usage errors at link time. */
 extern void __xchg_wrong_size(void);
 extern void __cmpxchg_wrong_size(void);
+extern void __xadd_wrong_size(void);
 
 /*
  * Constants for operation sizes. On 32-bit, the 64-bit size it set to
@@ -157,4 +158,46 @@ extern void __cmpxchg_wrong_size(void);
 	__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
 #endif
 
+#define __xadd(ptr, inc, lock)						\
+	({								\
+		__typeof__ (*(ptr)) __ret = (inc);			\
+		switch (sizeof(*(ptr))) {				\
+		case __X86_CASE_B:					\
+			asm volatile (lock "xaddb %b0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_W:					\
+			asm volatile (lock "xaddw %w0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_L:					\
+			asm volatile (lock "xaddl %0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		case __X86_CASE_Q:					\
+			asm volatile (lock "xaddq %q0, %1\n"		\
+				      : "+r" (__ret), "+m" (*(ptr))	\
+				      : : "memory", "cc");		\
+			break;						\
+		default:						\
+			__xadd_wrong_size();				\
+		}							\
+		__ret;							\
+	})
+
+/*
+ * xadd() adds "inc" to "*ptr" and atomically returns the previous
+ * value of "*ptr".
+ *
+ * xadd() is locked when multiple CPUs are online
+ * xadd_sync() is always locked
+ * xadd_local() is never locked
+ */
+#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
+#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
+#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
+
 #endif /* ASM_X86_CMPXCHG_H */
--
1.7.6
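
For illustration, a minimal usage sketch (not part of the patch): handing
out monotonically increasing ticket numbers from a shared counter. It
relies only on the semantics documented in the comment above; the function
name take_ticket and the unsigned int counter are hypothetical.

	static unsigned int take_ticket(unsigned int *counter)
	{
		/* xadd() returns the value *counter held before the
		 * add, so concurrent callers each get a distinct
		 * ticket. */
		return xadd(counter, 1);
	}

Because __xadd() dispatches on sizeof(*(ptr)), the same call compiles to
xaddb/xaddw/xaddl for 1-, 2- and 4-byte operands (and xaddq on 64-bit),
while any other size is caught at link time through the deliberately
non-existent __xadd_wrong_size(). xadd() uses LOCK_PREFIX, so the lock
prefix can be patched out on uniprocessor kernels; xadd_sync() always
emits "lock"; xadd_local() never does.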

