    Subject: Re: [PATCH] x86: Generate cmpxchg build failures
    On Mon, 2009-10-05 at 12:16 -0700, Linus Torvalds wrote:
    >
    > On Mon, 5 Oct 2009, Peter Zijlstra wrote:
    >
    > > On Tue, 2009-09-29 at 14:17 -0700, Linus Torvalds wrote:
    > >
    > > > And regardless, we should fix the silent cmpxchg failure, even if it's
    > > > just a link-time failure or something.
    > >
    > > Something like the below?
    >
    > Looks good to me. This is also one of the cases where a macro will do
    > better than an inline function, since the return value depends on the size
    > of the pointer, and it doesn't do the whole 'unsigned long' thing any
    > more.

    Indeed, I was glad to see that go.
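
    As an aside for readers, here is a tiny user-space sketch of what the
    statement-expression form buys over the old unsigned-long inline
    function: the result takes the pointee's own type, so no cast is needed
    at the call site. (demo_xchg is a made-up, non-atomic stand-in; it only
    illustrates the typing, not the real kernel macro.)

        #include <stdio.h>

        /*
         * Not atomic and not the kernel macro -- a typing demo only:
         * the statement expression evaluates to __typeof__(*(ptr)),
         * so the caller gets the pointee's type back without a detour
         * through unsigned long.
         */
        #define demo_xchg(ptr, v)                       \
        ({                                              \
                __typeof__(*(ptr)) __old = *(ptr);      \
                *(ptr) = (v);                           \
                __old;                                  \
        })

        int main(void)
        {
                unsigned short s = 0x1234;
                unsigned short prev = demo_xchg(&s, 0xbeef);    /* no cast needed */

                printf("prev=%#x now=%#x\n", prev, s);
                return 0;
        }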

    > It should also be fairly easy to make this now just do a cmpxchg8b for the
    > 64-bit case (again, that wouldn't have worked sanely due to the fixed type
    > in the inline function version - expanding that size to 64-bit would have
    > been insane). But that's a separate issue (and maybe we don't want to do
    > it, due to the whole "it's not DMA-atomic" etc issue - we may be better
    > off with a build failure, and forcing people who really want 64-bit
    > accesses to use the explicit cmpxchg64 thing)

    Right, that would be a second patch, if we think it's a sane thing to do.
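
    (Illustration only, not part of the patch; the names below are made up
    and header boilerplate is omitted. With the build failure in place,
    code that really wants a 64-bit compare-and-swap on 32-bit x86 has to
    spell out cmpxchg64(), which maps to cmpxchg8b there, roughly like so:)

        #include <linux/types.h>        /* u64 */

        static u64 shared_stamp;        /* hypothetical shared 64-bit value */

        static void bump_stamp(void)
        {
                u64 old, new;

                /*
                 * Classic cmpxchg retry loop.  A plain cmpxchg() on a u64
                 * would now fail to link on 32-bit via __cmpxchg_wrong_size()
                 * instead of silently operating on 32 bits.
                 */
                do {
                        old = shared_stamp;
                        new = old + 1;
                } while (cmpxchg64(&shared_stamp, old, new) != old);
        }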

    > That said, I think that you should merge the insane three versions of this
    > macro into one. Having separate versions for "__[sync_|local_|]cmpxchg()"
    > is disgusting. I bet you can do it with a single macro, and just pass in
    > the LOCK_PREFIX (or empty, or "lock;") to that as needed. Rather than
    > duplicating it three times.

    You're right; what was I thinking, doing all this copy/paste stuff?

    Here's a new one; it still seems to build the i386 and x86_64 defconfigs.


    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    ---
    arch/x86/include/asm/cmpxchg_32.h | 218 ++++++++++++++---------------------
    arch/x86/include/asm/cmpxchg_64.h | 234 ++++++++++++++----------------------
    2 files changed, 177 insertions(+), 275 deletions(-)

    diff --git a/arch/x86/include/asm/cmpxchg_32.h b/arch/x86/include/asm/cmpxchg_32.h
    index ee1931b..720c3d4 100644
    --- a/arch/x86/include/asm/cmpxchg_32.h
    +++ b/arch/x86/include/asm/cmpxchg_32.h
    @@ -8,14 +8,50 @@
    * you need to test for the feature in boot_cpu_data.
    */

    -#define xchg(ptr, v) \
    - ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
    +extern void __xchg_wrong_size(void);
    +
    +/*
    + * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
    + * Note 2: xchg has side effect, so that attribute volatile is necessary,
    + * but generally the primitive is invalid, *ptr is output argument. --ANK
    + */

    struct __xchg_dummy {
    unsigned long a[100];
    };
    #define __xg(x) ((struct __xchg_dummy *)(x))

    +#define __xchg(x, ptr, size) \
    +({ \
    + __typeof(*(ptr)) __x = (x); \
    + switch (size) { \
    + case 1: \
    + asm volatile("xchgb %b0,%1" \
    + : "=q" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + case 2: \
    + asm volatile("xchgw %w0,%1" \
    + : "=r" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + case 4: \
    + asm volatile("xchgl %0,%1" \
    + : "=r" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + default: \
    + __xchg_wrong_size(); \
    + } \
    + __x; \
    +})
    +
    +#define xchg(ptr, v) \
    + __xchg((v), (ptr), sizeof(*ptr))
    +
    /*
    * The semantics of XCHGCMP8B are a bit strange, this is why
    * there is a loop and the loading of %%eax and %%edx has to
    @@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr,
    (unsigned int)((value) >> 32)) \
    : __set_64bit(ptr, ll_low((value)), ll_high((value))))

    -/*
    - * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
    - * Note 2: xchg has side effect, so that attribute volatile is necessary,
    - * but generally the primitive is invalid, *ptr is output argument. --ANK
    - */
    -static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
    - int size)
    -{
    - switch (size) {
    - case 1:
    - asm volatile("xchgb %b0,%1"
    - : "=q" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - case 2:
    - asm volatile("xchgw %w0,%1"
    - : "=r" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - case 4:
    - asm volatile("xchgl %0,%1"
    - : "=r" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - }
    - return x;
    -}
    +extern void __cmpxchg_wrong_size(void);

    /*
    * Atomic compare and exchange. Compare OLD with MEM, if identical,
    * store NEW in MEM. Return the initial value in MEM. Success is
    * indicated by comparing RETURN with OLD.
    */
    +#define __raw_cmpxchg(ptr, old, new, size, lock) \
    +({ \
    + __typeof__(*(ptr)) __ret; \
    + __typeof__(*(ptr)) __old = (old); \
    + __typeof__(*(ptr)) __new = (new); \
    + switch (size) { \
    + case 1: \
    + asm volatile(lock "cmpxchgb %b1,%2" \
    + : "=a"(__ret) \
    + : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + case 2: \
    + asm volatile(lock "cmpxchgw %w1,%2" \
    + : "=a"(__ret) \
    + : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + case 4: \
    + asm volatile(lock "cmpxchgl %1,%2" \
    + : "=a"(__ret) \
    + : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + default: \
    + __cmpxchg_wrong_size(); \
    + } \
    + __ret; \
    +})
    +
    +#define __cmpxchg(ptr, old, new, size) \
    + __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)
    +
    +#define __sync_cmpxchg(ptr, old, new, size) \
    + __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")
    +
    +#define __cmpxchg_local(ptr, old, new, size) \
    + __raw_cmpxchg((ptr), (old), (new), (size), "")

    #ifdef CONFIG_X86_CMPXCHG
    #define __HAVE_ARCH_CMPXCHG 1
    -#define cmpxchg(ptr, o, n) \
    - ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
    - (unsigned long)(n), \
    - sizeof(*(ptr))))
    -#define sync_cmpxchg(ptr, o, n) \
    - ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
    - (unsigned long)(n), \
    - sizeof(*(ptr))))
    -#define cmpxchg_local(ptr, o, n) \
    - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
    - (unsigned long)(n), \
    - sizeof(*(ptr))))
    +
    +#define cmpxchg(ptr, old, new) \
    + __cmpxchg((ptr), (old), (new), sizeof(*ptr))
    +
    +#define sync_cmpxchg(ptr, old, new) \
    + __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
    +
    +#define cmpxchg_local(ptr, old, new) \
    + __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
    #endif

    #ifdef CONFIG_X86_CMPXCHG64
    @@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
    (unsigned long long)(n)))
    #endif

    -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
    - unsigned long new, int size)
    -{
    - unsigned long prev;
    - switch (size) {
    - case 1:
    - asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
    - : "=a"(prev)
    - : "q"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 2:
    - asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 4:
    - asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - }
    - return old;
    -}
    -
    -/*
    - * Always use locked operations when touching memory shared with a
    - * hypervisor, since the system may be SMP even if the guest kernel
    - * isn't.
    - */
    -static inline unsigned long __sync_cmpxchg(volatile void *ptr,
    - unsigned long old,
    - unsigned long new, int size)
    -{
    - unsigned long prev;
    - switch (size) {
    - case 1:
    - asm volatile("lock; cmpxchgb %b1,%2"
    - : "=a"(prev)
    - : "q"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 2:
    - asm volatile("lock; cmpxchgw %w1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 4:
    - asm volatile("lock; cmpxchgl %1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - }
    - return old;
    -}
    -
    -static inline unsigned long __cmpxchg_local(volatile void *ptr,
    - unsigned long old,
    - unsigned long new, int size)
    -{
    - unsigned long prev;
    - switch (size) {
    - case 1:
    - asm volatile("cmpxchgb %b1,%2"
    - : "=a"(prev)
    - : "q"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 2:
    - asm volatile("cmpxchgw %w1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 4:
    - asm volatile("cmpxchgl %1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - }
    - return old;
    -}
    -
    static inline unsigned long long __cmpxchg64(volatile void *ptr,
    unsigned long long old,
    unsigned long long new)
    diff --git a/arch/x86/include/asm/cmpxchg_64.h b/arch/x86/include/asm/cmpxchg_64.h
    index 52de72e..d565670 100644
    --- a/arch/x86/include/asm/cmpxchg_64.h
    +++ b/arch/x86/include/asm/cmpxchg_64.h
    @@ -3,9 +3,6 @@

    #include <asm/alternative.h> /* Provides LOCK_PREFIX */

    -#define xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), \
    - (ptr), sizeof(*(ptr))))
    -
    #define __xg(x) ((volatile long *)(x))

    static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
    @@ -15,167 +12,118 @@ static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)

    #define _set_64bit set_64bit

    +extern void __xchg_wrong_size(void);
    +extern void __cmpxchg_wrong_size(void);
    +
    /*
    * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
    * Note 2: xchg has side effect, so that attribute volatile is necessary,
    * but generally the primitive is invalid, *ptr is output argument. --ANK
    */
    -static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
    - int size)
    -{
    - switch (size) {
    - case 1:
    - asm volatile("xchgb %b0,%1"
    - : "=q" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - case 2:
    - asm volatile("xchgw %w0,%1"
    - : "=r" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - case 4:
    - asm volatile("xchgl %k0,%1"
    - : "=r" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - case 8:
    - asm volatile("xchgq %0,%1"
    - : "=r" (x)
    - : "m" (*__xg(ptr)), "0" (x)
    - : "memory");
    - break;
    - }
    - return x;
    -}
    +#define __xchg(x, ptr, size) \
    +({ \
    + __typeof(*(ptr)) __x = (x); \
    + switch (size) { \
    + case 1: \
    + asm volatile("xchgb %b0,%1" \
    + : "=q" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + case 2: \
    + asm volatile("xchgw %w0,%1" \
    + : "=r" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + case 4: \
    + asm volatile("xchgl %k0,%1" \
    + : "=r" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + case 8: \
    + asm volatile("xchgq %0,%1" \
    + : "=r" (__x) \
    + : "m" (*__xg(ptr)), "0" (__x) \
    + : "memory"); \
    + break; \
    + default: \
    + __xchg_wrong_size(); \
    + } \
    + __x; \
    +})
    +
    +#define xchg(ptr, v) \
    + __xchg((v), (ptr), sizeof(*ptr))
    +
    +#define __HAVE_ARCH_CMPXCHG 1

    /*
    * Atomic compare and exchange. Compare OLD with MEM, if identical,
    * store NEW in MEM. Return the initial value in MEM. Success is
    * indicated by comparing RETURN with OLD.
    */
    +#define __raw_cmpxchg(ptr, old, new, size, lock) \
    +({ \
    + __typeof__(*(ptr)) __ret; \
    + __typeof__(*(ptr)) __old = (old); \
    + __typeof__(*(ptr)) __new = (new); \
    + switch (size) { \
    + case 1: \
    + asm volatile(lock "cmpxchgb %b1,%2" \
    + : "=a"(__ret) \
    + : "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + case 2: \
    + asm volatile(lock "cmpxchgw %w1,%2" \
    + : "=a"(__ret) \
    + : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + case 4: \
    + asm volatile(lock "cmpxchgl %k1,%2" \
    + : "=a"(__ret) \
    + : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + case 8: \
    + asm volatile(lock "cmpxchgq %1,%2" \
    + : "=a"(__ret) \
    + : "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
    + : "memory"); \
    + break; \
    + default: \
    + __cmpxchg_wrong_size(); \
    + } \
    + __ret; \
    +})

    -#define __HAVE_ARCH_CMPXCHG 1
    +#define __cmpxchg(ptr, old, new, size) \
    + __raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

    -static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
    - unsigned long new, int size)
    -{
    - unsigned long prev;
    - switch (size) {
    - case 1:
    - asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
    - : "=a"(prev)
    - : "q"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 2:
    - asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 4:
    - asm volatile(LOCK_PREFIX "cmpxchgl %k1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 8:
    - asm volatile(LOCK_PREFIX "cmpxchgq %1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - }
    - return old;
    -}
    +#define __sync_cmpxchg(ptr, old, new, size) \
    + __raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

    -/*
    - * Always use locked operations when touching memory shared with a
    - * hypervisor, since the system may be SMP even if the guest kernel
    - * isn't.
    - */
    -static inline unsigned long __sync_cmpxchg(volatile void *ptr,
    - unsigned long old,
    - unsigned long new, int size)
    -{
    - unsigned long prev;
    - switch (size) {
    - case 1:
    - asm volatile("lock; cmpxchgb %b1,%2"
    - : "=a"(prev)
    - : "q"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 2:
    - asm volatile("lock; cmpxchgw %w1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 4:
    - asm volatile("lock; cmpxchgl %1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - }
    - return old;
    -}
    +#define __cmpxchg_local(ptr, old, new, size) \
    + __raw_cmpxchg((ptr), (old), (new), (size), "")

    -static inline unsigned long __cmpxchg_local(volatile void *ptr,
    - unsigned long old,
    - unsigned long new, int size)
    -{
    - unsigned long prev;
    - switch (size) {
    - case 1:
    - asm volatile("cmpxchgb %b1,%2"
    - : "=a"(prev)
    - : "q"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 2:
    - asm volatile("cmpxchgw %w1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 4:
    - asm volatile("cmpxchgl %k1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - case 8:
    - asm volatile("cmpxchgq %1,%2"
    - : "=a"(prev)
    - : "r"(new), "m"(*__xg(ptr)), "0"(old)
    - : "memory");
    - return prev;
    - }
    - return old;
    -}
    +#define cmpxchg(ptr, old, new) \
    + __cmpxchg((ptr), (old), (new), sizeof(*ptr))
    +
    +#define sync_cmpxchg(ptr, old, new) \
    + __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
    +
    +#define cmpxchg_local(ptr, old, new) \
    + __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))

    -#define cmpxchg(ptr, o, n) \
    - ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
    - (unsigned long)(n), sizeof(*(ptr))))
    #define cmpxchg64(ptr, o, n) \
    ({ \
    BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
    cmpxchg((ptr), (o), (n)); \
    })
    -#define cmpxchg_local(ptr, o, n) \
    - ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
    - (unsigned long)(n), \
    - sizeof(*(ptr))))
    -#define sync_cmpxchg(ptr, o, n) \
    - ((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
    - (unsigned long)(n), \
    - sizeof(*(ptr))))
    +
    #define cmpxchg64_local(ptr, o, n) \
    ({ \
    BUILD_BUG_ON(sizeof(*(ptr)) != 8); \

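    To make the new failure mode concrete, here is a stand-alone user-space
    sketch of the same trick the patch uses (demo_xchg and the use of
    __sync_lock_test_and_set are made up for the demo, and optimization must
    be enabled, e.g. gcc -O2, so the dead default branch is discarded for
    supported sizes):

        #include <stdio.h>

        extern void __xchg_wrong_size(void);    /* deliberately never defined */

        /*
         * Same pattern as the patch: switch on a compile-time size.
         * Supported sizes get real code; anything else calls an undefined
         * function, so the mistake shows up at link time instead of being
         * silently truncated.
         */
        #define demo_xchg(ptr, v)                                       \
        ({                                                              \
                __typeof__(*(ptr)) __x = (v);                           \
                switch (sizeof(*(ptr))) {                               \
                case 4:                                                 \
                        __x = __sync_lock_test_and_set((ptr), __x);     \
                        break;                                          \
                default:                                                \
                        __xchg_wrong_size();                            \
                }                                                       \
                __x;                                                    \
        })

        int main(void)
        {
                int ok = 1;

                printf("old=%d\n", demo_xchg(&ok, 2));  /* size 4: links and runs */

                /*
                 * long long bad = 1;
                 * demo_xchg(&bad, 2LL);
                 *
                 *   -> undefined reference to `__xchg_wrong_size'
                 */
                return 0;
        }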

