From:	Uros Bizjak <>
Subject:	[PATCH 4/6] locking/atomic/x86: Merge x86_32 and x86_64 arch_atomic64_fetch_{and,or,xor}() functions
Date:	Tue, 9 Apr 2024 12:03:55 +0200
Move the identical definitions of arch_atomic64_fetch_{and,or,xor}() from arch/x86/include/asm/atomic64_32.h and arch/x86/include/asm/atomic64_64.h to their common place in arch/x86/include/asm/atomic.h.
No functional changes intended.
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Peter Zijlstra <peterz@infradead.org>
---
 arch/x86/include/asm/atomic.h      | 30 ++++++++++++++++++++++++++++++
 arch/x86/include/asm/atomic64_32.h | 30 ------------------------------
 arch/x86/include/asm/atomic64_64.h | 30 ------------------------------
 3 files changed, 30 insertions(+), 60 deletions(-)
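Note for reviewers: all three consolidated helpers follow the same try_cmpxchg loop. The counter is read once, then the compare-and-exchange is retried until no other CPU has changed the value in between; arch_atomic64_try_cmpxchg() refreshes the expected value on failure, which is why the loop body stays empty. Below is a minimal user-space sketch of that pattern using C11 atomics, for illustration only (fetch_and_sketch is a made-up name, not a kernel interface):

/*
 * User-space sketch of the try_cmpxchg loop used by the helpers in
 * this patch; not kernel code. atomic_compare_exchange_weak() writes
 * the current value back into 'val' on failure, just as
 * arch_atomic64_try_cmpxchg() updates its 'old' argument, so the
 * loop body can stay empty.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static int64_t fetch_and_sketch(int64_t i, _Atomic int64_t *v)
{
	int64_t val = atomic_load(v);

	/* Retry until no other thread modified *v since the load. */
	do { } while (!atomic_compare_exchange_weak(v, &val, val & i));

	return val;	/* value observed before the AND, i.e. the "fetch" */
}

int main(void)
{
	_Atomic int64_t v = 0xff;
	long long old = fetch_and_sketch(0x0f, &v);

	printf("old=%lld new=%lld\n", old, (long long)v);	/* old=255 new=15 */
	return 0;
}

The weak compare-exchange is the natural fit in such a loop, since the retry already tolerates spurious failures.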
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index b166da21ee98..b2e44de36934 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -182,4 +182,34 @@ static __always_inline int arch_atomic_fetch_xor(int i, atomic_t *v)
 # include <asm/atomic64_64.h>
 #endif
 
+static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
+{
+	s64 val = __READ_ONCE(v->counter);
+
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
+
+	return val;
+}
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+
+static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
+{
+	s64 val = __READ_ONCE(v->counter);
+
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
+
+	return val;
+}
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+
+static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
+{
+	s64 val = __READ_ONCE(v->counter);
+
+	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
+
+	return val;
+}
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+
 #endif /* _ASM_X86_ATOMIC_H */
diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
index 84affd7a5d1c..4f79198da98e 100644
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -206,16 +206,6 @@ static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
 	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
 }
 
-static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-	s64 val = __READ_ONCE(v->counter);
-
-	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
-
-	return val;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-
 static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
 	s64 val = __READ_ONCE(v->counter);
@@ -223,16 +213,6 @@ static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
 	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
 }
 
-static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-	s64 val = __READ_ONCE(v->counter);
-
-	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
-
-	return val;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-
 static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
 	s64 val = __READ_ONCE(v->counter);
@@ -240,16 +220,6 @@ static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
 }
 
-static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-	s64 val = __READ_ONCE(v->counter);
-
-	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
-
-	return val;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-
 static __always_inline s64 arch_atomic64_fetch_add(s64 i, atomic64_t *v)
 {
 	s64 val = __READ_ONCE(v->counter);
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index e7b12a48fecb..b2c9974ba971 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -124,16 +124,6 @@ static __always_inline void arch_atomic64_and(s64 i, atomic64_t *v)
 		     : "memory");
 }
 
-static __always_inline s64 arch_atomic64_fetch_and(s64 i, atomic64_t *v)
-{
-	s64 val = __READ_ONCE(v->counter);
-
-	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
-
-	return val;
-}
-#define arch_atomic64_fetch_and arch_atomic64_fetch_and
-
 static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "orq %1,%0"
@@ -142,16 +132,6 @@ static __always_inline void arch_atomic64_or(s64 i, atomic64_t *v)
 		     : "memory");
 }
 
-static __always_inline s64 arch_atomic64_fetch_or(s64 i, atomic64_t *v)
-{
-	s64 val = __READ_ONCE(v->counter);
-
-	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
-
-	return val;
-}
-#define arch_atomic64_fetch_or arch_atomic64_fetch_or
-
 static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 {
 	asm volatile(LOCK_PREFIX "xorq %1,%0"
@@ -160,14 +140,4 @@ static __always_inline void arch_atomic64_xor(s64 i, atomic64_t *v)
 		     : "memory");
 }
 
-static __always_inline s64 arch_atomic64_fetch_xor(s64 i, atomic64_t *v)
-{
-	s64 val = __READ_ONCE(v->counter);
-
-	do { } while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
-
-	return val;
-}
-#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
-
 #endif /* _ASM_X86_ATOMIC64_64_H */
-- 
2.44.0