    Subject: [PATCH 4.14 160/228] parisc: Implement __smp_store_release and __smp_load_acquire barriers
    From: John David Anglin <dave.anglin@bell.net>

    commit e96ebd589debd9a6a793608c4ec7019c38785dea upstream.

    This patch implements the __smp_store_release and __smp_load_acquire barriers
    using ordered stores and loads. This avoids the sync instruction present in
    the generic implementation.
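    For reference, the generic fallback that this replaces pays for a full barrier in
    both primitives. A rough sketch of the 4.14-era include/asm-generic/barrier.h
    definitions (paraphrased for illustration, not quoted verbatim) shows where that
    sync comes from, since __smp_mb() on parisc expands to mb() and hence a sync
    instruction:

    /* Rough sketch of the generic fallback, illustration only. */
    #ifndef __smp_store_release
    #define __smp_store_release(p, v) \
    do { \
    	compiletime_assert_atomic_type(*p); \
    	__smp_mb();		/* full barrier: sync on parisc */ \
    	WRITE_ONCE(*p, v); \
    } while (0)
    #endif

    #ifndef __smp_load_acquire
    #define __smp_load_acquire(p) \
    ({ \
    	typeof(*p) ___p1 = READ_ONCE(*p); \
    	compiletime_assert_atomic_type(*p); \
    	__smp_mb();		/* full barrier: sync on parisc */ \
    	___p1; \
    })
    #endif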

    Cc: <stable@vger.kernel.org> # 4.14+
    Signed-off-by: Dave Anglin <dave.anglin@bell.net>
    Signed-off-by: Helge Deller <deller@gmx.de>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    arch/parisc/include/asm/barrier.h | 61 ++++++++++++++++++++++++++++++++++++++
    1 file changed, 61 insertions(+)

    --- a/arch/parisc/include/asm/barrier.h
    +++ b/arch/parisc/include/asm/barrier.h
    @@ -26,6 +26,67 @@
    #define __smp_rmb() mb()
    #define __smp_wmb() mb()

    +#define __smp_store_release(p, v) \
    +do { \
    +	typeof(p) __p = (p); \
    +	union { typeof(*p) __val; char __c[1]; } __u = \
    +		{ .__val = (__force typeof(*p)) (v) }; \
    +	compiletime_assert_atomic_type(*p); \
    +	switch (sizeof(*p)) { \
    +	case 1: \
    +		asm volatile("stb,ma %0,0(%1)" \
    +			: : "r"(*(__u8 *)__u.__c), "r"(__p) \
    +			: "memory"); \
    +		break; \
    +	case 2: \
    +		asm volatile("sth,ma %0,0(%1)" \
    +			: : "r"(*(__u16 *)__u.__c), "r"(__p) \
    +			: "memory"); \
    +		break; \
    +	case 4: \
    +		asm volatile("stw,ma %0,0(%1)" \
    +			: : "r"(*(__u32 *)__u.__c), "r"(__p) \
    +			: "memory"); \
    +		break; \
    +	case 8: \
    +		if (IS_ENABLED(CONFIG_64BIT)) \
    +			asm volatile("std,ma %0,0(%1)" \
    +				: : "r"(*(__u64 *)__u.__c), "r"(__p) \
    +				: "memory"); \
    +		break; \
    +	} \
    +} while (0)
    +
    +#define __smp_load_acquire(p) \
    +({ \
    +	union { typeof(*p) __val; char __c[1]; } __u; \
    +	typeof(p) __p = (p); \
    +	compiletime_assert_atomic_type(*p); \
    +	switch (sizeof(*p)) { \
    +	case 1: \
    +		asm volatile("ldb,ma 0(%1),%0" \
    +			: "=r"(*(__u8 *)__u.__c) : "r"(__p) \
    +			: "memory"); \
    +		break; \
    +	case 2: \
    +		asm volatile("ldh,ma 0(%1),%0" \
    +			: "=r"(*(__u16 *)__u.__c) : "r"(__p) \
    +			: "memory"); \
    +		break; \
    +	case 4: \
    +		asm volatile("ldw,ma 0(%1),%0" \
    +			: "=r"(*(__u32 *)__u.__c) : "r"(__p) \
    +			: "memory"); \
    +		break; \
    +	case 8: \
    +		if (IS_ENABLED(CONFIG_64BIT)) \
    +			asm volatile("ldd,ma 0(%1),%0" \
    +				: "=r"(*(__u64 *)__u.__c) : "r"(__p) \
    +				: "memory"); \
    +		break; \
    +	} \
    +	__u.__val; \
    +})
    #include <asm-generic/barrier.h>

    #endif /* !__ASSEMBLY__ */
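    Not part of the patch, just a usage sketch: via asm-generic/barrier.h these back
    the smp_store_release()/smp_load_acquire() helpers, whose typical producer/consumer
    pairing looks like the following ('data', 'flag', producer() and consumer() are
    made-up names for illustration):

    #include <asm/barrier.h>	/* smp_store_release(), smp_load_acquire() */

    static int data;
    static int flag;

    static void producer(void)
    {
    	data = 42;			/* plain store */
    	smp_store_release(&flag, 1);	/* orders 'data' before 'flag'; on SMP,
    					 * with this patch, a single ordered
    					 * stw,ma instead of sync + store */
    }

    static int consumer(void)
    {
    	if (smp_load_acquire(&flag))	/* ordered ldw,ma; 'flag' is read before
    					 * any later access to 'data' */
    		return data;
    	return -1;
    }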
