    Subject: [PATCH 05/13] alpha: Override READ_ONCE() with barriered implementation
    Rather than relying on the core code to use 'smp_read_barrier_depends()'
    as part of the 'READ_ONCE()' definition, instead override 'READ_ONCE()'
    in the Alpha code so that it is treated the same way as
    'smp_load_acquire()'.
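
    For reference, the net effect on Alpha is roughly the difference
    between the two simplified, illustrative sketches below; the real
    definitions live in the generic READ_ONCE() code and in the new
    asm/rwonce.h added by this patch, and the *_SKETCH names are made
    up purely for this comparison:

    	/* Before: generic READ_ONCE(), dependency barrier after the load. */
    	#define OLD_READ_ONCE_SKETCH(x)					\
    	({								\
    		typeof(x) ___v = *(const volatile typeof(x) *)&(x);	\
    		smp_read_barrier_depends();	/* "mb" only on Alpha */\
    		___v;							\
    	})

    	/*
    	 * After: Alpha's READ_ONCE() is a volatile load followed by a
    	 * full "mb", i.e. it is treated like smp_load_acquire().
    	 */
    	#define NEW_READ_ONCE_SKETCH(x)					\
    	({								\
    		typeof(x) ___v = *(const volatile typeof(x) *)&(x);	\
    		mb();							\
    		___v;							\
    	})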

    Signed-off-by: Will Deacon <will@kernel.org>
    ---
    arch/alpha/include/asm/barrier.h | 61 ++++----------------------------
    arch/alpha/include/asm/rwonce.h | 22 ++++++++++++
    2 files changed, 29 insertions(+), 54 deletions(-)
    create mode 100644 arch/alpha/include/asm/rwonce.h

    diff --git a/arch/alpha/include/asm/barrier.h b/arch/alpha/include/asm/barrier.h
    index 92ec486a4f9e..1f6abe2d1392 100644
    --- a/arch/alpha/include/asm/barrier.h
    +++ b/arch/alpha/include/asm/barrier.h
    @@ -2,64 +2,17 @@
    #ifndef __BARRIER_H
    #define __BARRIER_H

    -#include <asm/compiler.h>
    -
    #define mb() __asm__ __volatile__("mb": : :"memory")
    #define rmb() __asm__ __volatile__("mb": : :"memory")
    #define wmb() __asm__ __volatile__("wmb": : :"memory")

    -/**
    - * read_barrier_depends - Flush all pending reads that subsequents reads
    - * depend on.
    - *
    - * No data-dependent reads from memory-like regions are ever reordered
    - * over this barrier. All reads preceding this primitive are guaranteed
    - * to access memory (but not necessarily other CPUs' caches) before any
    - * reads following this primitive that depend on the data return by
    - * any of the preceding reads. This primitive is much lighter weight than
    - * rmb() on most CPUs, and is never heavier weight than is
    - * rmb().
    - *
    - * These ordering constraints are respected by both the local CPU
    - * and the compiler.
    - *
    - * Ordering is not guaranteed by anything other than these primitives,
    - * not even by data dependencies. See the documentation for
    - * memory_barrier() for examples and URLs to more information.
    - *
    - * For example, the following code would force ordering (the initial
    - * value of "a" is zero, "b" is one, and "p" is "&a"):
    - *
    - * <programlisting>
    - *	CPU 0				CPU 1
    - *
    - *	b = 2;
    - *	memory_barrier();
    - *	p = &b;				q = p;
    - *					read_barrier_depends();
    - *					d = *q;
    - * </programlisting>
    - *
    - * because the read of "*q" depends on the read of "p" and these
    - * two reads are separated by a read_barrier_depends(). However,
    - * the following code, with the same initial values for "a" and "b":
    - *
    - * <programlisting>
    - *	CPU 0				CPU 1
    - *
    - *	a = 2;
    - *	memory_barrier();
    - *	b = 3;				y = b;
    - *					read_barrier_depends();
    - *					x = a;
    - * </programlisting>
    - *
    - * does not enforce ordering, since there is no data dependency between
    - * the read of "a" and the read of "b". Therefore, on some CPUs, such
    - * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
    - * in cases like this where there are no data dependencies.
    - */
    -#define read_barrier_depends() __asm__ __volatile__("mb": : :"memory")
    +#define __smp_load_acquire(p)						\
    +({									\
    +	typeof(*p) ___p1 = (*(volatile typeof(*p) *)(p));		\
    +	compiletime_assert_atomic_type(*p);				\
    +	mb();								\
    +	___p1;								\
    +})

    #ifdef CONFIG_SMP
    #define __ASM_SMP_MB "\tmb\n"
    diff --git a/arch/alpha/include/asm/rwonce.h b/arch/alpha/include/asm/rwonce.h
    new file mode 100644
    index 000000000000..ef5601352b55
    --- /dev/null
    +++ b/arch/alpha/include/asm/rwonce.h
    @@ -0,0 +1,22 @@
    +/* SPDX-License-Identifier: GPL-2.0 */
    +/*
    + * Copyright (C) 2019 Google LLC.
    + */
    +#ifndef __ASM_RWONCE_H
    +#define __ASM_RWONCE_H
    +
    +#include <asm/barrier.h>
    +
    +/*
    + * Alpha is apparently daft enough to reorder address-dependent loads
    + * on some CPU implementations. Knock some common sense into it with
    + * a memory barrier in READ_ONCE().
    + */
    +#define __read_once_size_1(p) __smp_load_acquire((u8 *)(p))
    +#define __read_once_size_2(p) __smp_load_acquire((u16 *)(p))
    +#define __read_once_size_4(p) __smp_load_acquire((u32 *)(p))
    +#define __read_once_size_8(p) __smp_load_acquire((u64 *)(p))
    +
    +#include <asm-generic/rwonce.h>
    +
    +#endif /* __ASM_RWONCE_H */
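
    As background for the comment in the new rwonce.h: the sketch below
    is the publication pattern from the read_barrier_depends() example
    removed from barrier.h above, recast as two threads (the names are
    illustrative only). Initially a == 0, b == 1 and p == &a; the reader
    previously relied on the implicit smp_read_barrier_depends() inside
    READ_ONCE() to observe b == 2 whenever it observes p == &b, and with
    this patch the mb() folded into Alpha's READ_ONCE() provides that
    ordering instead:

    	static int a, b = 1;
    	static int *p = &a;

    	void writer(void)
    	{
    		b = 2;
    		smp_wmb();		/* order the store to b before the store to p */
    		WRITE_ONCE(p, &b);
    	}

    	int reader(void)
    	{
    		int *q;

    		q = READ_ONCE(p);	/* on Alpha: volatile load followed by mb() */
    		return *q;		/* sees b == 2 whenever q == &b */
    	}
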
    --
    2.24.0.rc1.363.gb1bccd3e3d-goog