    Subject: [PATCH 4.9 01/61] arm64: lse: Add early clobbers to some input/output asm operands
    4.9-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Will Deacon <will.deacon@arm.com>

    commit 32c3fa7cdf0c4a3eb8405fc3e13398de019e828b upstream.

    For LSE atomics that read and write a register operand, we need to
    ensure that these operands are annotated as "early clobber" if the
    register is written before all of the input operands have been consumed.
    Failure to do so can result in the compiler allocating the same register
    to both operands, leading to splats such as:

    Unable to handle kernel paging request at virtual address 11111122222221
    [...]
    x1 : 1111111122222222 x0 : 1111111122222221
    Process swapper/0 (pid: 1, stack limit = 0x000000008209f908)
    Call trace:
    test_atomic64+0x1360/0x155c

    where x0 has been allocated as both the value to be stored and the
    atomic_t pointer.

    This patch adds the missing clobbers.
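
    For context, the "&" constraint modifier marks an output operand as
    early-clobber: it is written before all of the input operands have been
    consumed, so the compiler must not allocate it to the same register as
    any input. A minimal, hypothetical sketch of the same pattern outside
    the kernel (assuming an ARMv8.1 LSE-capable toolchain; the function and
    operand names are illustrative only):

    static inline void sub_sketch(long i, long *p)
    {
    	/*
    	 * %[i] is written by "neg" before %[p] is read by "stadd", so
    	 * %[i] must be early-clobber ("+&r").  With a plain "+r", the
    	 * compiler may legally place i and p in the same register.
    	 */
    	asm volatile(
    	"	neg	%[i], %[i]\n"
    	"	stadd	%[i], [%[p]]\n"
    	: [i] "+&r" (i)
    	: [p] "r" (p)
    	: "memory");
    }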

    Cc: <stable@vger.kernel.org>
    Cc: Dave Martin <dave.martin@arm.com>
    Cc: Robin Murphy <robin.murphy@arm.com>
    Reported-by: Mark Salter <msalter@redhat.com>
    Signed-off-by: Will Deacon <will.deacon@arm.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    arch/arm64/include/asm/atomic_lse.h | 22 +++++++++++-----------
    1 file changed, 11 insertions(+), 11 deletions(-)

    --- a/arch/arm64/include/asm/atomic_lse.h
    +++ b/arch/arm64/include/asm/atomic_lse.h
    @@ -117,7 +117,7 @@ static inline void atomic_and(int i, ato
    /* LSE atomics */
    " mvn %w[i], %w[i]\n"
    " stclr %w[i], %[v]")
    - : [i] "+r" (w0), [v] "+Q" (v->counter)
    + : [i] "+&r" (w0), [v] "+Q" (v->counter)
    : "r" (x1)
    : __LL_SC_CLOBBERS);
    }
    @@ -135,7 +135,7 @@ static inline int atomic_fetch_and##name
    /* LSE atomics */ \
    " mvn %w[i], %w[i]\n" \
    " ldclr" #mb " %w[i], %w[i], %[v]") \
    - : [i] "+r" (w0), [v] "+Q" (v->counter) \
    + : [i] "+&r" (w0), [v] "+Q" (v->counter) \
    : "r" (x1) \
    : __LL_SC_CLOBBERS, ##cl); \
    \
    @@ -161,7 +161,7 @@ static inline void atomic_sub(int i, ato
    /* LSE atomics */
    " neg %w[i], %w[i]\n"
    " stadd %w[i], %[v]")
    - : [i] "+r" (w0), [v] "+Q" (v->counter)
    + : [i] "+&r" (w0), [v] "+Q" (v->counter)
    : "r" (x1)
    : __LL_SC_CLOBBERS);
    }
    @@ -180,7 +180,7 @@ static inline int atomic_sub_return##nam
    " neg %w[i], %w[i]\n" \
    " ldadd" #mb " %w[i], w30, %[v]\n" \
    " add %w[i], %w[i], w30") \
    - : [i] "+r" (w0), [v] "+Q" (v->counter) \
    + : [i] "+&r" (w0), [v] "+Q" (v->counter) \
    : "r" (x1) \
    : __LL_SC_CLOBBERS , ##cl); \
    \
    @@ -207,7 +207,7 @@ static inline int atomic_fetch_sub##name
    /* LSE atomics */ \
    " neg %w[i], %w[i]\n" \
    " ldadd" #mb " %w[i], %w[i], %[v]") \
    - : [i] "+r" (w0), [v] "+Q" (v->counter) \
    + : [i] "+&r" (w0), [v] "+Q" (v->counter) \
    : "r" (x1) \
    : __LL_SC_CLOBBERS, ##cl); \
    \
    @@ -314,7 +314,7 @@ static inline void atomic64_and(long i,
    /* LSE atomics */
    " mvn %[i], %[i]\n"
    " stclr %[i], %[v]")
    - : [i] "+r" (x0), [v] "+Q" (v->counter)
    + : [i] "+&r" (x0), [v] "+Q" (v->counter)
    : "r" (x1)
    : __LL_SC_CLOBBERS);
    }
    @@ -332,7 +332,7 @@ static inline long atomic64_fetch_and##n
    /* LSE atomics */ \
    " mvn %[i], %[i]\n" \
    " ldclr" #mb " %[i], %[i], %[v]") \
    - : [i] "+r" (x0), [v] "+Q" (v->counter) \
    + : [i] "+&r" (x0), [v] "+Q" (v->counter) \
    : "r" (x1) \
    : __LL_SC_CLOBBERS, ##cl); \
    \
    @@ -358,7 +358,7 @@ static inline void atomic64_sub(long i,
    /* LSE atomics */
    " neg %[i], %[i]\n"
    " stadd %[i], %[v]")
    - : [i] "+r" (x0), [v] "+Q" (v->counter)
    + : [i] "+&r" (x0), [v] "+Q" (v->counter)
    : "r" (x1)
    : __LL_SC_CLOBBERS);
    }
    @@ -377,7 +377,7 @@ static inline long atomic64_sub_return##
    " neg %[i], %[i]\n" \
    " ldadd" #mb " %[i], x30, %[v]\n" \
    " add %[i], %[i], x30") \
    - : [i] "+r" (x0), [v] "+Q" (v->counter) \
    + : [i] "+&r" (x0), [v] "+Q" (v->counter) \
    : "r" (x1) \
    : __LL_SC_CLOBBERS, ##cl); \
    \
    @@ -404,7 +404,7 @@ static inline long atomic64_fetch_sub##n
    /* LSE atomics */ \
    " neg %[i], %[i]\n" \
    " ldadd" #mb " %[i], %[i], %[v]") \
    - : [i] "+r" (x0), [v] "+Q" (v->counter) \
    + : [i] "+&r" (x0), [v] "+Q" (v->counter) \
    : "r" (x1) \
    : __LL_SC_CLOBBERS, ##cl); \
    \
    @@ -516,7 +516,7 @@ static inline long __cmpxchg_double##nam
    " eor %[old1], %[old1], %[oldval1]\n" \
    " eor %[old2], %[old2], %[oldval2]\n" \
    " orr %[old1], %[old1], %[old2]") \
    - : [old1] "+r" (x0), [old2] "+r" (x1), \
    + : [old1] "+&r" (x0), [old2] "+&r" (x1), \
    [v] "+Q" (*(unsigned long *)ptr) \
    : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \
    [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \
