    Subject: [PATCH 4.19 096/280] ARCv2: lib: memcpy: fix doing prefetchw outside of buffer
    4.19-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    [ Upstream commit f8a15f97664178f27dfbf86a38f780a532cb6df0 ]

    ARCv2 optimized memcpy uses the PREFETCHW instruction to prefetch the
    next cache line, but doesn't ensure that the line is not past the end of
    the buffer. PREFETCHW changes the line's ownership and marks it dirty,
    which can cause data corruption if this area is used for DMA IO.
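
    For illustration only, here is a minimal hedged C sketch of the bug
    pattern (hypothetical names; __builtin_prefetch with a write hint stands
    in for the hand-written PREFETCHW in the assembly):

        #include <stddef.h>
        #include <string.h>

        /*
         * Prefetch-for-write one 64-byte line ahead of the write pointer on
         * every iteration.  Once i + 64 reaches n, the prefetched line lies
         * outside [dst, dst + n): harmless for a read prefetch, but a write
         * prefetch claims the line and marks it dirty, which on ARCv2 can
         * corrupt memory belonging to an in-flight DMA buffer.
         */
        static void copy_with_prefetchw(char *dst, const char *src, size_t n)
        {
        	for (size_t i = 0; i < n; i += 16) {
        		size_t chunk = (n - i < 16) ? n - i : 16;

        		__builtin_prefetch(dst + i + 64, 1);	/* 1 = prepare to write */
        		memcpy(dst + i, src + i, chunk);
        	}
        }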

    Fix the issue by avoiding the PREFETCHW. This leads to performance
    degradation, but that is acceptable as we'll introduce a new memcpy
    implementation optimized for unaligned memory access.

    We also remove all PREFETCH instructions, as they are quite useless
    here:
    * we issue PREFETCH right before the LOAD instruction.
    * we copy 16 or 32 bytes of data (depending on CONFIG_ARC_HAS_LL64)
    per iteration of the main loop, so we issue PREFETCH 4 times (or 2
    times) for each L1 cache line (with the default 64B L1 cache line);
    see the quick arithmetic after this list. Obviously this is not
    optimal.
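
    As a rough back-of-the-envelope check (assuming the default 64-byte L1
    line; the variable name below is only illustrative):

        /* with CONFIG_ARC_HAS_LL64: 4 x ldd.ab of 8 bytes = 32 B per iteration */
        prefetches_per_line = 64 / 32;	/* = 2 */
        /* without LL64: 4 x ld.ab of 4 bytes = 16 B per iteration */
        prefetches_per_line = 64 / 16;	/* = 4 */

    One prefetch per cache line would already be enough, so the extra ones
    only add overhead.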

    Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
    Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    arch/arc/lib/memcpy-archs.S | 14 --------------
    1 file changed, 14 deletions(-)

    diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
    index d61044dd8b58..ea14b0bf3116 100644
    --- a/arch/arc/lib/memcpy-archs.S
    +++ b/arch/arc/lib/memcpy-archs.S
    @@ -25,15 +25,11 @@
    #endif

    #ifdef CONFIG_ARC_HAS_LL64
    -# define PREFETCH_READ(RX) prefetch [RX, 56]
    -# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
    # define LOADX(DST,RX) ldd.ab DST, [RX, 8]
    # define STOREX(SRC,RX) std.ab SRC, [RX, 8]
    # define ZOLSHFT 5
    # define ZOLAND 0x1F
    #else
    -# define PREFETCH_READ(RX) prefetch [RX, 28]
    -# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
    # define LOADX(DST,RX) ld.ab DST, [RX, 4]
    # define STOREX(SRC,RX) st.ab SRC, [RX, 4]
    # define ZOLSHFT 4
    @@ -41,8 +37,6 @@
    #endif

    ENTRY_CFI(memcpy)
    - prefetch [r1] ; Prefetch the read location
    - prefetchw [r0] ; Prefetch the write location
    mov.f 0, r2
    ;;; if size is zero
    jz.d [blink]
    @@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
    lpnz @.Lcopy32_64bytes
    ;; LOOP START
    LOADX (r6, r1)
    - PREFETCH_READ (r1)
    - PREFETCH_WRITE (r3)
    LOADX (r8, r1)
    LOADX (r10, r1)
    LOADX (r4, r1)
    @@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
    lpnz @.Lcopy8bytes_1
    ;; LOOP START
    ld.ab r6, [r1, 4]
    - prefetch [r1, 28] ;Prefetch the next read location
    ld.ab r8, [r1,4]
    - prefetchw [r3, 32] ;Prefetch the next write location

    SHIFT_1 (r7, r6, 24)
    or r7, r7, r5
    @@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
    lpnz @.Lcopy8bytes_2
    ;; LOOP START
    ld.ab r6, [r1, 4]
    - prefetch [r1, 28] ;Prefetch the next read location
    ld.ab r8, [r1,4]
    - prefetchw [r3, 32] ;Prefetch the next write location

    SHIFT_1 (r7, r6, 16)
    or r7, r7, r5
    @@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
    lpnz @.Lcopy8bytes_3
    ;; LOOP START
    ld.ab r6, [r1, 4]
    - prefetch [r1, 28] ;Prefetch the next read location
    ld.ab r8, [r1,4]
    - prefetchw [r3, 32] ;Prefetch the next write location

    SHIFT_1 (r7, r6, 8)
    or r7, r7, r5
    --
    2.19.1

