From 2273366d2143c4ce2b792878f764a40c76b573e6 Mon Sep 17 00:00:00 2001
From: Eugeniy Paltsev <eugeniy.paltsev@synopsys.com>
Date: Wed, 30 Jan 2019 19:32:40 +0300
Subject: ARCv2: lib: memcpy: fix doing prefetchw outside of buffer

[ Upstream commit f8a15f97664178f27dfbf86a38f780a532cb6df0 ]

ARCv2 optimized memcpy uses the PREFETCHW instruction for prefetching
the next cache line, but doesn't ensure that the line is not past the
end of the buffer. PREFETCHW changes the line ownership and marks it
dirty, which can cause data corruption if this area is used for DMA IO.

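For illustration only (this is not part of the kernel change), here is
a minimal C sketch of the hazard. GCC's __builtin_prefetch with rw=1
stands in for PREFETCHW, and the CACHE_LINE value and the bounds check
are assumptions made for the example; the actual fix below simply
removes the prefetches:

  #include <stddef.h>
  #include <stdint.h>

  #define CACHE_LINE 64   /* assumed L1 cache line size, bytes */

  static void copy_words(uint32_t *dst, const uint32_t *src, size_t n)
  {
          const char *end = (const char *)(dst + n);
          size_t i;

          for (i = 0; i < n; i++) {
                  /*
                   * Prefetch one line ahead of the write cursor. Near
                   * the end of the buffer this address is past 'end';
                   * a write prefetch (PREFETCHW) takes ownership of
                   * that line and marks it dirty, corrupting data if
                   * the line is concurrently used for DMA IO.
                   */
                  const char *next = (const char *)(dst + i) + CACHE_LINE;

                  if (next < end) /* bound the prefetch to the buffer */
                          __builtin_prefetch(next, 1);

                  dst[i] = src[i];
          }
  }
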
Fix the issue by avoiding the PREFETCHW. This leads to a performance
degradation, but that is acceptable as we'll introduce a new memcpy
implementation optimized for unaligned memory access.

We also cut out all PREFETCH instructions, as they are quite useless
here:
* we issue PREFETCH right before the LOAD instruction.
* we copy 16 or 32 bytes of data (depending on CONFIG_ARC_HAS_LL64)
  per iteration of the main logical loop, so we issue PREFETCH 4 times
  (or 2 times) for each L1 cache line (with the default 64B L1 cache
  line). Obviously this is not optimal; the sketch after this list
  spells out the arithmetic.

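As a back-of-envelope check of those counts (illustrative only; the
line size and per-iteration byte counts are taken straight from this
message):

  #include <stdio.h>

  int main(void)
  {
          int line = 64;   /* default ARCv2 L1 cache line, bytes */
          int ll64 = 32;   /* bytes copied per iteration with LL64 */
          int plain = 16;  /* bytes copied per iteration without LL64 */

          /* One PREFETCH per iteration, hence per cache line: */
          printf("LL64:    %d prefetches per line\n", line / ll64);  /* 2 */
          printf("no LL64: %d prefetches per line\n", line / plain); /* 4 */
          return 0;
  }
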
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
 arch/arc/lib/memcpy-archs.S | 14 --------------
 1 file changed, 14 deletions(-)

diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index d61044dd8b58..ea14b0bf3116 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -25,15 +25,11 @@
 #endif
 
 #ifdef CONFIG_ARC_HAS_LL64
-# define PREFETCH_READ(RX) prefetch [RX, 56]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 64]
 # define LOADX(DST,RX) ldd.ab DST, [RX, 8]
 # define STOREX(SRC,RX) std.ab SRC, [RX, 8]
 # define ZOLSHFT 5
 # define ZOLAND 0x1F
 #else
-# define PREFETCH_READ(RX) prefetch [RX, 28]
-# define PREFETCH_WRITE(RX) prefetchw [RX, 32]
 # define LOADX(DST,RX) ld.ab DST, [RX, 4]
 # define STOREX(SRC,RX) st.ab SRC, [RX, 4]
 # define ZOLSHFT 4
@@ -41,8 +37,6 @@
 #endif
 
 ENTRY_CFI(memcpy)
- prefetch [r1] ; Prefetch the read location
- prefetchw [r0] ; Prefetch the write location
 mov.f 0, r2
 ;;; if size is zero
 jz.d [blink]
@@ -72,8 +66,6 @@ ENTRY_CFI(memcpy)
 lpnz @.Lcopy32_64bytes
 ;; LOOP START
 LOADX (r6, r1)
- PREFETCH_READ (r1)
- PREFETCH_WRITE (r3)
 LOADX (r8, r1)
 LOADX (r10, r1)
 LOADX (r4, r1)
@@ -117,9 +109,7 @@ ENTRY_CFI(memcpy)
 lpnz @.Lcopy8bytes_1
 ;; LOOP START
 ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
 ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
 
 SHIFT_1 (r7, r6, 24)
 or r7, r7, r5
@@ -162,9 +152,7 @@ ENTRY_CFI(memcpy)
 lpnz @.Lcopy8bytes_2
 ;; LOOP START
 ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
 ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
 
 SHIFT_1 (r7, r6, 16)
 or r7, r7, r5
@@ -204,9 +192,7 @@ ENTRY_CFI(memcpy)
 lpnz @.Lcopy8bytes_3
 ;; LOOP START
 ld.ab r6, [r1, 4]
- prefetch [r1, 28] ;Prefetch the next read location
 ld.ab r8, [r1,4]
- prefetchw [r3, 32] ;Prefetch the next write location
 
 SHIFT_1 (r7, r6, 8)
 or r7, r7, r5
-- 
2.19.1
