/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Cache maintenance
 *
 * Copyright (C) 2001 Deep Blue Solutions Ltd.
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/errno.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/alternative.h>
#include <asm/asm-uaccess.h>
f1a0c4aa CM |
/*
 *	flush_icache_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	Returns 0 on success, -EFAULT if a user access faults.
 */
ENTRY(__flush_icache_range)
	/* FALLTHROUGH */

/*
 *	__flush_cache_user_range(start,end)
 *
 *	Ensure that the I and D caches are coherent within specified region.
 *	This is typically used when code has been written to a memory region,
 *	and will be executed.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 */
ENTRY(__flush_cache_user_range)
	uaccess_ttbr0_enable x2, x3, x4		// permit user accesses (SW PAN)
alternative_if ARM64_HAS_CACHE_IDC
	// IDC: D-side clean to PoU is not required for I/D coherence;
	// just order prior stores and skip the clean loop entirely.
	dsb	ishst
	b	7f
alternative_else_nop_endif
	dcache_line_size x2, x3			// x2 = D-cache line size
	sub	x3, x2, #1			// x3 = line-size mask
	bic	x4, x0, x3			// x4 = start, line-aligned down
1:
	// Clean each line to PoU; on ARM64_WORKAROUND_CLEAN_CACHE parts
	// "dc cvau" is promoted to "dc civac". Faults branch to 9f.
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2			// next line
	cmp	x4, x1
	b.lo	1b				// loop while x4 < end
	dsb	ish				// complete cleans before I-side ops

7:
alternative_if ARM64_HAS_CACHE_DIC
	// DIC: I-cache invalidation not required; an ISB suffices.
	isb
	b	8f
alternative_else_nop_endif
	invalidate_icache_by_line x0, x1, x2, x3, 9f	// 9f = fault fixup
8:	mov	x0, #0				// success
1:
	uaccess_ttbr0_disable x1, x2		// common exit path
	ret
9:
	mov	x0, #-EFAULT			// user access faulted
	b	1b
ENDPROC(__flush_icache_range)
ENDPROC(__flush_cache_user_range)
71 | ||
/*
 *	invalidate_icache_range(start,end)
 *
 *	Ensure that the I cache is invalid within specified region.
 *
 *	- start   - virtual start address of region
 *	- end     - virtual end address of region
 *
 *	Returns 0 on success, -EFAULT if a user access faults.
 */
ENTRY(invalidate_icache_range)
alternative_if ARM64_HAS_CACHE_DIC
	// DIC: no explicit I-cache invalidation needed; return success.
	mov	x0, xzr
	isb
	ret
alternative_else_nop_endif

	uaccess_ttbr0_enable x2, x3, x4		// permit user accesses (SW PAN)

	invalidate_icache_by_line x0, x1, x2, x3, 2f	// 2f = fault fixup
	mov	x0, xzr				// success
1:
	uaccess_ttbr0_disable x1, x2		// common exit path
	ret
2:
	mov	x0, #-EFAULT			// user access faulted
	b	1b
ENDPROC(invalidate_icache_range)
98 | ||
/*
 *	__flush_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned and invalidated to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__flush_dcache_area)
	// clean+invalidate (civac) by line, completed with a full-system dsb
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__flush_dcache_area)
7363590d | 112 | |
/*
 *	__clean_dcache_area_pou(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoU.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pou)
alternative_if ARM64_HAS_CACHE_IDC
	// IDC: clean to PoU not required; ordering prior stores suffices.
	dsb	ishst
	ret
alternative_else_nop_endif
	dcache_by_line_op cvau, ish, x0, x1, x2, x3
	ret
ENDPROC(__clean_dcache_area_pou)
130 | ||
/*
 *	__inval_dcache_area(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are invalidated. Any partial lines at the ends of the interval are
 *	also cleaned to PoC to prevent data loss.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__inval_dcache_area)
	/* FALLTHROUGH */

/*
 *	__dma_inv_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_inv_area:
	add	x1, x1, x0			// x1 = end address
	dcache_line_size x2, x3			// x2 = D-cache line size
	sub	x3, x2, #1			// x3 = line-size mask
	tst	x1, x3				// end cache line aligned?
	bic	x1, x1, x3			// align end down to a line
	b.eq	1f
	dc	civac, x1			// partial tail: clean & invalidate
1:	tst	x0, x3				// start cache line aligned?
	bic	x0, x0, x3			// align start down to a line
	b.eq	2f
	dc	civac, x0			// partial head: clean & invalidate
	b	3f
2:	dc	ivac, x0			// whole line: invalidate only
3:	add	x0, x0, x2			// next line
	cmp	x0, x1
	b.lo	2b				// loop while x0 < aligned end
	dsb	sy				// complete maintenance before return
	ret
ENDPIPROC(__inval_dcache_area)
ENDPROC(__dma_inv_area)
170 | ||
/*
 *	__clean_dcache_area_poc(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoC.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_poc)
	/* FALLTHROUGH */

/*
 *	__dma_clean_area(start, size)
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
__dma_clean_area:
	// clean (cvac) by line to PoC, completed with a full-system dsb
	dcache_by_line_op cvac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_poc)
ENDPROC(__dma_clean_area)
7363590d | 193 | |
/*
 *	__clean_dcache_area_pop(kaddr, size)
 *
 *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
 *	are cleaned to the PoP.
 *
 *	- kaddr   - kernel address
 *	- size    - size in question
 */
ENTRY(__clean_dcache_area_pop)
alternative_if_not ARM64_HAS_DCPOP
	// No DC CVAP support: fall back to cleaning to PoC instead.
	b	__clean_dcache_area_poc
alternative_else_nop_endif
	dcache_by_line_op cvap, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__clean_dcache_area_pop)
210 | ||
/*
 *	__dma_flush_area(start, size)
 *
 *	clean & invalidate D / U line
 *
 *	- start   - virtual start address of region
 *	- size    - size in question
 */
ENTRY(__dma_flush_area)
	// clean+invalidate (civac) by line, completed with a full-system dsb
	dcache_by_line_op civac, sy, x0, x1, x2, x3
	ret
ENDPIPROC(__dma_flush_area)
7363590d CM |
223 | |
/*
 *	__dma_map_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 */
ENTRY(__dma_map_area)
	cmp	w2, #DMA_FROM_DEVICE
	b.eq	__dma_inv_area		// device writes: invalidate only
	b	__dma_clean_area	// otherwise: clean to PoC for the device
ENDPIPROC(__dma_map_area)
7363590d CM |
235 | |
/*
 *	__dma_unmap_area(start, size, dir)
 *	- start   - kernel virtual start address
 *	- size    - size of region
 *	- dir     - DMA direction
 */
ENTRY(__dma_unmap_area)
	cmp	w2, #DMA_TO_DEVICE
	b.ne	__dma_inv_area		// device may have written: invalidate
	ret				// TO_DEVICE: nothing to do on unmap
ENDPIPROC(__dma_unmap_area)