/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * This file is based on sample code from ARMv8 ARM.
 */

#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <linux/linkage.h>

#ifndef CONFIG_CMO_BY_VA_ONLY
/*
 * void __asm_dcache_level(level)
 *
 * flush or invalidate one level of the data cache.
 *
 * x0: cache level
 * x1: 0 clean & invalidate, 1 invalidate only
 * x16: FEAT_CCIDX
 * x2~x9: clobbered
 */
.pushsection .text.__asm_dcache_level, "ax"
ENTRY(__asm_dcache_level)
	lsl	x12, x0, #1
	msr	csselr_el1, x12		/* select cache level */
	isb				/* sync change of csselr_el1 */
	mrs	x6, ccsidr_el1		/* read the new ccsidr_el1 */
	ubfx	x2, x6, #0, #3		/* x2 <- log2(cache line size)-4 */
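	/*
	 * CCSIDR_EL1 has two layouts: with FEAT_CCIDX the Associativity
	 * and NumSets fields are widened to bits [23:3] and [55:32];
	 * without it the legacy layout uses bits [12:3] and [27:13].
	 * x16 carries the ID_AA64MMFR2_EL1.CCIDX value read by the caller.
	 */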
	cbz	x16, 3f			/* check for FEAT_CCIDX */
	ubfx	x3, x6, #3, #21		/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #32, #24	/* x4 <- number of cache sets - 1 */
	b	4f
3:
	ubfx	x3, x6, #3, #10		/* x3 <- number of cache ways - 1 */
	ubfx	x4, x6, #13, #15	/* x4 <- number of cache sets - 1 */
4:
	add	x2, x2, #4		/* x2 <- log2(cache line size) */
	clz	w5, w3			/* bit position of #ways */
	/* x12 <- cache level << 1 */
	/* x2 <- line length offset */
	/* x3 <- number of cache ways - 1 */
	/* x4 <- number of cache sets - 1 */
	/* x5 <- bit position of #ways */

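	/*
	 * Walk every set and way of the selected level. Each DC ISW/CISW
	 * operand packs the level into bits [3:1], the way index shifted
	 * left by x5 = clz(#ways - 1), and the set index shifted left by
	 * the line-size offset, as required for set/way maintenance.
	 */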
loop_set:
	mov	x6, x3			/* x6 <- working copy of #ways */
loop_way:
	lsl	x7, x6, x5
	orr	x9, x12, x7		/* map way and level to cisw value */
	lsl	x7, x4, x2
	orr	x9, x9, x7		/* map set number to cisw value */
	tbz	w1, #0, 1f
	dc	isw, x9
	b	2f
1:	dc	cisw, x9		/* clean & invalidate by set/way */
2:	subs	x6, x6, #1		/* decrement the way */
	b.ge	loop_way
	subs	x4, x4, #1		/* decrement the set */
	b.ge	loop_set

	ret
ENDPROC(__asm_dcache_level)
.popsection

/*
 * void __asm_dcache_all(int invalidate_only)
 *
 * x0: 0 clean & invalidate, 1 invalidate only
 *
 * flush or invalidate all data cache by SET/WAY.
 */
.pushsection .text.__asm_dcache_all, "ax"
ENTRY(__asm_dcache_all)
	mov	x1, x0
	dsb	sy
	mrs	x10, clidr_el1		/* read clidr_el1 */
	ubfx	x11, x10, #24, #3	/* x11 <- loc */
	cbz	x11, finished		/* if loc is 0, exit */
	mov	x15, lr
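	/*
	 * ID_AA64MMFR2_EL1 is accessed through its s3_0_c0_c7_2 encoding,
	 * presumably so that older assemblers that do not know the
	 * register by name can still build this file.
	 */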
	mrs	x16, s3_0_c0_c7_2	/* read value of id_aa64mmfr2_el1 */
	ubfx	x16, x16, #20, #4	/* save FEAT_CCIDX identifier in x16 */
	mov	x0, #0			/* start flush at cache level 0 */
	/* x0  <- cache level */
	/* x10 <- clidr_el1 */
	/* x11 <- loc */
	/* x15 <- return address */

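	/*
	 * CLIDR_EL1 describes level n with a 3-bit Ctype field at bits
	 * [3n+2:3n]; a value of 2 or higher means a data or unified cache
	 * is present at that level and needs maintenance.
	 */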
loop_level:
	add	x12, x0, x0, lsl #1	/* x12 <- tripled cache level */
	lsr	x12, x10, x12
	and	x12, x12, #7		/* x12 <- cache type */
	cmp	x12, #2
	b.lt	skip			/* skip if no cache or icache */
	bl	__asm_dcache_level	/* x1 = 0 flush, 1 invalidate */
skip:
	add	x0, x0, #1		/* increment cache level */
	cmp	x11, x0
	b.gt	loop_level

	mov	x0, #0
	msr	csselr_el1, x0		/* restore csselr_el1 */
	dsb	sy
	isb
	mov	lr, x15

finished:
	ret
ENDPROC(__asm_dcache_all)
.popsection

.pushsection .text.__asm_flush_dcache_all, "ax"
ENTRY(__asm_flush_dcache_all)
	mov	x0, #0
	b	__asm_dcache_all
ENDPROC(__asm_flush_dcache_all)
.popsection

.pushsection .text.__asm_invalidate_dcache_all, "ax"
ENTRY(__asm_invalidate_dcache_all)
	mov	x0, #0x1
	b	__asm_dcache_all
ENDPROC(__asm_invalidate_dcache_all)
.popsection

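/*
 * Weak no-op stubs; platforms with a system-level (L3) cache can
 * override them with real maintenance routines.
 */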
.pushsection .text.__asm_flush_l3_dcache, "ax"
WEAK(__asm_flush_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection

.pushsection .text.__asm_invalidate_l3_icache, "ax"
WEAK(__asm_invalidate_l3_icache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection

#else	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * Define these so that they actively clash with a platform-provided
 * implementation when CONFIG_CMO_BY_VA_ONLY is accidentally selected.
 */

.pushsection .text.__asm_invalidate_l3_icache, "ax"
ENTRY(__asm_invalidate_l3_icache)
	mov	x0, xzr
	ret
ENDPROC(__asm_invalidate_l3_icache)
.popsection
.pushsection .text.__asm_flush_l3_dcache, "ax"
ENTRY(__asm_flush_l3_dcache)
	mov	x0, xzr
	ret
ENDPROC(__asm_flush_l3_dcache)
.popsection
#endif	/* CONFIG_CMO_BY_VA_ONLY */

/*
 * void __asm_flush_dcache_range(start, end)
 *
 * clean & invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_flush_dcache_range, "ax"
ENTRY(__asm_flush_dcache_range)
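	/*
	 * CTR_EL0.DminLine (bits [19:16]) is log2 of the smallest data
	 * cache line in words, so the line size in bytes is 4 << DminLine.
	 */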
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	civac, x0		/* clean & invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_flush_dcache_range)
.popsection
/*
 * void __asm_invalidate_dcache_range(start, end)
 *
 * invalidate data cache in the range
 *
 * x0: start address
 * x1: end address
 */
.pushsection .text.__asm_invalidate_dcache_range, "ax"
ENTRY(__asm_invalidate_dcache_range)
	mrs	x3, ctr_el0
	ubfx	x3, x3, #16, #4
	mov	x2, #4
	lsl	x2, x2, x3		/* cache line size */

	/* x2 <- minimal cache line size in cache system */
	sub	x3, x2, #1
	bic	x0, x0, x3
1:	dc	ivac, x0		/* invalidate data or unified cache */
	add	x0, x0, x2
	cmp	x0, x1
	b.lo	1b
	dsb	sy
	ret
ENDPROC(__asm_invalidate_dcache_range)
.popsection

/*
 * void __asm_invalidate_icache_all(void)
 *
 * invalidate all icache entries.
 */
.pushsection .text.__asm_invalidate_icache_all, "ax"
ENTRY(__asm_invalidate_icache_all)
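	/*
	 * IC IALLUIS invalidates all instruction caches to the Point of
	 * Unification for the Inner Shareable domain.
	 */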
	ic	ialluis
	isb	sy
	ret
ENDPROC(__asm_invalidate_icache_all)
.popsection

.pushsection .text.__asm_invalidate_l3_dcache, "ax"
WEAK(__asm_invalidate_l3_dcache)
	mov	x0, #0			/* return status as success */
	ret
ENDPROC(__asm_invalidate_l3_dcache)
.popsection

/*
 * void __asm_switch_ttbr(ulong new_ttbr)
 *
 * Safely switches to a new page table.
 */
.pushsection .text.__asm_switch_ttbr, "ax"
ENTRY(__asm_switch_ttbr)
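	/*
	 * Sequence: save SCTLR, disable MMU and caches, invalidate the
	 * TLBs, install the new TTBR0, then restore SCTLR so translation
	 * resumes with the new tables.
	 */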
	/* x2 = SCTLR (alive throughout the function) */
	switch_el x4, 3f, 2f, 1f
3:	mrs	x2, sctlr_el3
	b	0f
2:	mrs	x2, sctlr_el2
	b	0f
1:	mrs	x2, sctlr_el1
0:

	/* Unset CR_M | CR_C | CR_I from SCTLR to disable all caches */
	movn	x1, #(CR_M | CR_C | CR_I)
	and	x1, x2, x1
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x1
	b	0f
2:	msr	sctlr_el2, x1
	b	0f
1:	msr	sctlr_el1, x1
0:	isb

	/* This call only clobbers x30 (lr) and x9 (unused) */
	mov	x3, x30
	bl	__asm_invalidate_tlb_all

	/* From here on we're running safely with caches disabled */

	/* Set TTBR to our first argument */
	switch_el x4, 3f, 2f, 1f
3:	msr	ttbr0_el3, x0
	b	0f
2:	msr	ttbr0_el2, x0
	b	0f
1:	msr	ttbr0_el1, x0
0:	isb

	/* Restore original SCTLR and thus enable caches again */
	switch_el x4, 3f, 2f, 1f
3:	msr	sctlr_el3, x2
	b	0f
2:	msr	sctlr_el2, x2
	b	0f
1:	msr	sctlr_el1, x2
0:	isb

	ret	x3
ENDPROC(__asm_switch_ttbr)
.popsection