]> git.ipfire.org Git - thirdparty/u-boot.git/blob - arch/mips/lib/cache_init.S
SPDX: Convert all of our single license tags to Linux Kernel style
[thirdparty/u-boot.git] / arch / mips / lib / cache_init.S
1 /* SPDX-License-Identifier: GPL-2.0+ */
2 /*
3 * Cache-handling routines for MIPS CPUs
4 *
5 * Copyright (c) 2003 Wolfgang Denk <wd@denx.de>
6 */
7
8 #include <asm-offsets.h>
9 #include <config.h>
10 #include <asm/asm.h>
11 #include <asm/regdef.h>
12 #include <asm/mipsregs.h>
13 #include <asm/addrspace.h>
14 #include <asm/cacheops.h>
15 #include <asm/cm.h>
16
17 #ifndef CONFIG_SYS_MIPS_CACHE_MODE
18 #define CONFIG_SYS_MIPS_CACHE_MODE CONF_CM_CACHABLE_NONCOHERENT
19 #endif
20
21 #define INDEX_BASE CKSEG0
22
/*
 * f_fill64 - store \val into the 64 bytes starting at \dst + \offset.
 * Unrolled as 8 LONG_S stores on 64-bit builds (LONGSIZE == 8) or 16 on
 * 32-bit builds, so one expansion always covers exactly 64 bytes.
 * No registers are modified besides the memory written.
 */
23 .macro f_fill64 dst, offset, val
24 LONG_S \val, (\offset + 0 * LONGSIZE)(\dst)
25 LONG_S \val, (\offset + 1 * LONGSIZE)(\dst)
26 LONG_S \val, (\offset + 2 * LONGSIZE)(\dst)
27 LONG_S \val, (\offset + 3 * LONGSIZE)(\dst)
28 LONG_S \val, (\offset + 4 * LONGSIZE)(\dst)
29 LONG_S \val, (\offset + 5 * LONGSIZE)(\dst)
30 LONG_S \val, (\offset + 6 * LONGSIZE)(\dst)
31 LONG_S \val, (\offset + 7 * LONGSIZE)(\dst)
32 #if LONGSIZE == 4
/* 32-bit stores only cover 32 bytes so far - emit 8 more */
33 LONG_S \val, (\offset + 8 * LONGSIZE)(\dst)
34 LONG_S \val, (\offset + 9 * LONGSIZE)(\dst)
35 LONG_S \val, (\offset + 10 * LONGSIZE)(\dst)
36 LONG_S \val, (\offset + 11 * LONGSIZE)(\dst)
37 LONG_S \val, (\offset + 12 * LONGSIZE)(\dst)
38 LONG_S \val, (\offset + 13 * LONGSIZE)(\dst)
39 LONG_S \val, (\offset + 14 * LONGSIZE)(\dst)
40 LONG_S \val, (\offset + 15 * LONGSIZE)(\dst)
41 #endif
42 .endm
43
/*
 * cache_loop - perform cache operation \op on every line from \curr
 * (inclusive) up to \end (exclusive), advancing by \line_sz per step.
 * This is a do-while loop: the line at \curr is always operated on at
 * least once, so callers must skip the loop themselves for empty
 * ranges. \curr is clobbered (equals \end on exit).
 */
44 .macro cache_loop curr, end, line_sz, op
45 10: cache \op, 0(\curr)
46 PTR_ADDU \curr, \curr, \line_sz
47 bne \curr, \end, 10b
48 .endm
49
/*
 * l1_info - probe an L1 cache's geometry from the cop0 Config1 register.
 *
 * \sz:      output - total cache size in bytes (0 if the cache is absent)
 * \line_sz: output - cache line size in bytes (0 if the cache is absent)
 * \off:     Config1 shift of the cache's associativity field, i.e.
 *           MIPS_CONF1_IA_SHF for the I-cache or MIPS_CONF1_DA_SHF for
 *           the D-cache; the D-field shifts below are rebased from it
 *           so the same code decodes either cache's field group.
 *
 * Uses $1 ($at) as scratch, hence the noat push/pop.
 */
50 .macro l1_info sz, line_sz, off
51 .set push
52 .set noat
53
54 mfc0 $1, CP0_CONFIG, 1
55
56 /* detect line size */
/* Field value 0 encodes "no cache": leave \sz = 0 and bail to 10f.
   Otherwise line bytes = 2 << field. */
57 srl \line_sz, $1, \off + MIPS_CONF1_DL_SHF - MIPS_CONF1_DA_SHF
58 andi \line_sz, \line_sz, (MIPS_CONF1_DL >> MIPS_CONF1_DL_SHF)
59 move \sz, zero
60 beqz \line_sz, 10f
61 li \sz, 2
62 sllv \line_sz, \sz, \line_sz
63
64 /* detect associativity */
/* ways = field + 1 */
65 srl \sz, $1, \off + MIPS_CONF1_DA_SHF - MIPS_CONF1_DA_SHF
66 andi \sz, \sz, (MIPS_CONF1_DA >> MIPS_CONF1_DA_SHF)
67 addiu \sz, \sz, 1
68
69 /* sz *= line_sz */
70 mul \sz, \sz, \line_sz
71
72 /* detect log32(sets) */
/* The +1 then "andi 0x7" wraps field value 7 (7+1 = 8) back to 0, so
   that encoding yields 32 sets via the final "* 32" alone */
73 srl $1, $1, \off + MIPS_CONF1_DS_SHF - MIPS_CONF1_DA_SHF
74 andi $1, $1, (MIPS_CONF1_DS >> MIPS_CONF1_DS_SHF)
75 addiu $1, $1, 1
76 andi $1, $1, 0x7
77
78 /* sz <<= log32(sets) */
79 sllv \sz, \sz, $1
80
81 /* sz *= 32 */
82 li $1, 32
83 mul \sz, \sz, $1
84 10:
85 .set pop
86 .endm
87 /*
88 * mips_cache_reset - low level initialisation of the primary caches
89 *
90 * This routine initialises the primary caches to ensure that they have good
91 * parity. It must be called by the ROM before any cached locations are used
92 * to prevent the possibility of data with bad parity being written to memory.
93 *
94 * To initialise the instruction cache it is essential that a source of data
95 * with good parity is available. This routine will initialise an area of
96 * memory starting at location zero to be used as a source of parity.
97 *
98 * Note that this function does not follow the standard calling convention &
99 * may clobber typically callee-saved registers.
100 *
101 * RETURNS: N/A
102 *
103 */
104 #define R_RETURN s0
105 #define R_IC_SIZE s1
106 #define R_IC_LINE s2
107 #define R_DC_SIZE s3
108 #define R_DC_LINE s4
109 #define R_L2_SIZE s5
110 #define R_L2_LINE s6
111 #define R_L2_BYPASSED s7
112 #define R_L2_L2C t8
113 LEAF(mips_cache_reset)
/* NOTE(review): ra is saved into R_RETURN (s0) here, but nothing in the
   visible body performs a call that clobbers ra and the epilogue returns
   via "jr ra" - confirm R_RETURN isn't relied upon elsewhere before
   removing it. */
114 move R_RETURN, ra
115
116 #ifdef CONFIG_MIPS_L2_CACHE
117 /*
118 * For there to be an L2 present, Config2 must be present. If it isn't
119 * then we proceed knowing there's no L2 cache.
120 */
121 move R_L2_SIZE, zero
122 move R_L2_LINE, zero
123 move R_L2_BYPASSED, zero
124 move R_L2_L2C, zero
/* Config1 bit 31 is the M (continuation) bit; bgez (bit 31 clear) means
   Config2 is absent, so skip the whole L2 probe */
125 mfc0 t0, CP0_CONFIG, 1
126 bgez t0, l2_probe_done
127
128 /*
129 * From MIPSr6 onwards the L2 cache configuration might not be reported
130 * by Config2. The Config5.L2C bit indicates whether this is the case,
131 * and if it is then we need knowledge of where else to look. For cores
132 * from Imagination Technologies this is a CM GCR.
133 */
134 # if __mips_isa_rev >= 6
135 /* Check that Config5 exists */
/* Walk the Config2..Config4 M bits; if any is clear, Config5 does not
   exist and the cop0 Config2 probe below is used instead */
136 mfc0 t0, CP0_CONFIG, 2
137 bgez t0, l2_probe_cop0
138 mfc0 t0, CP0_CONFIG, 3
139 bgez t0, l2_probe_cop0
140 mfc0 t0, CP0_CONFIG, 4
141 bgez t0, l2_probe_cop0
142
143 /* Check Config5.L2C is set */
144 mfc0 t0, CP0_CONFIG, 5
145 and R_L2_L2C, t0, MIPS_CONF5_L2C
146 beqz R_L2_L2C, l2_probe_cop0
147
148 /* Config5.L2C is set */
149 # ifdef CONFIG_MIPS_CM
150 /* The CM will provide L2 configuration */
151 PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
152 lw t1, GCR_L2_CONFIG(t0)
/* NOTE(review): bgez treats a clear top bit of GCR_L2_CONFIG as "no L2
   configuration reported here" - confirm against the CM GCR spec */
153 bgez t1, l2_probe_done
154
/* LINESZ == 0 encodes "no L2"; otherwise line bytes = 2 << LINESZ */
155 ext R_L2_LINE, t1, \
156 GCR_L2_CONFIG_LINESZ_SHIFT, GCR_L2_CONFIG_LINESZ_BITS
157 beqz R_L2_LINE, l2_probe_done
158 li t2, 2
159 sllv R_L2_LINE, t2, R_L2_LINE
160
/* ways = ASSOC + 1; partial size = line * ways */
161 ext t2, t1, GCR_L2_CONFIG_ASSOC_SHIFT, GCR_L2_CONFIG_ASSOC_BITS
162 addiu t2, t2, 1
163 mul R_L2_SIZE, R_L2_LINE, t2
164
/* total size = line * ways * (64 << SETSZ) sets */
165 ext t2, t1, GCR_L2_CONFIG_SETSZ_SHIFT, GCR_L2_CONFIG_SETSZ_BITS
166 sllv R_L2_SIZE, R_L2_SIZE, t2
167 li t2, 64
168 mul R_L2_SIZE, R_L2_SIZE, t2
169
170 /* Bypass the L2 cache so that we can init the L1s early */
171 or t1, t1, GCR_L2_CONFIG_BYPASS
172 sw t1, GCR_L2_CONFIG(t0)
173 sync
174 li R_L2_BYPASSED, 1
175
176 /* Zero the L2 tag registers */
177 sw zero, GCR_L2_TAG_ADDR(t0)
178 sw zero, GCR_L2_TAG_ADDR_UPPER(t0)
179 sw zero, GCR_L2_TAG_STATE(t0)
180 sw zero, GCR_L2_TAG_STATE_UPPER(t0)
181 sw zero, GCR_L2_DATA(t0)
182 sw zero, GCR_L2_DATA_UPPER(t0)
183 sync
184 # else
185 /* We don't know how to retrieve L2 configuration on this system */
186 # endif
187 b l2_probe_done
188 # endif
189
190 /*
191 * For pre-r6 systems, or r6 systems with Config5.L2C==0, probe the L2
192 * cache configuration from the cop0 Config2 register.
193 */
194 l2_probe_cop0:
195 mfc0 t0, CP0_CONFIG, 2
196
/* SL == 0 encodes "no L2"; otherwise line bytes = 2 << SL */
197 srl R_L2_LINE, t0, MIPS_CONF2_SL_SHF
198 andi R_L2_LINE, R_L2_LINE, MIPS_CONF2_SL >> MIPS_CONF2_SL_SHF
199 beqz R_L2_LINE, l2_probe_done
200 li t1, 2
201 sllv R_L2_LINE, t1, R_L2_LINE
202
/* ways = SA + 1; partial size = line * ways */
203 srl t1, t0, MIPS_CONF2_SA_SHF
204 andi t1, t1, MIPS_CONF2_SA >> MIPS_CONF2_SA_SHF
205 addiu t1, t1, 1
206 mul R_L2_SIZE, R_L2_LINE, t1
207
/* total size = line * ways * (64 << SS) sets */
208 srl t1, t0, MIPS_CONF2_SS_SHF
209 andi t1, t1, MIPS_CONF2_SS >> MIPS_CONF2_SS_SHF
210 sllv R_L2_SIZE, R_L2_SIZE, t1
211 li t1, 64
212 mul R_L2_SIZE, R_L2_SIZE, t1
213
214 /* Attempt to bypass the L2 so that we can init the L1s early */
215 or t0, t0, MIPS_CONF2_L2B
216 mtc0 t0, CP0_CONFIG, 2
217 ehb
/* L2B may not be writable on all cores: read it back to learn whether
   the bypass actually took effect (this drives the init order below) */
218 mfc0 t0, CP0_CONFIG, 2
219 and R_L2_BYPASSED, t0, MIPS_CONF2_L2B
220
221 /* Zero the L2 tag registers */
/* TagLo select 4 - presumably the L2 (L23TagLo) register; TODO confirm
   against the core's programmer's manual */
222 mtc0 zero, CP0_TAGLO, 4
223 ehb
224 l2_probe_done:
225 #endif
226
227 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
228 li R_IC_SIZE, CONFIG_SYS_ICACHE_SIZE
229 li R_IC_LINE, CONFIG_SYS_ICACHE_LINE_SIZE
230 #else
231 l1_info R_IC_SIZE, R_IC_LINE, MIPS_CONF1_IA_SHF
232 #endif
233
234 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
235 li R_DC_SIZE, CONFIG_SYS_DCACHE_SIZE
236 li R_DC_LINE, CONFIG_SYS_DCACHE_LINE_SIZE
237 #else
238 l1_info R_DC_SIZE, R_DC_LINE, MIPS_CONF1_DA_SHF
239 #endif
240
241 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
242
243 /* Determine the largest L1 cache size */
244 #ifndef CONFIG_SYS_CACHE_SIZE_AUTO
245 #if CONFIG_SYS_ICACHE_SIZE > CONFIG_SYS_DCACHE_SIZE
246 li v0, CONFIG_SYS_ICACHE_SIZE
247 #else
248 li v0, CONFIG_SYS_DCACHE_SIZE
249 #endif
250 #else
/* v0 = max(R_IC_SIZE, R_DC_SIZE): movn replaces v0 with the D-cache
   size when the I-cache size is strictly smaller */
251 move v0, R_IC_SIZE
252 sltu t1, R_IC_SIZE, R_DC_SIZE
253 movn v0, R_DC_SIZE, t1
254 #endif
255 /*
256 * Now clear that much memory starting from zero.
257 */
/* Write through CKSEG1 (uncached) so these stores don't touch the
   not-yet-initialised caches; f_fill64 clears 64 bytes per iteration */
258 PTR_LI a0, CKSEG1
259 PTR_ADDU a1, a0, v0
260 2: PTR_ADDIU a0, 64
261 f_fill64 a0, -64, zero
262 bne a0, a1, 2b
263
264 #endif /* CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD */
265
266 #ifdef CONFIG_MIPS_L2_CACHE
267 /*
268 * If the L2 is bypassed, init the L1 first so that we can execute the
269 * rest of the cache initialisation using the L1 instruction cache.
270 */
271 bnez R_L2_BYPASSED, l1_init
272
273 l2_init:
/* Invalidate every L2 line by writing the (zeroed) tags via the index
   cache op */
274 PTR_LI t0, INDEX_BASE
275 PTR_ADDU t1, t0, R_L2_SIZE
276 1: cache INDEX_STORE_TAG_SD, 0(t0)
277 PTR_ADDU t0, t0, R_L2_LINE
278 bne t0, t1, 1b
279
280 /*
281 * If the L2 was bypassed then we already initialised the L1s before
282 * the L2, so we are now done.
283 */
284 bnez R_L2_BYPASSED, l2_unbypass
285 #endif
286
287 /*
288 * The TagLo registers used depend upon the CPU implementation, but the
289 * architecture requires that it is safe for software to write to both
290 * TagLo selects 0 & 2 covering supported cases.
291 */
292 l1_init:
293 mtc0 zero, CP0_TAGLO
294 mtc0 zero, CP0_TAGLO, 2
295 ehb
296
297 /*
298 * The caches are probably in an indeterminate state, so we force good
299 * parity into them by doing an invalidate for each line. If
300 * CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD is set then we'll proceed to
301 * perform a load/fill & a further invalidate for each line, assuming
302 * that the bottom of RAM (having just been cleared) will generate good
303 * parity for the cache.
304 */
305
306 /*
307 * Initialize the I-cache first,
308 */
/* Skip entirely if probing found no I-cache (size == 0) */
309 blez R_IC_SIZE, 1f
310 PTR_LI t0, INDEX_BASE
311 PTR_ADDU t1, t0, R_IC_SIZE
312 /* clear tag to invalidate */
313 cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
314 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
315 /* fill once, so data field parity is correct */
316 PTR_LI t0, INDEX_BASE
317 cache_loop t0, t1, R_IC_LINE, FILL
318 /* invalidate again - prudent but not strictly necessary */
319 PTR_LI t0, INDEX_BASE
320 cache_loop t0, t1, R_IC_LINE, INDEX_STORE_TAG_I
321 #endif
322
323 /* Enable use of the I-cache by setting Config.K0 */
324 sync
325 mfc0 t0, CP0_CONFIG
326 li t1, CONFIG_SYS_MIPS_CACHE_MODE
327 #if __mips_isa_rev >= 2
/* r2+: insert the 3-bit CCA directly into Config[2:0] */
328 ins t0, t1, 0, 3
329 #else
/* pre-r2: ori/xori pair clears the K0 field, then OR in the new mode */
330 ori t0, t0, CONF_CM_CMASK
331 xori t0, t0, CONF_CM_CMASK
332 or t0, t0, t1
333 #endif
334 mtc0 t0, CP0_CONFIG
335
336 /*
337 * then initialize D-cache.
338 */
/* As with the I-cache: skip when absent (size == 0) */
339 1: blez R_DC_SIZE, 3f
340 PTR_LI t0, INDEX_BASE
341 PTR_ADDU t1, t0, R_DC_SIZE
342 /* clear all tags */
343 cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
344 #ifdef CONFIG_SYS_MIPS_CACHE_INIT_RAM_LOAD
345 /* load from each line (in cached space) */
346 PTR_LI t0, INDEX_BASE
347 2: LONG_L zero, 0(t0)
348 PTR_ADDU t0, R_DC_LINE
349 bne t0, t1, 2b
350 /* clear all tags */
351 PTR_LI t0, INDEX_BASE
352 cache_loop t0, t1, R_DC_LINE, INDEX_STORE_TAG_D
353 #endif
354 3:
355
356 #ifdef CONFIG_MIPS_L2_CACHE
357 /* If the L2 isn't bypassed then we're done */
358 beqz R_L2_BYPASSED, return
359
360 /* The L2 is bypassed - go initialise it */
361 b l2_init
362
363 l2_unbypass:
364 # if __mips_isa_rev >= 6
365 beqz R_L2_L2C, 1f
366
/* Clear GCR_L2_CONFIG.BYPASS (known set on this path, so xor clears) */
367 li t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
368 lw t1, GCR_L2_CONFIG(t0)
369 xor t1, t1, GCR_L2_CONFIG_BYPASS
370 sw t1, GCR_L2_CONFIG(t0)
371 sync
372 ehb
373 b 2f
374 # endif
/* Clear Config2.L2B (known set, since R_L2_BYPASSED != 0 here) */
375 1: mfc0 t0, CP0_CONFIG, 2
376 xor t0, t0, MIPS_CONF2_L2B
377 mtc0 t0, CP0_CONFIG, 2
378 ehb
379
380 2:
381 # ifdef CONFIG_MIPS_CM
382 /* Config3 must exist for a CM to be present */
/* Walk the Config1/Config2 M bits; either clear => no Config3 */
383 mfc0 t0, CP0_CONFIG, 1
384 bgez t0, 2f
385 mfc0 t0, CP0_CONFIG, 2
386 bgez t0, 2f
387
388 /* Check Config3.CMGCR to determine CM presence */
389 mfc0 t0, CP0_CONFIG, 3
390 and t0, t0, MIPS_CONF3_CMGCR
391 beqz t0, 2f
392
393 /* Change Config.K0 to a coherent CCA */
394 mfc0 t0, CP0_CONFIG
395 li t1, CONF_CM_CACHABLE_COW
396 #if __mips_isa_rev >= 2
397 ins t0, t1, 0, 3
398 #else
399 ori t0, t0, CONF_CM_CMASK
400 xori t0, t0, CONF_CM_CMASK
401 or t0, t0, t1
402 #endif
403 mtc0 t0, CP0_CONFIG
404
405 /*
406 * Join the coherent domain such that the caches of this core are kept
407 * coherent with those of other cores.
408 */
/* CM3 (GCR revision >= GCR_REV_CM3) uses the single COHERENCE_EN bit;
   older CMs use the per-domain COHERENCE_DOM_EN mask instead */
409 PTR_LI t0, CKSEG1ADDR(CONFIG_MIPS_CM_BASE)
410 lw t1, GCR_REV(t0)
411 li t2, GCR_REV_CM3
412 li t3, GCR_Cx_COHERENCE_EN
413 bge t1, t2, 1f
414 li t3, GCR_Cx_COHERENCE_DOM_EN
415 1: sw t3, GCR_Cx_COHERENCE(t0)
416 ehb
417 2:
418 # endif
419 #endif
420
421 return:
422 /* Ensure all cache operations complete before returning */
423 sync
424 jr ra
425 END(mips_cache_reset)
426
427 /*
428 * dcache_status - get cache status
429 *
430 * RETURNS: 0 - cache disabled; 1 - cache enabled
431 *
432 */
433 LEAF(dcache_status)
/* Return v0 = 0 if Config.K0 (the kseg0 cache coherency attribute) is
   CONF_CM_UNCACHED, v0 = 1 for any other CCA. Clobbers t0, t1.
   (Default reorder mode: the assembler takes care of the branch delay
   slot.) */
434 mfc0 t0, CP0_CONFIG
435 li t1, CONF_CM_UNCACHED
436 andi t0, t0, CONF_CM_CMASK
437 move v0, zero
438 beq t0, t1, 2f
439 li v0, 1
440 2: jr ra
441 END(dcache_status)
442
443 /*
444 * dcache_disable - disable cache
445 *
446 * RETURNS: N/A
447 *
448 */
449 LEAF(dcache_disable)
/* Set Config.K0 to uncached. "li t1, -8" is ~7, i.e. ~CONF_CM_CMASK:
   it clears the 3-bit CCA field before ORing in CONF_CM_UNCACHED.
   Clobbers t0, t1. */
450 mfc0 t0, CP0_CONFIG
451 li t1, -8
452 and t0, t0, t1
453 ori t0, t0, CONF_CM_UNCACHED
454 mtc0 t0, CP0_CONFIG
455 jr ra
456 END(dcache_disable)
457
458 /*
459 * dcache_enable - enable cache
460 *
461 * RETURNS: N/A
462 *
463 */
464 LEAF(dcache_enable)
/* Set Config.K0 to CONFIG_SYS_MIPS_CACHE_MODE. The ori/xori pair first
   forces the 3-bit CCA field to all-ones, then XORs those same bits
   away - clearing the field without needing a separate mask register -
   before ORing in the desired mode. Clobbers t0. */
465 mfc0 t0, CP0_CONFIG
466 ori t0, CONF_CM_CMASK
467 xori t0, CONF_CM_CMASK
468 ori t0, CONFIG_SYS_MIPS_CACHE_MODE
469 mtc0 t0, CP0_CONFIG
470 jr ra
471 END(dcache_enable)