arch/arc/lib/cache.c
/*
 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <config.h>
#include <common.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <asm/arcregs.h>
#include <asm/cache.h>

/* Bit values in IC_CTRL */
#define IC_CTRL_CACHE_DISABLE (1 << 0)

/* Bit values in DC_CTRL */
#define DC_CTRL_CACHE_DISABLE (1 << 0)
#define DC_CTRL_INV_MODE_FLUSH (1 << 6)
#define DC_CTRL_FLUSH_STATUS (1 << 8)
#define CACHE_VER_NUM_MASK 0xF
#define SLC_CTRL_SB (1 << 2)

#define OP_INV 0x1
#define OP_FLUSH 0x2
#define OP_INV_IC 0x3

/*
 * By default these variables would fall into the .bss section.
 * But the .bss section is not relocated, so they would be initialized
 * before relocation and then lost when .bss is zeroed afterwards.
 * Placing them in .data keeps their values across relocation.
 */
int l1_line_sz __section(".data");
int dcache_exists __section(".data");
int icache_exists __section(".data");

#define CACHE_LINE_MASK (~(l1_line_sz - 1))
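/*
 * Illustrative example of the rounding done by the line-loop helpers
 * below (the 64-byte line size and the addresses are assumed values,
 * not taken from this file): for paddr = 0x80000123 and sz = 0x40,
 * sz += paddr & ~CACHE_LINE_MASK gives 0x40 + 0x23 = 0x63, paddr is
 * rounded down to 0x80000100, and DIV_ROUND_UP(0x63, 64) = 2 whole
 * cache lines are operated on, covering the requested range.
 */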

#ifdef CONFIG_ISA_ARCV2
int slc_line_sz __section(".data");
int slc_exists __section(".data");
int ioc_exists __section(".data");

static unsigned int __before_slc_op(const int op)
{
	unsigned int reg = 0;	/* only set and used for OP_INV, see __after_slc_op() */

	if (op == OP_INV) {
		/*
		 * IM is set by default and implies Flush-n-inv
		 * Clear it here for vanilla inv
		 */
		reg = read_aux_reg(ARC_AUX_SLC_CTRL);
		write_aux_reg(ARC_AUX_SLC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static void __after_slc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH) { /* flush / flush-n-inv both wait */
		/*
		 * Make sure "busy" bit reports correct status,
		 * see STAR 9001165532
		 */
		read_aux_reg(ARC_AUX_SLC_CTRL);
		while (read_aux_reg(ARC_AUX_SLC_CTRL) &
		       DC_CTRL_FLUSH_STATUS)
			;
	}

	/* Switch back to default Invalidate mode */
	if (op == OP_INV)
		write_aux_reg(ARC_AUX_SLC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
}

static inline void __slc_line_loop(unsigned long paddr, unsigned long sz,
				   const int op)
{
	unsigned int aux_cmd;
	int num_lines;

#define SLC_LINE_MASK (~(slc_line_sz - 1))

	aux_cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, slc_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += slc_line_sz;
	}
}

static inline void __slc_entire_op(const int cacheop)
{
	int aux;
	unsigned int ctrl_reg = __before_slc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_SLC_INVALIDATE;
	else
		aux = ARC_AUX_SLC_FLUSH;

	write_aux_reg(aux, 0x1);

	__after_slc_op(cacheop, ctrl_reg);
}

static inline void __slc_line_op(unsigned long paddr, unsigned long sz,
				 const int cacheop)
{
	unsigned int ctrl_reg = __before_slc_op(cacheop);
	__slc_line_loop(paddr, sz, cacheop);
	__after_slc_op(cacheop, ctrl_reg);
}
#else
#define __slc_entire_op(cacheop)
#define __slc_line_op(paddr, sz, cacheop)
#endif

#ifdef CONFIG_ISA_ARCV2
static void read_decode_cache_bcr_arcv2(void)
{
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, way:2, lsz:2, sz:4;
#else
			unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
		} fields;
		unsigned int word;
	} slc_cfg;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, ver:8;
#else
			unsigned int ver:8, pad:24;
#endif
		} fields;
		unsigned int word;
	} sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = 1;
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	union {
		struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
		} fields;
		unsigned int word;
	} cbcr;

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c)
		ioc_exists = 1;
}
#endif

void read_decode_cache_bcr(void)
{
	int dc_line_sz = 0, ic_line_sz = 0;

	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
			unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
		} fields;
		unsigned int word;
	} ibcr, dbcr;
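	/*
	 * Example decode (illustrative values, not from this file):
	 * line_len = 3 in the I$ BCR gives 8 << 3 = 64-byte lines and
	 * line_len = 2 in the D$ BCR gives 16 << 2 = 64-byte lines, so
	 * the consistency check at the end of this function passes.
	 */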

	ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
	if (ibcr.fields.ver) {
		icache_exists = 1;
		l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
		if (!ic_line_sz)
			panic("Instruction cache exists but line length is 0\n");
	}

	dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
	if (dbcr.fields.ver) {
		dcache_exists = 1;
		l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
		if (!dc_line_sz)
			panic("Data cache exists but line length is 0\n");
	}

	if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
		panic("Instruction and data cache line lengths differ\n");
}

void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be a power of 2 and at least 4 KiB");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
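		/*
		 * Worked example (illustrative): ap_size = 512 MiB means
		 * ap_size / 1024 = 524288 KiB = 2^19 KiB, so
		 * order_base_2(ap_size / 1024) - 2 = 19 - 2 = 17 = 0x11,
		 * matching the 512M case mentioned above.
		 */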
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

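		/*
		 * The shift by 12 below implies the aperture base is
		 * programmed in 4 KiB units (an inference from this code,
		 * not from the databook): e.g. ap_base = 0x80000000 would
		 * be written as 0x80000.
		 */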
		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}
#endif
}

int icache_status(void)
{
	if (!icache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void icache_enable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
			      ~IC_CTRL_CACHE_DISABLE);
}

void icache_disable(void)
{
	if (icache_exists)
		write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
			      IC_CTRL_CACHE_DISABLE);
}

#ifndef CONFIG_SYS_ICACHE_OFF
void invalidate_icache_all(void)
{
	/* Any write to IC_IVIC register triggers invalidation of entire I$ */
	if (icache_status()) {
		write_aux_reg(ARC_AUX_IC_IVIC, 1);
		/*
		 * As per ARC HS databook (see chapter 5.3.3.2)
		 * it is required to add 3 NOPs after each write to IC_IVIC.
		 */
		__builtin_arc_nop();
		__builtin_arc_nop();
		__builtin_arc_nop();
		read_aux_reg(ARC_AUX_IC_CTRL);	/* blocks */
	}
}
#else
void invalidate_icache_all(void)
{
}
#endif

int dcache_status(void)
{
	if (!dcache_exists)
		return 0;

	if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
		return 0;
	else
		return 1;
}

void dcache_enable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
		      ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
}

void dcache_disable(void)
{
	if (!dcache_exists)
		return;

	write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
		      DC_CTRL_CACHE_DISABLE);
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Common Helper for Line Operations on {I,D}-Cache
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long sz,
				     const int cacheop)
{
	unsigned int aux_cmd;
#if (CONFIG_ARC_MMU_VER == 3)
	unsigned int aux_tag;
#endif
	int num_lines;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_AUX_IC_IVIL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_DC_PTAG;
#endif
	}

	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		write_aux_reg(aux_tag, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}

static unsigned int __before_dc_op(const int op)
{
	unsigned int reg = 0;	/* only set and used for OP_INV, see __after_dc_op() */

	if (op == OP_INV) {
		/*
		 * IM is set by default and implies Flush-n-inv
		 * Clear it here for vanilla inv
		 */
		reg = read_aux_reg(ARC_AUX_DC_CTRL);
		write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}

	return reg;
}

static void __after_dc_op(const int op, unsigned int reg)
{
	if (op & OP_FLUSH)	/* flush / flush-n-inv both wait */
		while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
			;

	/* Switch back to default Invalidate mode */
	if (op == OP_INV)
		write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
}

static inline void __dc_entire_op(const int cacheop)
{
	int aux;
	unsigned int ctrl_reg = __before_dc_op(cacheop);

	if (cacheop & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_AUX_DC_IVDC;
	else
		aux = ARC_AUX_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(cacheop, ctrl_reg);
}

static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	unsigned int ctrl_reg = __before_dc_op(cacheop);
	__cache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop, ctrl_reg);
}
#else
#define __dc_entire_op(cacheop)
#define __dc_line_op(paddr, sz, cacheop)
#endif /* !CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_line_op(start, end - start, OP_INV);
#endif
}

void flush_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_line_op(start, end - start, OP_FLUSH);
#endif
}
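
/*
 * Usage sketch (illustrative, not part of the original file): callers
 * typically flush a buffer before a DMA master reads it and invalidate
 * it before the CPU reads DMA-written data, e.g.:
 *
 *	flush_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *	... DMA transfer to/from buf happens here ...
 *	invalidate_dcache_range((unsigned long)buf, (unsigned long)buf + len);
 *
 * "buf" and "len" are hypothetical names. When the IOC is present
 * (ioc_exists), the routines above skip the per-line loops because the
 * hardware keeps DMA traffic coherent.
 */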

void flush_cache(unsigned long start, unsigned long size)
{
	flush_dcache_range(start, start + size);
}

void invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}

void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_FLUSH);
#endif
}