arcv2: Set IOC aperture so it covers available DDR
[people/ms/u-boot.git] / arch / arc / lib / cache.c
1 /*
2 * Copyright (C) 2013-2014 Synopsys, Inc. All rights reserved.
3 *
4 * SPDX-License-Identifier: GPL-2.0+
5 */
6
7 #include <config.h>
8 #include <common.h>
9 #include <linux/compiler.h>
10 #include <linux/kernel.h>
11 #include <linux/log2.h>
12 #include <asm/arcregs.h>
13 #include <asm/cache.h>
14
15 /* Bit values in IC_CTRL */
16 #define IC_CTRL_CACHE_DISABLE (1 << 0)
17
18 /* Bit values in DC_CTRL */
19 #define DC_CTRL_CACHE_DISABLE (1 << 0)
20 #define DC_CTRL_INV_MODE_FLUSH (1 << 6)
21 #define DC_CTRL_FLUSH_STATUS (1 << 8)
22 #define CACHE_VER_NUM_MASK 0xF
23 #define SLC_CTRL_SB (1 << 2)
24
25 #define OP_INV 0x1
26 #define OP_FLUSH 0x2
27 #define OP_INV_IC 0x3
28
29 /*
30 * By default that variable will fall into .bss section.
31 * But .bss section is not relocated and so it will be initialized before
32 * relocation but will be used after being zeroed.
33 */
34 int l1_line_sz __section(".data");
35 int dcache_exists __section(".data");
36 int icache_exists __section(".data");
37
38 #define CACHE_LINE_MASK (~(l1_line_sz - 1))
39
40 #ifdef CONFIG_ISA_ARCV2
41 int slc_line_sz __section(".data");
42 int slc_exists __section(".data");
43 int ioc_exists __section(".data");
44
45 static unsigned int __before_slc_op(const int op)
46 {
47 unsigned int reg = reg;
48
49 if (op == OP_INV) {
50 /*
51 * IM is set by default and implies Flush-n-inv
52 * Clear it here for vanilla inv
53 */
54 reg = read_aux_reg(ARC_AUX_SLC_CTRL);
55 write_aux_reg(ARC_AUX_SLC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
56 }
57
58 return reg;
59 }
60
61 static void __after_slc_op(const int op, unsigned int reg)
62 {
63 if (op & OP_FLUSH) { /* flush / flush-n-inv both wait */
64 /*
65 * Make sure "busy" bit reports correct status,
66 * see STAR 9001165532
67 */
68 read_aux_reg(ARC_AUX_SLC_CTRL);
69 while (read_aux_reg(ARC_AUX_SLC_CTRL) &
70 DC_CTRL_FLUSH_STATUS)
71 ;
72 }
73
74 /* Switch back to default Invalidate mode */
75 if (op == OP_INV)
76 write_aux_reg(ARC_AUX_SLC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
77 }
78
79 static inline void __slc_line_loop(unsigned long paddr, unsigned long sz,
80 const int op)
81 {
82 unsigned int aux_cmd;
83 int num_lines;
84
85 #define SLC_LINE_MASK (~(slc_line_sz - 1))
86
87 aux_cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;
88
89 sz += paddr & ~SLC_LINE_MASK;
90 paddr &= SLC_LINE_MASK;
91
92 num_lines = DIV_ROUND_UP(sz, slc_line_sz);
93
94 while (num_lines-- > 0) {
95 write_aux_reg(aux_cmd, paddr);
96 paddr += slc_line_sz;
97 }
98 }
99
100 static inline void __slc_entire_op(const int cacheop)
101 {
102 int aux;
103 unsigned int ctrl_reg = __before_slc_op(cacheop);
104
105 if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */
106 aux = ARC_AUX_SLC_INVALIDATE;
107 else
108 aux = ARC_AUX_SLC_FLUSH;
109
110 write_aux_reg(aux, 0x1);
111
112 __after_slc_op(cacheop, ctrl_reg);
113 }
114
/*
 * Perform a line-granular SLC operation on [paddr, paddr + sz),
 * bracketed by the before/after control-register handling.
 */
static inline void __slc_line_op(unsigned long paddr, unsigned long sz,
				 const int cacheop)
{
	unsigned int saved_ctrl = __before_slc_op(cacheop);

	__slc_line_loop(paddr, sz, cacheop);
	__after_slc_op(cacheop, saved_ctrl);
}
122 #else
123 #define __slc_entire_op(cacheop)
124 #define __slc_line_op(paddr, sz, cacheop)
125 #endif
126
127 #ifdef CONFIG_ISA_ARCV2
/*
 * Probe ARCv2-only cache hardware via Build Configuration Registers:
 * detect the System Level Cache (SLC) and its line size, and detect
 * an IO-Coherency (IOC) block via the cluster BCR.
 *
 * Sets the file-scope flags slc_exists/slc_line_sz/ioc_exists.
 */
static void read_decode_cache_bcr_arcv2(void)
{
	/* SLC_CONFIG register layout (bitfields decoded via union) */
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, way:2, lsz:2, sz:4;
#else
			unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
		} fields;
		unsigned int word;
	} slc_cfg;

	/* SLC Build Configuration Register: version 0 means "no SLC" */
	union {
		struct {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:24, ver:8;
#else
			unsigned int ver:8, pad:24;
#endif
		} fields;
		unsigned int word;
	} sbcr;

	sbcr.word = read_aux_reg(ARC_BCR_SLC);
	if (sbcr.fields.ver) {
		slc_cfg.word = read_aux_reg(ARC_AUX_SLC_CONFIG);
		slc_exists = 1;
		/* lsz == 0 encodes a 128-byte line, otherwise 64 bytes */
		slc_line_sz = (slc_cfg.fields.lsz == 0) ? 128 : 64;
	}

	/* Cluster BCR: the 'c' bit indicates an IO-Coherency unit */
	union {
		struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
			unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
			unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
		} fields;
		unsigned int word;
	} cbcr;

	cbcr.word = read_aux_reg(ARC_BCR_CLUSTER);
	if (cbcr.fields.c)
		ioc_exists = 1;
}
174 #endif
175
176 void read_decode_cache_bcr(void)
177 {
178 int dc_line_sz = 0, ic_line_sz = 0;
179
180 union {
181 struct {
182 #ifdef CONFIG_CPU_BIG_ENDIAN
183 unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
184 #else
185 unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
186 #endif
187 } fields;
188 unsigned int word;
189 } ibcr, dbcr;
190
191 ibcr.word = read_aux_reg(ARC_BCR_IC_BUILD);
192 if (ibcr.fields.ver) {
193 icache_exists = 1;
194 l1_line_sz = ic_line_sz = 8 << ibcr.fields.line_len;
195 if (!ic_line_sz)
196 panic("Instruction exists but line length is 0\n");
197 }
198
199 dbcr.word = read_aux_reg(ARC_BCR_DC_BUILD);
200 if (dbcr.fields.ver){
201 dcache_exists = 1;
202 l1_line_sz = dc_line_sz = 16 << dbcr.fields.line_len;
203 if (!dc_line_sz)
204 panic("Data cache exists but line length is 0\n");
205 }
206
207 if (ic_line_sz && dc_line_sz && (ic_line_sz != dc_line_sz))
208 panic("Instruction and data cache line lengths differ\n");
209 }
210
/*
 * One-time cache subsystem initialization: probe L1 (and on ARCv2 the
 * SLC/IOC) hardware and, if an IO-Coherency unit exists, program its
 * aperture to cover all of DDR and enable it.
 */
void cache_init(void)
{
	read_decode_cache_bcr();

#ifdef CONFIG_ISA_ARCV2
	read_decode_cache_bcr_arcv2();

	if (ioc_exists) {
		/* IOC Aperture start is equal to DDR start */
		unsigned int ap_base = CONFIG_SYS_SDRAM_BASE;
		/* IOC Aperture size is equal to DDR size */
		long ap_size = CONFIG_SYS_SDRAM_SIZE;

		/*
		 * Caches must not hold stale data for the region the IOC
		 * will manage once coherency is switched on.
		 */
		flush_dcache_all();
		invalidate_dcache_all();

		if (!is_power_of_2(ap_size) || ap_size < 4096)
			panic("IOC Aperture size must be power of 2 and bigger than 4KiB");

		/*
		 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
		 * so setting 0x11 implies 512M, 0x12 implies 1G...
		 */
		write_aux_reg(ARC_AUX_IO_COH_AP0_SIZE,
			      order_base_2(ap_size / 1024) - 2);

		/* IOC Aperture start must be aligned to the size of the aperture */
		if (ap_base % ap_size != 0)
			panic("IOC Aperture start must be aligned to the size of the aperture");

		write_aux_reg(ARC_AUX_IO_COH_AP0_BASE, ap_base >> 12);
		write_aux_reg(ARC_AUX_IO_COH_PARTIAL, 1);
		write_aux_reg(ARC_AUX_IO_COH_ENABLE, 1);
	}
#endif
}
249
250 int icache_status(void)
251 {
252 if (!icache_exists)
253 return 0;
254
255 if (read_aux_reg(ARC_AUX_IC_CTRL) & IC_CTRL_CACHE_DISABLE)
256 return 0;
257 else
258 return 1;
259 }
260
261 void icache_enable(void)
262 {
263 if (icache_exists)
264 write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) &
265 ~IC_CTRL_CACHE_DISABLE);
266 }
267
268 void icache_disable(void)
269 {
270 if (icache_exists)
271 write_aux_reg(ARC_AUX_IC_CTRL, read_aux_reg(ARC_AUX_IC_CTRL) |
272 IC_CTRL_CACHE_DISABLE);
273 }
274
275 #ifndef CONFIG_SYS_DCACHE_OFF
276 void invalidate_icache_all(void)
277 {
278 /* Any write to IC_IVIC register triggers invalidation of entire I$ */
279 if (icache_status()) {
280 write_aux_reg(ARC_AUX_IC_IVIC, 1);
281 read_aux_reg(ARC_AUX_IC_CTRL); /* blocks */
282 }
283 }
284 #else
285 void invalidate_icache_all(void)
286 {
287 }
288 #endif
289
290 int dcache_status(void)
291 {
292 if (!dcache_exists)
293 return 0;
294
295 if (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_CACHE_DISABLE)
296 return 0;
297 else
298 return 1;
299 }
300
301 void dcache_enable(void)
302 {
303 if (!dcache_exists)
304 return;
305
306 write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) &
307 ~(DC_CTRL_INV_MODE_FLUSH | DC_CTRL_CACHE_DISABLE));
308 }
309
310 void dcache_disable(void)
311 {
312 if (!dcache_exists)
313 return;
314
315 write_aux_reg(ARC_AUX_DC_CTRL, read_aux_reg(ARC_AUX_DC_CTRL) |
316 DC_CTRL_CACHE_DISABLE);
317 }
318
319 #ifndef CONFIG_SYS_DCACHE_OFF
/*
 * Common Helper for Line Operations on {I,D}-Cache
 *
 * Issues the per-line aux command (I$ invalidate, D$ invalidate or
 * D$ flush, selected by cacheop) for every L1 line overlapping
 * [paddr, paddr + sz). With MMU v3 the physical tag must be written
 * to a separate PTAG register before each line command.
 */
static inline void __cache_line_loop(unsigned long paddr, unsigned long sz,
				     const int cacheop)
{
	unsigned int aux_cmd;
#if (CONFIG_ARC_MMU_VER == 3)
	unsigned int aux_tag;
#endif
	int num_lines;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_AUX_IC_IVIL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_IC_PTAG;
#endif
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_AUX_DC_IVDL : ARC_AUX_DC_FLDL;
#if (CONFIG_ARC_MMU_VER == 3)
		aux_tag = ARC_AUX_DC_PTAG;
#endif
	}

	/* Align start down to a line boundary, extending size to cover it */
	sz += paddr & ~CACHE_LINE_MASK;
	paddr &= CACHE_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l1_line_sz);

	while (num_lines-- > 0) {
#if (CONFIG_ARC_MMU_VER == 3)
		/* MMUv3: program the physical tag before the line command */
		write_aux_reg(aux_tag, paddr);
#endif
		write_aux_reg(aux_cmd, paddr);
		paddr += l1_line_sz;
	}
}
358
359 static unsigned int __before_dc_op(const int op)
360 {
361 unsigned int reg;
362
363 if (op == OP_INV) {
364 /*
365 * IM is set by default and implies Flush-n-inv
366 * Clear it here for vanilla inv
367 */
368 reg = read_aux_reg(ARC_AUX_DC_CTRL);
369 write_aux_reg(ARC_AUX_DC_CTRL, reg & ~DC_CTRL_INV_MODE_FLUSH);
370 }
371
372 return reg;
373 }
374
375 static void __after_dc_op(const int op, unsigned int reg)
376 {
377 if (op & OP_FLUSH) /* flush / flush-n-inv both wait */
378 while (read_aux_reg(ARC_AUX_DC_CTRL) & DC_CTRL_FLUSH_STATUS)
379 ;
380
381 /* Switch back to default Invalidate mode */
382 if (op == OP_INV)
383 write_aux_reg(ARC_AUX_DC_CTRL, reg | DC_CTRL_INV_MODE_FLUSH);
384 }
385
386 static inline void __dc_entire_op(const int cacheop)
387 {
388 int aux;
389 unsigned int ctrl_reg = __before_dc_op(cacheop);
390
391 if (cacheop & OP_INV) /* Inv or flush-n-inv use same cmd reg */
392 aux = ARC_AUX_DC_IVDC;
393 else
394 aux = ARC_AUX_DC_FLSH;
395
396 write_aux_reg(aux, 0x1);
397
398 __after_dc_op(cacheop, ctrl_reg);
399 }
400
/*
 * Perform a line-granular D$/I$ operation on [paddr, paddr + sz),
 * bracketed by the before/after control-register handling.
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long sz,
				const int cacheop)
{
	unsigned int saved_ctrl = __before_dc_op(cacheop);

	__cache_line_loop(paddr, sz, cacheop);
	__after_dc_op(cacheop, saved_ctrl);
}
408 #else
409 #define __dc_entire_op(cacheop)
410 #define __dc_line_op(paddr, sz, cacheop)
411 #endif /* !CONFIG_SYS_DCACHE_OFF */
412
/*
 * Invalidate (discard) data-cache contents for [start, end).
 *
 * On ARCv2 with an IO-Coherency unit present, hardware keeps caches
 * coherent with DMA, so no manual maintenance is performed at all.
 * Otherwise the L1 D$ is invalidated, and on ARCv2 the SLC too.
 */
void invalidate_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_line_op(start, end - start, OP_INV);
#endif
}
425
/*
 * Write back (flush) data-cache contents for [start, end) to memory.
 *
 * On ARCv2 with an IO-Coherency unit present, hardware keeps caches
 * coherent with DMA, so no manual maintenance is performed at all.
 * Otherwise the L1 D$ is flushed, and on ARCv2 the SLC too.
 */
void flush_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_ISA_ARCV2
	if (!ioc_exists)
#endif
		__dc_line_op(start, end - start, OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists && !ioc_exists)
		__slc_line_op(start, end - start, OP_FLUSH);
#endif
}
438
/* Generic U-Boot hook: flush (start, size) by converting to (start, end). */
void flush_cache(unsigned long start, unsigned long size)
{
	unsigned long end = start + size;

	flush_dcache_range(start, end);
}
443
/*
 * Invalidate the entire data-cache hierarchy: L1 D$ always, plus the
 * SLC on ARCv2 when one was detected.
 */
void invalidate_dcache_all(void)
{
	__dc_entire_op(OP_INV);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_INV);
#endif
}
453
/*
 * Flush (write back) the entire data-cache hierarchy: L1 D$ always,
 * plus the SLC on ARCv2 when one was detected.
 */
void flush_dcache_all(void)
{
	__dc_entire_op(OP_FLUSH);

#ifdef CONFIG_ISA_ARCV2
	if (slc_exists)
		__slc_entire_op(OP_FLUSH);
#endif
}