// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 * With 4k page granule, a virtual address is split into 4 lookup parts
 * spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *    63-48   47-39   38-30   29-21   20-12   11-00
 *
 *              mask        page size
 *
 * Lv0: FF8000000000       --
 * Lv1:   7FC0000000       1G
 * Lv2:     3FE00000       2M
 * Lv3:       1FF000       4K
 * off:          FFF
 */
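
/*
 * Worked example (illustrative): for virtual address 0x40201000,
 *
 *   Lv0 index = (0x40201000 >> 39) & 0x1FF = 0
 *   Lv1 index = (0x40201000 >> 30) & 0x1FF = 1
 *   Lv2 index = (0x40201000 >> 21) & 0x1FF = 1
 *   Lv3 index = (0x40201000 >> 12) & 0x1FF = 1
 *   off       =  0x40201000 & 0xFFF        = 0
 *
 * so a table walk visits entry 0 of Lv0 (when va_bits requires a level 0
 * table at all), then entry 1 at each of Lv1..Lv3.
 */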

u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
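
/*
 * Sizing example (illustrative): a board whose highest mem_map entry ends
 * at 0x2_0000_0000 (8 GiB) satisfies max_addr > (1ULL << 32) but not
 * max_addr > (1ULL << 36), so get_tcr() picks ips = 1 and va_bits = 36,
 * i.e. a T0SZ field of 64 - 36 = 28.
 */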

#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page offset is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}

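/*
 * For instance, level2shift(1) = 30 (1 GiB blocks), level2shift(2) = 21
 * (2 MiB blocks) and level2shift(3) = 12 (4 KiB pages).
 */
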
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}

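/*
 * Usage sketch (illustrative): find_pte(0x40201000, 2) walks the tables
 * from the configured start level down and returns a pointer to the
 * level 2 entry covering that address, or NULL if the walk hits a block
 * or invalid entry first.
 */
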
/* Returns and creates a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
		      gd->arch.tlb_fillptr - gd->arch.tlb_addr,
		      gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
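
/*
 * Illustrative effect: splitting a 2 MiB level 2 block yields a level 3
 * table whose entry i maps old_phys + i * 4 KiB (old_phys being the old
 * block's output address) with the attributes of the old block, plus
 * PTE_TYPE_TABLE, which doubles as the level 3 page type: both encode
 * 0b11 in the descriptor's low bits.
 */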

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}
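
/*
 * Mapping example (illustrative): a 1 GiB + 2 MiB region starting at
 * virt 0x40000000 is laid down greedily: one 1 GiB level 1 block for the
 * first gigabyte, then one 2 MiB level 2 block for the remainder. A
 * region that is not block-aligned keeps descending until 4 KiB level 3
 * pages fit.
 */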

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}
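
/*
 * Counting example (illustrative): with a single 4 GiB mem_map entry at
 * address 0 and va_bits = 40, count_required_pts(0, -1, 1ULL << 40)
 * returns 2: one level 0 table plus one level 1 table whose four 1 GiB
 * entries map the region as blocks.
 */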

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
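
/*
 * Continuing the example above: 2 tables * 4 KiB = 8 KiB, doubled for the
 * emergency set = 16 KiB, plus 4 spare tables (16 KiB) = 32 KiB (0x8000)
 * of page table memory.
 */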

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}
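
/*
 * Resulting layout (illustrative): the page table area then holds two
 * equivalent sets of tables back to back:
 *
 *   gd->arch.tlb_addr  -> primary tables, active via TTBR
 *   gd->arch.tlb_emerg -> emergency copy, switched to while the primary
 *                         set is modified (see
 *                         mmu_set_region_dcache_behaviour() below)
 */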

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushed dcache successfully.\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}
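
/*
 * Contract used by the callers below: a non-zero return value is the
 * number of bytes now covered at this level, so the caller advances by
 * that much; a return of 0 means "descend one level and retry".
 */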

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We cannot modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
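
/*
 * Usage sketch (illustrative; fb_base is a hypothetical address): to map
 * a 2 MiB framebuffer uncached, a driver could call
 *
 *   mmu_set_region_dcache_behaviour(fb_base, 0x200000, DCACHE_OFF);
 *
 * with DCACHE_OFF taken from enum dcache_option in asm/system.h.
 */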

/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make. The target region will be marked as
 * invalid during the process of changing.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
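
/*
 * Usage sketch (illustrative; address and attribute choice are made up):
 * to remap a 2 MiB region as normal memory that is never executed, one
 * could call
 *
 *   mmu_change_region_attr(0x80000000, 0x200000,
 *                          PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *                          PTE_BLOCK_PXN | PTE_BLOCK_UXN |
 *                          PTE_TYPE_VALID);
 *
 * using the PTE_BLOCK_* and MT_* macros from asm/armv8/mmu.h.
 */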

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any U-Boot
 * proper, however, really wants to have dcache and the MMU active. Check
 * that everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif /* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches actually get enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}