// SPDX-License-Identifier: GPL-2.0+
/*
 * David Feng <fenghua@phytium.com.cn>
 * Alexander Graf <agraf@suse.de>
 */

#include <asm/cache.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 *  With 4k page granule, a virtual address is split into 4 lookup parts
 *  spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */
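
/*
 * Worked example (illustration only, not part of the lookup code): with the
 * 4k granule, VA 0x4012345678 decomposes as
 *
 *	Lv0 index = (va >> 39) & 0x1FF = 0
 *	Lv1 index = (va >> 30) & 0x1FF = 0x100
 *	Lv2 index = (va >> 21) & 0x1FF = 0x91
 *	Lv3 index = (va >> 12) & 0x1FF = 0x145
 *	offset    =  va        & 0xFFF = 0x678
 */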

u64 get_tcr(int el, u64 *pips, u64 *pva_bits)
{
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
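
/*
 * Illustrative example: a board whose highest mapped address is 8 GiB
 * (max_addr = 1ULL << 33) falls into the "> (1ULL << 32)" bucket above, so
 * it runs with ips = 1 (36-bit intermediate physical addresses) and
 * va_bits = 36, i.e. T0SZ = 64 - 36 = 28.
 */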

#define MAX_PTE_ENTRIES	512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}

/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
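
/*
 * Illustrative values: level 3 -> shift 12 (4 KiB pages), level 2 -> shift 21
 * (2 MiB blocks), level 1 -> shift 30 (1 GiB blocks), level 0 -> shift 39.
 */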

static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
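
/*
 * Illustrative walk: with va_bits >= 39 (start_level 0),
 * find_pte(0x40000000, 2) indexes Lv0 entry 0, follows it to the Lv1 table,
 * indexes entry 1 there, follows that to the Lv2 table and returns a pointer
 * to Lv2 entry 0. Any block or invalid entry on the way yields NULL instead.
 */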

/* Returns and creates a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
			gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}

static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}

/* Splits a block PTE into table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}

/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}

enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}
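
/*
 * Worked example: for a single 2 GiB map starting at VA 0 in a 48-bit VA
 * space, count_required_pts(0, -1, 1ULL << 48) counts the Lv0 table (level
 * -1 always descends), then one Lv1 table for the first 512 GiB chunk; the
 * two 1 GiB regions inside it are block-aligned, so no Lv2/Lv3 tables are
 * needed and the total is 2 page tables.
 */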

/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(0, NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += 4 * one_pt;

	return size;
}
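
/*
 * Illustrative arithmetic: if count_required_pts() reports 2 tables (as in
 * the worked example above), one_pt is 512 * 8 = 4 KiB, so the estimate is
 * 2 * 4 KiB * 2 + 4 * 4 KiB = 32 KiB of page table memory.
 */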

void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalidated entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}

static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}

/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_dcache returns the timeout status of the flush.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}

#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */

void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}

void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C | CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}

int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}

u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}

static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}

/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag, int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
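
/*
 * Usage sketch (hypothetical addresses): a driver could mark a 2 MiB DMA
 * buffer uncached with
 *
 *	mmu_set_region_dcache_behaviour(0x90000000, 0x200000, DCACHE_OFF);
 *
 * As long as base and size are 2 MiB aligned this rewrites a single level-2
 * block PTE; otherwise blocks get split down to 4 KiB pages first.
 */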

/*
 * Modify MMU table for a region with updated PXN/UXN/memory type/valid bits.
 * The process is break-before-make: the target region is marked invalid
 * while its attributes are being changed.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that fits
	 * our alignment constraints, then set it to the new cache attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
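
/*
 * Usage sketch (hypothetical values): making 2 MiB at 0x80000000 cacheable
 * but never executable could look like
 *
 *	mmu_change_region_attr(0x80000000, 0x200000,
 *			       PTE_BLOCK_MEMTYPE(MT_NORMAL) |
 *			       PTE_BLOCK_INNER_SHARE | PTE_BLOCK_PXN |
 *			       PTE_BLOCK_UXN | PTE_TYPE_BLOCK | PTE_BLOCK_AF);
 *
 * Note the caller passes the full PTE attribute set, including the type and
 * access-flag bits, since set_one_region() is invoked with flag = true here.
 */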

#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may not want dcache enabled. Any real U-Boot running
 * however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

/*
 * Enable dCache & iCache; whether the caches actually get enabled depends
 * on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}