// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * (C) Copyright 2016
 * Alexander Graf <agraf@suse.de>
 */

#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)

/*
 *  With 4k page granule, a virtual address is split into 4 lookup parts
 *  spanning 9 bits each:
 *
 *    _______________________________________________
 *   |       |       |       |       |       |       |
 *   |   0   |  Lv0  |  Lv1  |  Lv2  |  Lv3  |  off  |
 *   |_______|_______|_______|_______|_______|_______|
 *     63-48   47-39   38-30   29-21   20-12   11-00
 *
 *             mask        page size
 *
 *    Lv0: FF8000000000       --
 *    Lv1:   7FC0000000       1G
 *    Lv2:     3FE00000       2M
 *    Lv3:       1FF000       4K
 *    off:          FFF
 */
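
/*
 * Worked example (added for illustration, not part of the upstream file):
 * decoding the virtual address 0x40_1234_5678 with a 4k granule yields
 *
 *   Lv0 index = (va >> 39) & 0x1FF = 0x000
 *   Lv1 index = (va >> 30) & 0x1FF = 0x100
 *   Lv2 index = (va >> 21) & 0x1FF = 0x091
 *   Lv3 index = (va >> 12) & 0x1FF = 0x145
 *   offset    =  va        & 0xFFF = 0x678
 */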
static int get_effective_el(void)
{
	int el = current_el();

	if (el == 2) {
		u64 hcr_el2;

		/*
		 * If we are using the EL2&0 translation regime, the TCR_EL2
		 * looks like the EL1 version, even though we are in EL2.
		 */
		__asm__ ("mrs %0, HCR_EL2\n" : "=r" (hcr_el2));
		if (hcr_el2 & BIT(HCR_EL2_E2H_BIT))
			return 1;
	}

	return el;
}
u64 get_tcr(u64 *pips, u64 *pva_bits)
{
	int el = get_effective_el();
	u64 max_addr = 0;
	u64 ips, va_bits;
	u64 tcr;
	int i;

	/* Find the largest address we need to support */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		max_addr = max(max_addr, mem_map[i].virt + mem_map[i].size);

	/* Calculate the maximum physical (and thus virtual) address */
	if (max_addr > (1ULL << 44)) {
		ips = 5;
		va_bits = 48;
	} else if (max_addr > (1ULL << 42)) {
		ips = 4;
		va_bits = 44;
	} else if (max_addr > (1ULL << 40)) {
		ips = 3;
		va_bits = 42;
	} else if (max_addr > (1ULL << 36)) {
		ips = 2;
		va_bits = 40;
	} else if (max_addr > (1ULL << 32)) {
		ips = 1;
		va_bits = 36;
	} else {
		ips = 0;
		va_bits = 32;
	}

	if (el == 1) {
		tcr = TCR_EL1_RSVD | (ips << 32) | TCR_EPD1_DISABLE;
	} else if (el == 2) {
		tcr = TCR_EL2_RSVD | (ips << 16);
	} else {
		tcr = TCR_EL3_RSVD | (ips << 16);
	}

	/* PTWs cacheable, inner/outer WBWA and inner shareable */
	tcr |= TCR_TG0_4K | TCR_SHARED_INNER | TCR_ORGN_WBWA | TCR_IRGN_WBWA;
	tcr |= TCR_T0SZ(va_bits);

	if (pips)
		*pips = ips;
	if (pva_bits)
		*pva_bits = va_bits;

	return tcr;
}
#define MAX_PTE_ENTRIES 512

static int pte_type(u64 *pte)
{
	return *pte & PTE_TYPE_MASK;
}
/* Returns the LSB number for a PTE on level <level> */
static int level2shift(int level)
{
	/* Page is 12 bits wide, every level translates 9 bits */
	return (12 + 9 * (3 - level));
}
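
/*
 * For reference (added comment, not in the upstream file): the resulting
 * shifts and the address range covered by one entry at each level are
 *
 *   level 0 -> shift 39 -> 512 GiB per entry
 *   level 1 -> shift 30 ->   1 GiB per entry
 *   level 2 -> shift 21 ->   2 MiB per entry
 *   level 3 -> shift 12 ->   4 KiB per entry
 */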
static u64 *find_pte(u64 addr, int level)
{
	int start_level = 0;
	u64 *pte;
	u64 idx;
	u64 va_bits;
	int i;

	debug("addr=%llx level=%d\n", addr, level);

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	if (level < start_level)
		return NULL;

	/* Walk through all page table levels to find our PTE */
	pte = (u64 *)gd->arch.tlb_addr;
	for (i = start_level; i < 4; i++) {
		idx = (addr >> level2shift(i)) & 0x1FF;
		pte += idx;
		debug("idx=%llx PTE %p at level %d: %llx\n", idx, pte, i, *pte);

		/* Found it */
		if (i == level)
			return pte;
		/* PTE is no table (either invalid or block), can't traverse */
		if (pte_type(pte) != PTE_TYPE_TABLE)
			return NULL;
		/* Off to the next level */
		pte = (u64 *)(*pte & 0x0000fffffffff000ULL);
	}

	/* Should never reach here */
	return NULL;
}
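
/*
 * Illustrative use (added comment, not in the upstream file): with the
 * example address above, find_pte(0x4012345678, 2) walks the Lv0 and Lv1
 * table entries and returns a pointer to the Lv2 entry, or NULL if a
 * block or invalid entry is hit before level 2 is reached.
 */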
/* Returns and creates a new full table (512 entries) */
static u64 *create_table(void)
{
	u64 *new_table = (u64 *)gd->arch.tlb_fillptr;
	u64 pt_len = MAX_PTE_ENTRIES * sizeof(u64);

	/* Allocate MAX_PTE_ENTRIES pte entries */
	gd->arch.tlb_fillptr += pt_len;

	if (gd->arch.tlb_fillptr - gd->arch.tlb_addr > gd->arch.tlb_size)
		panic("Insufficient RAM for page table: 0x%lx > 0x%lx. "
		      "Please increase the size in get_page_table_size()",
			gd->arch.tlb_fillptr - gd->arch.tlb_addr,
			gd->arch.tlb_size);

	/* Mark all entries as invalid */
	memset(new_table, 0, pt_len);

	return new_table;
}
static void set_pte_table(u64 *pte, u64 *table)
{
	/* Point *pte to the new table */
	debug("Setting %p to addr=%p\n", pte, table);
	*pte = PTE_TYPE_TABLE | (ulong)table;
}
/* Splits a block PTE into a table with subpages spanning the old block */
static void split_block(u64 *pte, int level)
{
	u64 old_pte = *pte;
	u64 *new_table;
	u64 i = 0;
	/* level describes the parent level, we need the child ones */
	int levelshift = level2shift(level + 1);

	if (pte_type(pte) != PTE_TYPE_BLOCK)
		panic("PTE %p (%llx) is not a block. Some driver code wants to "
		      "modify dcache settings for a range not covered in "
		      "mem_map.", pte, old_pte);

	new_table = create_table();
	debug("Splitting pte %p (%llx) into %p\n", pte, old_pte, new_table);

	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
		new_table[i] = old_pte | (i << levelshift);

		/* Level 3 block PTEs have the table type */
		if ((level + 1) == 3)
			new_table[i] |= PTE_TYPE_TABLE;

		debug("Setting new_table[%lld] = %llx\n", i, new_table[i]);
	}

	/* Set the new table into effect */
	set_pte_table(pte, new_table);
}
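
/*
 * Example (added comment, not in the upstream file): splitting a 2M block
 * PTE at level 2 produces a table of 512 level-3 entries, each mapping 4K
 * and inheriting the old block's output address and attributes:
 *
 *   new_table[0] = old_pte | (0 << 12) | PTE_TYPE_TABLE
 *   new_table[1] = old_pte | (1 << 12) | PTE_TYPE_TABLE
 *   ...
 */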
/* Add one mm_region map entry to the page tables */
static void add_map(struct mm_region *map)
{
	u64 *pte;
	u64 virt = map->virt;
	u64 phys = map->phys;
	u64 size = map->size;
	u64 attrs = map->attrs | PTE_TYPE_BLOCK | PTE_BLOCK_AF;
	u64 blocksize;
	int level;
	u64 *new_table;

	while (size) {
		pte = find_pte(virt, 0);
		if (pte && (pte_type(pte) == PTE_TYPE_FAULT)) {
			debug("Creating table for virt 0x%llx\n", virt);
			new_table = create_table();
			set_pte_table(pte, new_table);
		}

		for (level = 1; level < 4; level++) {
			pte = find_pte(virt, level);
			if (!pte)
				panic("pte not found\n");

			blocksize = 1ULL << level2shift(level);
			debug("Checking if pte fits for virt=%llx size=%llx blocksize=%llx\n",
			      virt, size, blocksize);
			if (size >= blocksize && !(virt & (blocksize - 1))) {
				/* Page fits, create block PTE */
				debug("Setting PTE %p to block virt=%llx\n",
				      pte, virt);
				if (level == 3)
					*pte = phys | attrs | PTE_TYPE_PAGE;
				else
					*pte = phys | attrs;
				virt += blocksize;
				phys += blocksize;
				size -= blocksize;
				break;
			} else if (pte_type(pte) == PTE_TYPE_FAULT) {
				/* Page doesn't fit, create subpages */
				debug("Creating subtable for virt 0x%llx blksize=%llx\n",
				      virt, blocksize);
				new_table = create_table();
				set_pte_table(pte, new_table);
			} else if (pte_type(pte) == PTE_TYPE_BLOCK) {
				debug("Split block into subtable for virt 0x%llx blksize=0x%llx\n",
				      virt, blocksize);
				split_block(pte, level);
			}
		}
	}
}
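
/*
 * Illustrative mem_map entry (added for clarity, not part of this file;
 * boards define their own): a 2 GiB cacheable DRAM region at 0x0 could
 * be described as
 *
 *   { .virt = 0x0, .phys = 0x0, .size = 0x80000000,
 *     .attrs = PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_INNER_SHARE },
 *
 * and add_map() would then cover it with 1G and/or 2M block PTEs as
 * alignment allows.
 */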
enum pte_type {
	PTE_INVAL,
	PTE_BLOCK,
	PTE_LEVEL,
};

/*
 * This is a recursively called function to count the number of
 * page tables we need to cover a particular PTE range. If you
 * call this with level = -1 you basically get the full 48 bit
 * coverage.
 */
static int count_required_pts(u64 addr, int level, u64 maxaddr)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 levelmask = levelsize - 1;
	u64 levelend = addr + levelsize;
	int r = 0;
	int i;
	enum pte_type pte_type = PTE_INVAL;

	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++) {
		struct mm_region *map = &mem_map[i];
		u64 start = map->virt;
		u64 end = start + map->size;

		/* Check if the PTE would overlap with the map */
		if (max(addr, start) <= min(levelend, end)) {
			start = max(addr, start);
			end = min(levelend, end);

			/* We need a sub-pt for this level */
			if ((start & levelmask) || (end & levelmask)) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* Lv0 can not do block PTEs, so do levels here too */
			if (level <= 0) {
				pte_type = PTE_LEVEL;
				break;
			}

			/* PTE is active, but fits into a block */
			pte_type = PTE_BLOCK;
		}
	}

	/*
	 * Block PTEs at this level are already covered by the parent page
	 * table, so we only need to count sub page tables.
	 */
	if (pte_type == PTE_LEVEL) {
		int sublevel = level + 1;
		u64 sublevelsize = 1ULL << level2shift(sublevel);

		/* Account for the new sub page table ... */
		r = 1;

		/* ... and for all child page tables that one might have */
		for (i = 0; i < MAX_PTE_ENTRIES; i++) {
			r += count_required_pts(addr, sublevel, maxaddr);
			addr += sublevelsize;

			if (addr >= maxaddr) {
				/*
				 * We reached the end of address space, no need
				 * to look any further.
				 */
				break;
			}
		}
	}

	return r;
}
/* Returns the estimated required size of all page tables */
__weak u64 get_page_table_size(void)
{
	u64 one_pt = MAX_PTE_ENTRIES * sizeof(u64);
	u64 size = 0;
	u64 va_bits;
	int start_level = 0;

	get_tcr(NULL, &va_bits);
	if (va_bits < 39)
		start_level = 1;

	/* Account for all page tables we would need to cover our memory map */
	size = one_pt * count_required_pts(0, start_level - 1, 1ULL << va_bits);

	/*
	 * We need to duplicate our page table once to have an emergency pt to
	 * resort to when splitting page tables later on
	 */
	size *= 2;

	/*
	 * We may need to split page tables later on if dcache settings change,
	 * so reserve up to 4 (random pick) page tables for that.
	 */
	size += one_pt * 4;

	return size;
}
void setup_pgtables(void)
{
	int i;

	if (!gd->arch.tlb_fillptr || !gd->arch.tlb_addr)
		panic("Page table pointer not setup.");

	/*
	 * Allocate the first level we're on with invalid entries.
	 * If the starting level is 0 (va_bits >= 39), then this is our
	 * Lv0 page table, otherwise it's the entry Lv1 page table.
	 */
	create_table();

	/* Now add all MMU table entries one after another to the table */
	for (i = 0; mem_map[i].size || mem_map[i].attrs; i++)
		add_map(&mem_map[i]);
}
static void setup_all_pgtables(void)
{
	u64 tlb_addr = gd->arch.tlb_addr;
	u64 tlb_size = gd->arch.tlb_size;

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_size -= (uintptr_t)gd->arch.tlb_fillptr -
			     (uintptr_t)gd->arch.tlb_addr;
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	setup_pgtables();
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	gd->arch.tlb_addr = tlb_addr;
	gd->arch.tlb_size = tlb_size;
}
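
/*
 * Resulting layout of the TLB memory area (added comment, not in the
 * upstream file):
 *
 *   tlb_addr                        tlb_emerg
 *      |  primary page tables  ...  |  emergency page tables  ... |
 *
 * The emergency tables are only active while the primary ones are being
 * modified (see mmu_set_region_dcache_behaviour()).
 */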
/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
	int el;

	/* Set up page tables only once */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	el = current_el();
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(NULL, NULL),
			  MEMORY_ATTRIBUTES);

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}
/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
	__asm_invalidate_l3_dcache();
}
/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using the stack.
 * __asm_flush_l3_dcache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_dcache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}
#ifndef CONFIG_SYS_DISABLE_DCACHE_OPS
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_invalidate_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
#else
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
}

void flush_dcache_range(unsigned long start, unsigned long stop)
{
}
#endif /* CONFIG_SYS_DISABLE_DCACHE_OPS */
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	/* Set up page tables only once (it is done also by mmu_setup()) */
	if (!gd->arch.tlb_fillptr)
		setup_all_pgtables();

	set_sctlr(get_sctlr() | CR_C);
}
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C | CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}
int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}
u64 *__weak arch_get_page_table(void)
{
	puts("No page table offset defined\n");

	return NULL;
}
static bool is_aligned(u64 addr, u64 size, u64 align)
{
	return !(addr & (align - 1)) && !(size & (align - 1));
}
/* Use flag to indicate if attrs has more than d-cache attributes */
static u64 set_one_region(u64 start, u64 size, u64 attrs, bool flag,
			  int level)
{
	int levelshift = level2shift(level);
	u64 levelsize = 1ULL << levelshift;
	u64 *pte = find_pte(start, level);

	/* Can we just modify the current level block PTE? */
	if (is_aligned(start, size, levelsize)) {
		if (flag) {
			*pte &= ~PMD_ATTRMASK;
			*pte |= attrs & PMD_ATTRMASK;
		} else {
			*pte &= ~PMD_ATTRINDX_MASK;
			*pte |= attrs & PMD_ATTRINDX_MASK;
		}
		debug("Set attrs=%llx pte=%p level=%d\n", attrs, pte, level);

		return levelsize;
	}

	/* Unaligned or doesn't fit, maybe split block into table */
	debug("addr=%llx level=%d pte=%p (%llx)\n", start, level, pte, *pte);

	/* Maybe we need to split the block into a table */
	if (pte_type(pte) == PTE_TYPE_BLOCK)
		split_block(pte, level);

	/* And then double-check it became a table or already is one */
	if (pte_type(pte) != PTE_TYPE_TABLE)
		panic("PTE %p (%llx) for addr=%llx should be a table",
		      pte, *pte, start);

	/* Roll on to the next page table level */
	return 0;
}
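
/*
 * Return-value convention of set_one_region() (added summary, not in the
 * upstream file): a non-zero return is the number of bytes whose
 * attributes were updated at this level, so the caller advances its
 * cursor by that amount; a return of 0 means the region did not fit at
 * this level and the caller must retry one level deeper, against the
 * table this call may just have created by splitting a block.
 */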
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 attrs = PMD_ATTRINDX(option >> 2);
	u64 real_start = start;
	u64 real_size = size;

	debug("start=%lx size=%lx\n", (ulong)start, (ulong)size);

	if (!gd->arch.tlb_emerg)
		panic("Emergency page table not setup.");

	/*
	 * We can not modify page tables that we're currently running on,
	 * so we first need to switch to the "emergency" page tables where
	 * we can safely modify our primary page tables and then switch back
	 */
	__asm_switch_ttbr(gd->arch.tlb_emerg);

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	while (size > 0) {
		int level;
		u64 r;

		for (level = 1; level < 4; level++) {
			/* Set d-cache attributes only */
			r = set_one_region(start, size, attrs, false, level);
			if (r) {
				/* PTE successfully replaced */
				size -= r;
				start += r;
				break;
			}
		}
	}

	/* We're done modifying page tables, switch back to our primary ones */
	__asm_switch_ttbr(gd->arch.tlb_addr);

	/*
	 * Make sure there's nothing stale in dcache for a region that might
	 * have caches off now
	 */
	flush_dcache_range(real_start, real_start + real_size);
}
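
/*
 * Example use from driver code (added for illustration, not in the
 * upstream file): mark a 2 MiB DMA buffer as uncached. The region must be
 * covered by an entry in mem_map, or split_block() will panic:
 *
 *   mmu_set_region_dcache_behaviour(buf_addr, SZ_2M, DCACHE_OFF);
 */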
/*
 * Modify MMU table for a region with updated PXN/UXN/Memory type/valid bits.
 * The process is break-before-make: the target region is marked as invalid
 * while the change is made.
 */
void mmu_change_region_attr(phys_addr_t addr, size_t siz, u64 attrs)
{
	int level;
	u64 r, size, start;

	start = addr;
	size = siz;
	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to "invalid".
	 */
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to fault */
			r = set_one_region(start, size, PTE_TYPE_FAULT, true,
					   level);
			if (r) {
				/* PTE successfully invalidated */
				size -= r;
				start += r;
				break;
			}
		}
	}

	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();

	/*
	 * Loop through the address range until we find a page granule that
	 * fits our alignment constraints, then set it to the new cache
	 * attributes
	 */
	start = addr;
	size = siz;
	while (size > 0) {
		for (level = 1; level < 4; level++) {
			/* Set PTE to new attributes */
			r = set_one_region(start, size, attrs, true, level);
			if (r) {
				/* PTE successfully updated */
				size -= r;
				start += r;
				break;
			}
		}
	}
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);
	__asm_invalidate_tlb_all();
}
#else	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */

/*
 * For SPL builds, we may want to not have dcache enabled. Any real U-Boot
 * running however really wants to have dcache and the MMU active. Check that
 * everything is sane and give the developer a hint if it isn't.
 */
#ifndef CONFIG_SPL_BUILD
#error Please describe your MMU layout in CONFIG_SYS_MEM_MAP and enable dcache.
#endif

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
#if !CONFIG_IS_ENABLED(SYS_ICACHE_OFF)

void icache_enable(void)
{
	invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

int mmu_status(void)
{
	return (get_sctlr() & CR_M) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
	__asm_invalidate_l3_icache();
}

#else	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

int mmu_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* !CONFIG_IS_ENABLED(SYS_ICACHE_OFF) */
/*
 * Enable dCache & iCache; whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}