/*
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF

#ifdef CONFIG_SYS_FULL_VA
static void set_ptl1_entry(u64 index, u64 ptl2_entry)
{
	u64 *pgd = (u64 *)gd->arch.tlb_addr;
	u64 value;

	value = ptl2_entry | PTL1_TYPE_TABLE;
	pgd[index] = value;
}
static void set_ptl2_block(u64 ptl1, u64 bfn, u64 address, u64 memory_attrs)
{
	u64 *pmd = (u64 *)ptl1;
	u64 value;

	value = address | PTL2_TYPE_BLOCK | PTL2_BLOCK_AF;
	value |= memory_attrs;
	pmd[bfn] = value;
}
static struct mm_region mem_map[] = CONFIG_SYS_MEM_MAP;

#define PTL1_ENTRIES CONFIG_SYS_PTL1_ENTRIES
#define PTL2_ENTRIES CONFIG_SYS_PTL2_ENTRIES
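
/*
 * Build the two-level page tables described by CONFIG_SYS_MEM_MAP:
 * the level-2 (PTL2) tables are carved out of the memory directly
 * behind the level-1 (PTL1) table at gd->arch.tlb_addr, and each
 * PTL1 slot either points at the PTL2 table covering a mem_map
 * region or is left invalid.
 */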
static void setup_pgtables(void)
{
	int l1_e, l2_e;
	unsigned long pmd = 0;
	unsigned long address;

	/* Setup the PMD pointers */
	for (l1_e = 0; l1_e < CONFIG_SYS_MEM_MAP_SIZE; l1_e++) {
		gd->arch.pmd_addr[l1_e] = gd->arch.tlb_addr +
						PTL1_ENTRIES * sizeof(u64);
		gd->arch.pmd_addr[l1_e] += PTL2_ENTRIES * sizeof(u64) * l1_e;
		gd->arch.pmd_addr[l1_e] = ALIGN(gd->arch.pmd_addr[l1_e],
						0x10000UL);
	}

	/* Setup the page tables */
	for (l1_e = 0; l1_e < PTL1_ENTRIES; l1_e++) {
		if (mem_map[pmd].base ==
			(uintptr_t)l1_e << PTL2_BITS) {
			set_ptl1_entry(l1_e, gd->arch.pmd_addr[pmd]);

			for (l2_e = 0; l2_e < PTL2_ENTRIES; l2_e++) {
				address = mem_map[pmd].base
					+ (uintptr_t)l2_e * BLOCK_SIZE;
				set_ptl2_block(gd->arch.pmd_addr[pmd], l2_e,
					       address, mem_map[pmd].attrs);
			}

			pmd++;
		} else {
			set_ptl1_entry(l1_e, 0);
		}
	}
}
#else

inline void set_pgtable_section(u64 *page_table, u64 index, u64 section,
				u64 memory_type, u64 attribute)
{
	u64 value;

	value = section | PMD_TYPE_SECT | PMD_SECT_AF;
	value |= PMD_ATTRINDX(memory_type);
	value |= attribute;
	page_table[index] = value;
}

inline void set_pgtable_table(u64 *page_table, u64 index, u64 *table_addr)
{
	u64 value;

	value = (u64)table_addr | PMD_TYPE_TABLE;
	page_table[index] = value;
}

#endif
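
/*
 * mmu_setup() below comes in two flavours: with CONFIG_SYS_FULL_VA the
 * tables built by setup_pgtables() are used (on the BSP only); otherwise
 * a flat identity map of section entries is created, marking everything
 * as device memory and then re-marking the DRAM banks as normal memory.
 * Finally TTBR, TCR and MAIR are programmed for the current exception
 * level and the MMU is switched on.
 */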
/* to activate the MMU we need to set up virtual memory */
__weak void mmu_setup(void)
{
#ifndef CONFIG_SYS_FULL_VA
	bd_t *bd = gd->bd;
	u64 *page_table = (u64 *)gd->arch.tlb_addr, i, j;
#endif
	int el;

#ifdef CONFIG_SYS_FULL_VA
	unsigned long coreid = read_mpidr() & CONFIG_COREID_MASK;

	/* Set up page tables only on BSP */
	if (coreid == BSP_COREID)
		setup_pgtables();
#else
	/* Setup an identity-mapping for all spaces */
	for (i = 0; i < (PGTABLE_SIZE >> 3); i++) {
		set_pgtable_section(page_table, i, i << SECTION_SHIFT,
				    MT_DEVICE_NGNRNE, PMD_SECT_NON_SHARE);
	}

	/* Setup an identity-mapping for all RAM space */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
		ulong start = bd->bi_dram[i].start;
		ulong end = bd->bi_dram[i].start + bd->bi_dram[i].size;
		for (j = start >> SECTION_SHIFT;
		     j < end >> SECTION_SHIFT; j++) {
			set_pgtable_section(page_table, j, j << SECTION_SHIFT,
					    MT_NORMAL, PMD_SECT_NON_SHARE);
		}
	}
#endif

	/* load TTBR0 */
	el = current_el();
	if (el == 1) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL1_RSVD | TCR_FLAGS | TCR_EL1_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else if (el == 2) {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL2_RSVD | TCR_FLAGS | TCR_EL2_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	} else {
		set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
				  TCR_EL3_RSVD | TCR_FLAGS | TCR_EL3_IPS_BITS,
				  MEMORY_ATTRIBUTES);
	}

	/* enable the mmu */
	set_sctlr(get_sctlr() | CR_M);
}
/*
 * Performs an invalidation of the entire data cache at all levels
 */
void invalidate_dcache_all(void)
{
	__asm_invalidate_dcache_all();
}

/*
 * Performs a clean & invalidation of the entire data cache at all levels.
 * This function needs to be inline to avoid using stack.
 * __asm_flush_l3_cache returns the timeout status.
 */
inline void flush_dcache_all(void)
{
	int ret;

	__asm_flush_dcache_all();
	ret = __asm_flush_l3_cache();
	if (ret)
		debug("flushing dcache returns 0x%x\n", ret);
	else
		debug("flushing dcache successfully.\n");
}
/*
 * Invalidates range in all levels of D-cache/unified cache
 */
void invalidate_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}

/*
 * Flush range (clean & invalidate) from all levels of D-cache/unified cache
 */
void flush_dcache_range(unsigned long start, unsigned long stop)
{
	__asm_flush_dcache_range(start, stop);
}
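
/*
 * The D-cache only takes effect once the MMU is on, so dcache_enable()
 * brings up the translation regime first if the M bit is still clear.
 */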
void dcache_enable(void)
{
	/* The data cache is not active unless the mmu is enabled */
	if (!(get_sctlr() & CR_M)) {
		invalidate_dcache_all();
		__asm_invalidate_tlb_all();
		mmu_setup();
	}

	set_sctlr(get_sctlr() | CR_C);
}
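
/*
 * Disabling works the other way round: clear the C and M bits first so
 * no new lines are allocated, then clean & invalidate the whole D-cache
 * and drop the stale TLB entries.
 */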
void dcache_disable(void)
{
	uint32_t sctlr;

	sctlr = get_sctlr();

	/* if cache isn't enabled no need to disable */
	if (!(sctlr & CR_C))
		return;

	set_sctlr(sctlr & ~(CR_C|CR_M));

	flush_dcache_all();
	__asm_invalidate_tlb_all();
}
int dcache_status(void)
{
	return (get_sctlr() & CR_C) != 0;
}
u64 *__weak arch_get_page_table(void) {
	puts("No page table offset defined\n");

	return NULL;
}
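
/*
 * With the legacy flat tables (no CONFIG_SYS_FULL_VA),
 * mmu_set_region_dcache_behaviour() rewrites the attribute-index bits of
 * every section entry covering [start, start + size), invalidates the
 * TLBs and finally flushes the region itself from the data cache so the
 * new cacheability setting takes effect.
 */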
#ifndef CONFIG_SYS_FULL_VA
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
	u64 *page_table = arch_get_page_table();
	u64 upto, end;

	if (page_table == NULL)
		return;

	end = ALIGN(start + size, (1 << MMU_SECTION_SHIFT)) >>
	      MMU_SECTION_SHIFT;
	start = start >> MMU_SECTION_SHIFT;
	for (upto = start; upto < end; upto++) {
		page_table[upto] &= ~PMD_ATTRINDX_MASK;
		page_table[upto] |= PMD_ATTRINDX(option);
	}
	asm volatile("dsb sy");
	__asm_invalidate_tlb_all();
	asm volatile("dsb sy");
	asm volatile("isb");
	start = start << MMU_SECTION_SHIFT;
	end = end << MMU_SECTION_SHIFT;
	flush_dcache_range(start, end);
	asm volatile("dsb sy");
}
#endif
#else	/* CONFIG_SYS_DCACHE_OFF */

void invalidate_dcache_all(void)
{
}

void flush_dcache_all(void)
{
}

void dcache_enable(void)
{
}

void dcache_disable(void)
{
}

int dcache_status(void)
{
	return 0;
}

void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option)
{
}

#endif	/* CONFIG_SYS_DCACHE_OFF */
#ifndef CONFIG_SYS_ICACHE_OFF

void icache_enable(void)
{
	__asm_invalidate_icache_all();
	set_sctlr(get_sctlr() | CR_I);
}

void icache_disable(void)
{
	set_sctlr(get_sctlr() & ~CR_I);
}

int icache_status(void)
{
	return (get_sctlr() & CR_I) != 0;
}

void invalidate_icache_all(void)
{
	__asm_invalidate_icache_all();
}

#else	/* CONFIG_SYS_ICACHE_OFF */

void icache_enable(void)
{
}

void icache_disable(void)
{
}

int icache_status(void)
{
	return 0;
}

void invalidate_icache_all(void)
{
}

#endif	/* CONFIG_SYS_ICACHE_OFF */
/*
 * Enable dCache & iCache. Whether the caches are actually enabled
 * depends on CONFIG_SYS_DCACHE_OFF and CONFIG_SYS_ICACHE_OFF.
 */
void __weak enable_caches(void)
{
	icache_enable();
	dcache_enable();
}