// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2017-2021 NXP
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <clock_legacy.h>
#include <asm/cache.h>
#include <asm/global_data.h>
#include <asm/ptrace.h>
#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr_sdram.h>
#endif
#include <asm/arch/clock.h>
#include <fsl_qbman.h>
#include <env_internal.h>
#ifdef CONFIG_CHAIN_OF_TRUST
#include <fsl_validate.h>
#endif
#include <linux/mii.h>

DECLARE_GLOBAL_DATA_PTR;
static struct cpu_type cpu_type_list[] = {
	CPU_TYPE_ENTRY(LS2080A, LS2080A, 8),
	CPU_TYPE_ENTRY(LS2085A, LS2085A, 8),
	CPU_TYPE_ENTRY(LS2045A, LS2045A, 4),
	CPU_TYPE_ENTRY(LS2088A, LS2088A, 8),
	CPU_TYPE_ENTRY(LS2084A, LS2084A, 8),
	CPU_TYPE_ENTRY(LS2048A, LS2048A, 4),
	CPU_TYPE_ENTRY(LS2044A, LS2044A, 4),
	CPU_TYPE_ENTRY(LS2081A, LS2081A, 8),
	CPU_TYPE_ENTRY(LS2041A, LS2041A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A, 4),
	CPU_TYPE_ENTRY(LS1043A, LS1043A_P23, 4),
	CPU_TYPE_ENTRY(LS1023A, LS1023A, 2),
	CPU_TYPE_ENTRY(LS1023A, LS1023A_P23, 2),
	CPU_TYPE_ENTRY(LS1046A, LS1046A, 4),
	CPU_TYPE_ENTRY(LS1026A, LS1026A, 2),
	CPU_TYPE_ENTRY(LS2040A, LS2040A, 4),
	CPU_TYPE_ENTRY(LS1012A, LS1012A, 1),
	CPU_TYPE_ENTRY(LS1017A, LS1017A, 1),
	CPU_TYPE_ENTRY(LS1018A, LS1018A, 1),
	CPU_TYPE_ENTRY(LS1027A, LS1027A, 2),
	CPU_TYPE_ENTRY(LS1028A, LS1028A, 2),
	CPU_TYPE_ENTRY(LS1088A, LS1088A, 8),
	CPU_TYPE_ENTRY(LS1084A, LS1084A, 8),
	CPU_TYPE_ENTRY(LS1048A, LS1048A, 4),
	CPU_TYPE_ENTRY(LS1044A, LS1044A, 4),
	CPU_TYPE_ENTRY(LX2160A, LX2160A, 16),
	CPU_TYPE_ENTRY(LX2120A, LX2120A, 12),
	CPU_TYPE_ENTRY(LX2080A, LX2080A, 8),
	CPU_TYPE_ENTRY(LX2162A, LX2162A, 16),
	CPU_TYPE_ENTRY(LX2122A, LX2122A, 12),
	CPU_TYPE_ENTRY(LX2082A, LX2082A, 8),
};
#define EARLY_PGTABLE_SIZE 0x5000
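/*
 * early_map is the MMU table used before DRAM is initialized. DRAM is
 * deliberately mapped as nGnRnE device memory with PXN/UXN set so that no
 * speculative access can reach it before the controller is configured;
 * only OCRAM and the boot flash windows are mapped cacheable at this point.
 */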
static struct mm_region early_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE},
#ifdef CONFIG_FSL_IFC
	/* For IFC Region #1, only the first 4MB is cache-enabled */
	{ CONFIG_SYS_FSL_IFC_BASE1, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_BASE1 + CONFIG_SYS_FSL_IFC_SIZE1_1,
	  CONFIG_SYS_FSL_IFC_SIZE1 - CONFIG_SYS_FSL_IFC_SIZE1_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FLASH_BASE, CONFIG_SYS_FSL_IFC_BASE1,
	  CONFIG_SYS_FSL_IFC_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_FSL_IFC
	/* Map IFC region #2 up to CONFIG_SYS_FLASH_BASE for NAND boot */
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FLASH_BASE - CONFIG_SYS_FSL_IFC_BASE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
#if defined(CONFIG_TFABOOT) || \
	(defined(CONFIG_SPL) && !defined(CONFIG_SPL_BUILD))
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
#else	/* Start with nGnRnE and PXN and UXN to prevent speculative access */
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
#endif
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_PXN | PTE_BLOCK_UXN |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
	{}, /* list terminator */
};
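
/*
 * final_map is the counterpart of early_map once DRAM has been sized: DRAM
 * regions become normal, cacheable memory while the per-SoC device windows
 * (MC, QBMan, PCIe, WRIOP, AIOP, ...) stay as nGnRnE device memory.
 * final_mmu_setup() below patches the DRAM entries with the real bank
 * layout and re-points mem_map (the table walked by the generic ARMv8 MMU
 * code) from early_map to final_map.
 */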
static struct mm_region final_map[] = {
#ifdef CONFIG_FSL_LSCH3
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QSPI_BASE1, CONFIG_SYS_FSL_QSPI_BASE1,
	  CONFIG_SYS_FSL_QSPI_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE2, CONFIG_SYS_FSL_QSPI_BASE2,
	  CONFIG_SYS_FSL_QSPI_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE2, CONFIG_SYS_FSL_IFC_BASE2,
	  CONFIG_SYS_FSL_IFC_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_MC_BASE, CONFIG_SYS_FSL_MC_BASE,
	  CONFIG_SYS_FSL_MC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_NI_BASE, CONFIG_SYS_FSL_NI_BASE,
	  CONFIG_SYS_FSL_NI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	/* For QBMAN portal, only the first 64MB is cache-enabled */
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_BASE + CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  CONFIG_SYS_FSL_QBMAN_SIZE - CONFIG_SYS_FSL_QBMAN_SIZE_1,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
	{ CONFIG_SYS_PCIE4_PHYS_ADDR, CONFIG_SYS_PCIE4_PHYS_ADDR,
	  CONFIG_SYS_PCIE4_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE5_PHYS_ADDR
	{ SYS_PCIE5_PHYS_ADDR, SYS_PCIE5_PHYS_ADDR,
	  SYS_PCIE5_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
#ifdef SYS_PCIE6_PHYS_ADDR
	{ SYS_PCIE6_PHYS_ADDR, SYS_PCIE6_PHYS_ADDR,
	  SYS_PCIE6_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_WRIOP1_BASE, CONFIG_SYS_FSL_WRIOP1_BASE,
	  CONFIG_SYS_FSL_WRIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_AIOP1_BASE, CONFIG_SYS_FSL_AIOP1_BASE,
	  CONFIG_SYS_FSL_AIOP1_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_PEBUF_BASE, CONFIG_SYS_FSL_PEBUF_BASE,
	  CONFIG_SYS_FSL_PEBUF_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#elif defined(CONFIG_FSL_LSCH2)
	{ CONFIG_SYS_FSL_BOOTROM_BASE, CONFIG_SYS_FSL_BOOTROM_BASE,
	  CONFIG_SYS_FSL_BOOTROM_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_CCSR_BASE, CONFIG_SYS_FSL_CCSR_BASE,
	  CONFIG_SYS_FSL_CCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_OCRAM_BASE, CONFIG_SYS_FSL_OCRAM_BASE,
	  SYS_FSL_OCRAM_SPACE_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) | PTE_BLOCK_NON_SHARE
	},
	{ CONFIG_SYS_FSL_DCSR_BASE, CONFIG_SYS_FSL_DCSR_BASE,
	  CONFIG_SYS_FSL_DCSR_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_QSPI_BASE, CONFIG_SYS_FSL_QSPI_BASE,
	  CONFIG_SYS_FSL_QSPI_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_FSL_IFC
	{ CONFIG_SYS_FSL_IFC_BASE, CONFIG_SYS_FSL_IFC_BASE,
	  CONFIG_SYS_FSL_IFC_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) | PTE_BLOCK_NON_SHARE
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE1, CONFIG_SYS_FSL_DRAM_BASE1,
	  CONFIG_SYS_FSL_DRAM_SIZE1,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_FSL_QBMAN_BASE, CONFIG_SYS_FSL_QBMAN_BASE,
	  CONFIG_SYS_FSL_QBMAN_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_FSL_DRAM_BASE2, CONFIG_SYS_FSL_DRAM_BASE2,
	  CONFIG_SYS_FSL_DRAM_SIZE2,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
	{ CONFIG_SYS_PCIE1_PHYS_ADDR, CONFIG_SYS_PCIE1_PHYS_ADDR,
	  CONFIG_SYS_PCIE1_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
	{ CONFIG_SYS_PCIE2_PHYS_ADDR, CONFIG_SYS_PCIE2_PHYS_ADDR,
	  CONFIG_SYS_PCIE2_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
	{ CONFIG_SYS_PCIE3_PHYS_ADDR, CONFIG_SYS_PCIE3_PHYS_ADDR,
	  CONFIG_SYS_PCIE3_PHYS_SIZE,
	  PTE_BLOCK_MEMTYPE(MT_DEVICE_NGNRNE) |
	  PTE_BLOCK_NON_SHARE | PTE_BLOCK_PXN | PTE_BLOCK_UXN
	},
#endif
	{ CONFIG_SYS_FSL_DRAM_BASE3, CONFIG_SYS_FSL_DRAM_BASE3,
	  CONFIG_SYS_FSL_DRAM_SIZE3,
	  PTE_BLOCK_MEMTYPE(MT_NORMAL) |
	  PTE_BLOCK_OUTER_SHARE | PTE_BLOCK_NS
	},
#endif
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	{}, /* space holder for secure mem */
#endif
	{}, /* list terminator */
};

struct mm_region *mem_map = early_map;
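
/* Build a printable CPU name from the SVR, e.g. "LS1046AE Rev1.0". */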
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
			if (IS_C_PROCESSOR(svr))
				strcat(name, "C");
#endif
			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Address space above EARLY_PGTABLE_SIZE (0x5000) is free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	if (el == 3)
		gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	else
		gd->arch.tlb_addr = CONFIG_SYS_DDR_SDRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
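
/*
 * LS2088A-family personalities place their PCIe windows at different
 * physical addresses than the LS2080A defaults compiled into final_map,
 * so the PCIe entries are patched at run time based on the SVR.
 */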
static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#ifdef CONFIG_SYS_PCIE3_PHYS_ADDR
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
#ifdef CONFIG_SYS_PCIE4_PHYS_ADDR
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
#endif
			default:
				break;
			}
		}
	}
#endif
}
/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		/*
		 * Only use gd->arch.secure_ram if the address is
		 * recalculated. Align to 4KB for MMU table.
		 */
		/* put page tables in secure ram */
		index = ARRAY_SIZE(final_map) - 2;
		gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
		final_map[index].virt = gd->arch.secure_ram & ~0x3;
		final_map[index].phys = final_map[index].virt;
		final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
		final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
		tlb_addr_save = gd->arch.tlb_addr;
	} else {
		/* Use allocated (board_f.c) memory for TLB */
		tlb_addr_save = gd->arch.tlb_allocated;
		gd->arch.tlb_addr = tlb_addr_save;
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}
u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * system startup. It is not necessary to run it if performance is not
	 * critical. Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

void mmu_setup(void)
{
	final_mmu_setup();
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_IS_ENABLED(SYS_DCACHE_OFF) */
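
/*
 * __get_boot_src() decodes the RCW boot-source field of PORSR1 into a
 * boot_src value. The encoding differs per SoC family: LSCH3.2 parts use a
 * simple switch on the source code, older LSCH3 parts distinguish IFC NOR
 * from serial sources, and LSCH2 parts decode NAND/NOR/QSPI/SD separately.
 */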
#ifdef CONFIG_TFABOOT
enum boot_src __get_boot_src(u32 porsr1)
{
	enum boot_src src = BOOT_SOURCE_RESERVED;
	u32 rcw_src = (porsr1 & RCW_SRC_MASK) >> RCW_SRC_BIT;
#if !defined(CONFIG_NXP_LSCH3_2)
	u32 val;
#endif
	debug("%s: rcw_src 0x%x\n", __func__, rcw_src);

#if defined(CONFIG_FSL_LSCH3)
#if defined(CONFIG_NXP_LSCH3_2)
	switch (rcw_src) {
	case RCW_SRC_SDHC1_VAL:
		src = BOOT_SOURCE_SD_MMC;
		break;
	case RCW_SRC_SDHC2_VAL:
		src = BOOT_SOURCE_SD_MMC2;
		break;
	case RCW_SRC_I2C1_VAL:
		src = BOOT_SOURCE_I2C1_EXTENDED;
		break;
	case RCW_SRC_FLEXSPI_NAND2K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_FLEXSPI_NAND4K_VAL:
		src = BOOT_SOURCE_XSPI_NAND;
		break;
	case RCW_SRC_RESERVED_1_VAL:
		src = BOOT_SOURCE_RESERVED;
		break;
	case RCW_SRC_FLEXSPI_NOR_24B:
		src = BOOT_SOURCE_XSPI_NOR;
		break;
	default:
		src = BOOT_SOURCE_RESERVED;
	}
#else
	val = rcw_src & RCW_SRC_TYPE_MASK;
	if (val == RCW_SRC_NOR_VAL) {
		val = rcw_src & NOR_TYPE_MASK;

		switch (val) {
		case NOR_16B_VAL:
		case NOR_32B_VAL:
			src = BOOT_SOURCE_IFC_NOR;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	} else {
		/* RCW SRC Serial Flash */
		val = rcw_src & RCW_SRC_SERIAL_MASK;
		switch (val) {
		case RCW_SRC_QSPI_VAL:
			/* RCW SRC Serial NOR (QSPI) */
			src = BOOT_SOURCE_QSPI_NOR;
			break;
		case RCW_SRC_SD_CARD_VAL:
			/* RCW SRC SD Card */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_EMMC_VAL:
			/* RCW SRC EMMC */
			src = BOOT_SOURCE_SD_MMC;
			break;
		case RCW_SRC_I2C1_VAL:
			/* RCW SRC I2C1 Extended */
			src = BOOT_SOURCE_I2C1_EXTENDED;
			break;
		default:
			src = BOOT_SOURCE_RESERVED;
		}
	}
#endif
#elif defined(CONFIG_FSL_LSCH2)
	/* RCW SRC NAND */
	val = rcw_src & RCW_SRC_NAND_MASK;
	if (val == RCW_SRC_NAND_VAL) {
		val = rcw_src & NAND_RESERVED_MASK;
		if (val != NAND_RESERVED_1 && val != NAND_RESERVED_2)
			src = BOOT_SOURCE_IFC_NAND;
	} else {
		/* RCW SRC NOR */
		val = rcw_src & RCW_SRC_NOR_MASK;
		if (val == NOR_8B_VAL || val == NOR_16B_VAL) {
			src = BOOT_SOURCE_IFC_NOR;
		} else {
			switch (rcw_src) {
			case QSPI_VAL1:
			case QSPI_VAL2:
				src = BOOT_SOURCE_QSPI_NOR;
				break;
			case SD_VAL:
				src = BOOT_SOURCE_SD_MMC;
				break;
			default:
				src = BOOT_SOURCE_RESERVED;
			}
		}
	}
#endif

	if (CONFIG_IS_ENABLED(SYS_FSL_ERRATUM_A010539) && !rcw_src)
		src = BOOT_SOURCE_QSPI_NOR;

	debug("%s: src 0x%x\n", __func__, src);
	return src;
}
enum boot_src get_boot_src(void)
{
	struct arm_smccc_res res;
	u32 porsr1 = 0;

#if defined(CONFIG_FSL_LSCH3)
	u32 __iomem *dcfg_ccsr = (u32 __iomem *)DCFG_BASE;
#elif defined(CONFIG_FSL_LSCH2)
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
#endif

	if (current_el() == 2) {
		arm_smccc_smc(SIP_SVC_RCW, 0, 0, 0, 0, 0, 0, 0, &res);
		if (!res.a0)
			porsr1 = res.a1;
	}

	if (current_el() == 3 || !porsr1) {
#ifdef CONFIG_FSL_LSCH3
		porsr1 = in_le32(dcfg_ccsr + DCFG_PORSR1 / 4);
#elif defined(CONFIG_FSL_LSCH2)
		porsr1 = in_be32(&gur->porsr1);
#endif
	}

	debug("%s: porsr1 0x%x\n", __func__, porsr1);

	return __get_boot_src(porsr1);
}
#ifdef CONFIG_ENV_IS_IN_MMC
int mmc_get_env_dev(void)
{
	enum boot_src src = get_boot_src();
	int dev = CONFIG_SYS_MMC_ENV_DEV;

	switch (src) {
	case BOOT_SOURCE_SD_MMC:
		dev = 0;
		break;
	case BOOT_SOURCE_SD_MMC2:
		dev = 1;
		break;
	default:
		break;
	}

	return dev;
}
#endif
enum env_location arch_env_get_location(enum env_operation op, int prio)
{
	enum boot_src src = get_boot_src();
	enum env_location env_loc = ENVL_NOWHERE;

	if (prio)
		return ENVL_UNKNOWN;

#ifdef CONFIG_ENV_IS_NOWHERE
	return env_loc;
#endif

	switch (src) {
	case BOOT_SOURCE_IFC_NOR:
		env_loc = ENVL_FLASH;
		break;
	case BOOT_SOURCE_QSPI_NOR:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NOR:
		env_loc = ENVL_SPI_FLASH;
		break;
	case BOOT_SOURCE_IFC_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_QSPI_NAND:
		/* FALLTHROUGH */
	case BOOT_SOURCE_XSPI_NAND:
		env_loc = ENVL_NAND;
		break;
	case BOOT_SOURCE_SD_MMC:
		/* FALLTHROUGH */
	case BOOT_SOURCE_SD_MMC2:
		env_loc = ENVL_MMC;
		break;
	case BOOT_SOURCE_I2C1_EXTENDED:
		/* FALLTHROUGH */
	default:
		break;
	}

	return env_loc;
}
#endif	/* CONFIG_TFABOOT */
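
/*
 * The GUTS topology registers (tp_cluster/tp_ityp) describe the initiators
 * present in each cluster. initiator_type() returns the TP_ITYP word for one
 * initiator slot, or 0 if the slot is not available; the helpers below walk
 * the clusters until the TP_CLUSTER_EOC (end-of-chain) flag is reached.
 */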
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif
1048 int cpu_mmc_init(struct bd_info
*bis
)
1050 return fsl_esdhc_mmc_init(bis
);
1054 int cpu_eth_init(struct bd_info
*bis
)
1058 #if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
1059 error
= fsl_mc_ldpaa_init(bis
);
1061 #ifdef CONFIG_FMAN_ENET
1062 fm_standard_init(bis
);
int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	struct arm_smccc_res res;
	size_t arglen;
	unsigned int mask;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;
	else
		return;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		arm_smccc_smc(SIP_PREFETCH_DISABLE_64, mask, 0, 0, 0, 0, 0, 0,
			      &res);

		if (res.a0)
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}
#ifdef CONFIG_PCIE_ECAM_GENERIC
__weak void set_ecam_icids(void)
{
}
#endif
int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_SYS_FSL_HAS_RGMII
	/* some dpmacs in armv8a based freescale layerscape SOCs can be
	 * configured via both serdes(sgmii, 10gbase-r, xlaui etc) bits and via
	 * EC*_PMUX(rgmii) bits in RCW.
	 * e.g. dpmac 17 and 18 in LX2160A can be configured as SGMII from
	 * serdes bits and as RGMII via EC1_PMUX/EC2_PMUX bits
	 * Now if a dpmac is enabled as RGMII through ECx_PMUX then it takes
	 * precedence over SerDes protocol. i.e. in LX2160A if we select serdes
	 * protocol that configures dpmac17 as SGMII and set the EC1_PMUX as
	 * RGMII, then the dpmac is RGMII and not SGMII.
	 *
	 * Therefore, even though fsl_rgmii_init is after fsl_serdes_init
	 * function of SOC, the dpmac will be enabled as RGMII even if it was
	 * also enabled before as SGMII. If ECx_PMUX is not configured for
	 * RGMII, DPMAC will remain configured as SGMII from fsl_serdes_init().
	 */
	fsl_rgmii_init();
#endif
#ifdef CONFIG_FMAN_ENET
#ifndef CONFIG_DM_ETH
	fman_enet_init();
#endif
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
#ifdef CONFIG_PCIE_ECAM_GENERIC
	set_ecam_icids();
#endif
	return 0;
}
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A) || \
	defined(CONFIG_ARCH_LS1028A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's watchdog
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(void)
{
#if defined(CONFIG_ARCH_LX2160A) || defined(CONFIG_ARCH_LX2162A)
	/* clear the RST_REQ_MSK and SW_RST_REQ */
	out_le32(rstcr, 0x0);

	/* initiate the sw reset request */
	out_le32(rstcr, 0x1);
#else
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
#endif
}
#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_PSCI_RESET)

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu();
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif
/*
 * Calculate the reserved memory for a given memory bank.
 * Return the aligned memory size on success.
 * Return (ram_size + needed size) on failure.
 */
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}
phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of the first region
	 * so U-Boot doesn't relocate itself into a higher address. Should DDR
	 * be configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");

	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}
#endif

	return ea_size;
}
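
/*
 * Under TFABOOT the DRAM layout is owned by TF-A and queried through the
 * SMC_DRAM_BANK_INFO SiP service: a1 == -1 returns the total DRAM size,
 * while a bank index returns that bank's start (a1) and size (a2).
 */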
#ifdef CONFIG_TFABOOT
phys_size_t tfa_get_dram_size(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(SMC_DRAM_BANK_INFO, -1, 0, 0, 0, 0, 0, 0, &res);
	if (res.a0)
		return 0;

	return res.a1;
}

static int tfa_dram_init_banksize(void)
{
	int i = 0, ret = 0;
	phys_size_t dram_size = tfa_get_dram_size();
	struct arm_smccc_res res;

	debug("dram_size %llx\n", dram_size);

	if (!dram_size)
		return -EINVAL;

	do {
		arm_smccc_smc(SMC_DRAM_BANK_INFO, i, 0, 0, 0, 0, 0, 0, &res);
		if (res.a0) {
			ret = -EINVAL;
			break;
		}

		debug("bank[%d]: start %lx, size %lx\n", i, res.a1, res.a2);
		gd->bd->bi_dram[i].start = res.a1;
		gd->bd->bi_dram[i].size = res.a2;

		dram_size -= gd->bd->bi_dram[i].size;

		i++;
	} while (dram_size);

	if (ret)
		return ret;
#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

	return 0;
}
int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

#ifdef CONFIG_TFABOOT
	if (!tfa_dram_init_banksize())
		return 0;
#endif
	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from the low region to the high region(s)
	 * presuming no hole is created by the DDR configuration.
	 * gd->arch.secure_ram tracks the location of secure memory.
	 * gd->arch.resv_ram tracks the location of reserved memory for the
	 * Management Complex (MC). Because gd->ram_size is reduced by this
	 * function if secure memory is reserved, gd->arch.secure_ram should be
	 * checked to avoid running it repeatedly.
	 */

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */
#if defined(CONFIG_RESV_RAM) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			    gd->bd->bi_dram[2].size -
			    board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_RESV_RAM */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * DDR controller use 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for core to access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					  CONFIG_DP_DDR_CTRL,
					  CONFIG_DP_DDR_NUM_CTRLS,
					  CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					  NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}
#if CONFIG_IS_ENABLED(EFI_LOADER)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start;
	phys_size_t ram_size;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		efi_add_memory_map(ram_start, ram_size,
				   EFI_CONVENTIONAL_MEMORY);
	}
}
#endif
/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL) |
					PTE_BLOCK_OUTER_SHARE |
					PTE_BLOCK_NS |
					PTE_TYPE_VALID);
		}
	}
}
__weak int dram_init(void)
{
#ifdef CONFIG_SYS_FSL_DDR
	fsl_initdram();
#if (!defined(CONFIG_SPL) && !defined(CONFIG_TFABOOT)) || \
	defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif
#endif

	return 0;
}

#ifdef CONFIG_ARCH_MISC_INIT
__weak int serdes_misc_init(void)
{
	return 0;
}

int arch_misc_init(void)
{
	if (IS_ENABLED(CONFIG_FSL_CAAM)) {
		struct udevice *dev;
		int ret;

		ret = uclass_get_device_by_driver(UCLASS_MISC, DM_DRIVER_GET(caam_jr), &dev);
		if (ret)
			printf("Failed to initialize caam_jr: %d\n", ret);