/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <fm_eth.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;
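
/*
 * Editorial note: mem_map starts out pointing at early_map so the MMU can
 * come up while only OCRAM is usable; final_mmu_setup() below switches it
 * to final_map once DRAM is available.
 */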

void cpu_name(char *name)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        unsigned int i, svr, ver;

        svr = gur_in32(&gur->svr);
        ver = SVR_SOC_VER(svr);

        for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
                if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
                        strcpy(name, cpu_type_list[i].name);

                        if (IS_E_PROCESSOR(svr))
                                strcat(name, "E");
                        break;
                }

        if (i == ARRAY_SIZE(cpu_type_list))
                strcpy(name, "unknown");
}
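
/*
 * Editorial note: SVR_WO_E masks off the "E" (SEC/crypto) bit of the SVR,
 * so the table lookup matches both E and non-E parts; IS_E_PROCESSOR()
 * then restores the distinction by appending "E" to the name (e.g. a
 * hypothetical "LS2080A" entry would print as "LS2080AE" on an E part).
 */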

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU table in
 * SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use
 * three levels of translation tables here to cover a 40-bit address
 * space. We use a 4KB granule size, with 40-bit physical addresses and
 * T0SZ=24. Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other
 * purposes. Note, the debug print in cache_v8.c is not usable for
 * debugging these early MMU tables because the UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
        unsigned int el = current_el();

        /* Global data is already set up, no allocation yet */
        gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
        gd->arch.tlb_fillptr = gd->arch.tlb_addr;
        gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

        /* Create early page tables */
        setup_pgtables();

        /* Point TTBR to the new table */
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
                          get_tcr(el, NULL, NULL) &
                          ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
                          MEMORY_ATTRIBUTES);

        set_sctlr(get_sctlr() | CR_M);
}
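
/*
 * Editorial sketch of the sizing math in the comment above: with a 4KB
 * granule, T0SZ = 64 - 40 = 24 gives a 2^40-byte space walked through
 * three table levels (L0 entries span 512GB, L1 1GB, L2 2MB blocks).
 * EARLY_PGTABLE_SIZE 0x5000 is five 4KB pages of table storage in OCRAM.
 * Clearing TCR_ORGN_MASK | TCR_IRGN_MASK makes table walks non-cacheable,
 * which is required while the D-cache is still off.
 */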

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't
 * be 0.
 */
static inline void final_mmu_setup(void)
{
        u64 tlb_addr_save = gd->arch.tlb_addr;
        unsigned int el = current_el();
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        int index;
#endif

        mem_map = final_map;

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
        if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
                /*
                 * Only use gd->arch.secure_ram if the address is
                 * recalculated. Align to 4KB for the MMU table.
                 */
                /* Put page tables in secure RAM */
                index = ARRAY_SIZE(final_map) - 2;
                gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
                final_map[index].virt = gd->arch.secure_ram & ~0x3;
                final_map[index].phys = final_map[index].virt;
                final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
                final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
                gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
                tlb_addr_save = gd->arch.tlb_addr;
        } else {
                /* Use allocated (board_f.c) memory for the TLB */
                tlb_addr_save = gd->arch.tlb_allocated;
                gd->arch.tlb_addr = tlb_addr_save;
        }
#endif

        /* Reset the fill ptr */
        gd->arch.tlb_fillptr = tlb_addr_save;

        /* Create normal system page tables */
        setup_pgtables();

        /* Create emergency page tables */
        gd->arch.tlb_addr = gd->arch.tlb_fillptr;
        gd->arch.tlb_emerg = gd->arch.tlb_addr;
        setup_pgtables();
        gd->arch.tlb_addr = tlb_addr_save;

        /* Flush the new MMU table */
        flush_dcache_range(gd->arch.tlb_addr,
                           gd->arch.tlb_addr + gd->arch.tlb_size);

        /* Point TTBR to the new table */
        set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
                          MEMORY_ATTRIBUTES);
        /*
         * The MMU is already enabled; we only need to invalidate the TLB
         * to load the new table. The new table is compatible with the
         * current one, so even if the MMU somehow walks through the new
         * table before the TLB is invalidated, it still works. We
         * therefore don't need to turn off the MMU here.
         */
}
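
/*
 * Editorial note: the comment above is why no full break-before-make
 * sequence (MMU off, switch TTBR, MMU on) is needed here. Both tables map
 * the currently-executing addresses identically, so a stale-vs-new walk
 * cannot yield a different translation; a TLB invalidate is sufficient,
 * and enable_caches() below performs it right after this function.
 */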

u64 get_page_table_size(void)
{
        return 0x10000;
}

int arch_cpu_init(void)
{
        icache_enable();
        __asm_invalidate_dcache_all();
        __asm_invalidate_tlb_all();
        early_mmu_setup();
        set_sctlr(get_sctlr() | CR_C);

        return 0;
}
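
/*
 * Editorial note on the ordering above: on AArch64 the I-cache can be
 * enabled while the MMU is still off, but CR_C (D-cache) is only set
 * after early_mmu_setup() has built and activated the OCRAM tables,
 * since with the MMU off data accesses are treated as Device memory and
 * are not cached anyway.
 */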

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
        final_mmu_setup();
        __asm_invalidate_tlb_all();
}
#endif

static inline u32 initiator_type(u32 cluster, int init_id)
{
        struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
        u32 type;

        type = gur_in32(&gur->tp_ityp[idx]);
        if (type & TP_ITYP_AV)
                return type;

        return 0;
}
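
/*
 * Editorial sketch of the topology layout assumed above: each TP cluster
 * word packs up to four initiator indices, one per byte, so
 * (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK selects initiator
 * init_id. That index is then looked up in tp_ityp[], whose entries are
 * only meaningful when the TP_ITYP_AV (available) bit is set.
 */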

u32 cpu_mask(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type, mask = 0;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
                                        mask |= 1 << count;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
        return hweight32(cpu_mask());
}
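
/*
 * Editorial example: cpu_mask() returns one bit per ARM core in topology
 * order, so on a 4-core part the mask is 0xf and hweight32(0xf) == 4.
 * Valid initiators that are not ARM cores (e.g. accelerators) consume a
 * count position without setting a mask bit.
 */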

int fsl_qoriq_core_to_cluster(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        if (initiator_type(cluster, j)) {
                                if (count == core)
                                        return i;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return -1;      /* cannot identify the cluster */
}
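
/*
 * Editorial note: core numbers here are positions in the same topology
 * walk used by cpu_mask(), so fsl_qoriq_core_to_cluster() and
 * fsl_qoriq_core_to_type() below stay consistent with the mask bit
 * assignment as long as all three walk the clusters in the same order.
 */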

u32 fsl_qoriq_core_to_type(unsigned int core)
{
        struct ccsr_gur __iomem *gur =
                (void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
        int i = 0, count = 0;
        u32 cluster, type;

        do {
                int j;

                cluster = gur_in32(&gur->tp_cluster[i].lower);
                for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
                        type = initiator_type(cluster, j);
                        if (type) {
                                if (count == core)
                                        return type;
                                count++;
                        }
                }
                i++;
        } while ((cluster & TP_CLUSTER_EOC) == 0x0);

        return -1;      /* cannot identify the cluster */
}

uint get_svr(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

        return gur_in32(&gur->svr);
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
        struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
        struct sys_info sysinfo;
        char buf[32];
        unsigned int i, core;
        u32 type, rcw, svr = gur_in32(&gur->svr);

        puts("SoC: ");

        cpu_name(buf);
        printf(" %s (0x%x)\n", buf, svr);
        memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
        get_sys_info(&sysinfo);
        puts("Clock Configuration:");
        for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
                if (!(i % 3))
                        puts("\n       ");
                type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
                printf("CPU%d(%s):%-4s MHz  ", core,
                       type == TY_ITYP_VER_A7 ? "A7 " :
                       (type == TY_ITYP_VER_A53 ? "A53" :
                       (type == TY_ITYP_VER_A57 ? "A57" :
                       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
                       strmhz(buf, sysinfo.freq_processor[core]));
        }
        printf("\n       Bus:      %-4s MHz  ",
               strmhz(buf, sysinfo.freq_systembus));
        printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
        printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
        if (soc_has_dp_ddr()) {
                printf("     DP-DDR:   %-4s MT/s",
                       strmhz(buf, sysinfo.freq_ddrbus2));
        }
#endif
        puts("\n");

        /*
         * Display the RCW, so that no one gets confused as to what RCW
         * we're actually using for this boot.
         */
        puts("Reset Configuration Word (RCW):");
        for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
                rcw = gur_in32(&gur->rcwsr[i]);

                if ((i % 4) == 0)
                        printf("\n       %08x:", i * 4);
                printf(" %08x", rcw);
        }
        puts("\n");

        return 0;
}
#endif
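
/*
 * Editorial example of the RCW dump format produced above: the RCW words
 * print four per row, each row prefixed with its byte offset, e.g.
 *
 *   Reset Configuration Word (RCW):
 *          00000000: 48303838 48480048 00000000 00000000
 *          00000010: 00000000 00000000 00000000 00000000
 *
 * (the values shown are illustrative, not from real hardware).
 */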

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
        return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
        int error = 0;

#ifdef CONFIG_FSL_MC_ENET
        error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
        fm_standard_init(bis);
#endif
        return error;
}

int arch_early_init_r(void)
{
#ifdef CONFIG_MP
        int rv = 1;
        u32 psci_ver = 0xffffffff;
#endif

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
        erratum_a009635();
#endif

#ifdef CONFIG_MP
#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && defined(CONFIG_ARMV8_PSCI)
        /* Check the PSCI version to determine if PSCI is supported */
        psci_ver = sec_firmware_support_psci_version();
#endif
        if (psci_ver == 0xffffffff) {
                rv = fsl_layerscape_wake_seconday_cores();
                if (rv)
                        printf("Did not wake secondary cores\n");
        }
#endif

#ifdef CONFIG_SYS_HAS_SERDES
        fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
        fman_enet_init();
#endif
        return 0;
}
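
/*
 * Editorial note on the PSCI fallback above: psci_ver stays at the
 * 0xffffffff sentinel when no PSCI-capable secure firmware is present,
 * in which case U-Boot must release the secondary cores itself via
 * fsl_layerscape_wake_seconday_cores() (the misspelling is the
 * historical identifier in mp.c and is kept for consistency).
 */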

int timer_init(void)
{
        u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
        u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
        u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
#endif
#ifdef COUNTER_FREQUENCY_REAL
        unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

        /* Update with the accurate clock frequency */
        asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
        /*
         * Enable the timebase for all clusters.
         * It is safe to do so even when some clusters are not enabled.
         */
        out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
        /*
         * In certain Layerscape SoCs, the clock for each core's timebase
         * has an enable bit in the PMU Physical Core Time Base Enable
         * Register (PCTBENR), which allows the watchdog to operate.
         */
        setbits_le32(pctbenr, 0xff);
#endif

        /*
         * Enable the clock for the timer.
         * This is a global setting.
         */
        out_le32(cntcr, 0x1);

        return 0;
}
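
/*
 * Editorial note: writing 0x1 to CNTCR sets its EN bit, starting the
 * global system counter that feeds every core's generic timer. The
 * cntfrq_el0 write above is only a software-visible frequency report and
 * must match the actual counter clock (hence COUNTER_FREQUENCY_REAL).
 */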

void reset_cpu(ulong addr)
{
        u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
        u32 val;

        /* Raise RESET_REQ_B */
        val = scfg_in32(rstcr);
        val |= 0x02;
        scfg_out32(rstcr, val);
}

phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
        phys_size_t ram_top = ram_size;

#ifdef CONFIG_SYS_MEM_TOP_HIDE
#error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
#endif

/* Carve the Debug Server private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_DEBUG_SERVER
        ram_top -= debug_server_get_dram_block_size();
#endif

/* Carve the MC private DRAM block from the end of DRAM */
#ifdef CONFIG_FSL_MC_ENET
        ram_top -= mc_get_dram_block_size();
        ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

        return ram_top;
}
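
/*
 * Editorial example of the alignment step above: ~(align - 1) clears the
 * low bits, rounding ram_top down to a multiple of
 * CONFIG_SYS_MC_RSV_MEM_ALIGN. E.g. with a hypothetical 512MB alignment
 * (0x20000000), ram_top 0x7f800000 becomes 0x60000000, so the MC private
 * block always starts on a boundary the MC firmware can use.
 */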