/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
27 DECLARE_GLOBAL_DATA_PTR
;
29 static struct mm_region layerscape_mem_map
[] = {
35 struct mm_region
*mem_map
= layerscape_mem_map
;
37 void cpu_name(char *name
)
39 struct ccsr_gur __iomem
*gur
= (void *)(CONFIG_SYS_FSL_GUTS_ADDR
);
40 unsigned int i
, svr
, ver
;
42 svr
= gur_in32(&gur
->svr
);
43 ver
= SVR_SOC_VER(svr
);
45 for (i
= 0; i
< ARRAY_SIZE(cpu_type_list
); i
++)
46 if ((cpu_type_list
[i
].soc_ver
& SVR_WO_E
) == ver
) {
47 strcpy(name
, cpu_type_list
[i
].name
);
49 if (IS_E_PROCESSOR(svr
))
54 if (i
== ARRAY_SIZE(cpu_type_list
))
55 strcpy(name
, "unknown");
58 #ifndef CONFIG_SYS_DCACHE_OFF
59 static void set_pgtable_section(u64
*page_table
, u64 index
, u64 section
,
60 u64 memory_type
, u64 attribute
)
64 value
= section
| PTE_TYPE_BLOCK
| PTE_BLOCK_AF
;
65 value
|= PMD_ATTRINDX(memory_type
);
67 page_table
[index
] = value
;
70 static void set_pgtable_table(u64
*page_table
, u64 index
, u64
*table_addr
)
74 value
= (u64
)table_addr
| PTE_TYPE_TABLE
;
75 page_table
[index
] = value
;
79 * Set the block entries according to the information of the table.
81 static int set_block_entry(const struct sys_mmu_table
*list
,
82 struct table_info
*table
)
84 u64 block_size
= 0, block_shift
= 0;
85 u64 block_addr
, index
;
88 if (table
->entry_size
== BLOCK_SIZE_L1
) {
89 block_size
= BLOCK_SIZE_L1
;
90 block_shift
= SECTION_SHIFT_L1
;
91 } else if (table
->entry_size
== BLOCK_SIZE_L2
) {
92 block_size
= BLOCK_SIZE_L2
;
93 block_shift
= SECTION_SHIFT_L2
;
98 block_addr
= list
->phys_addr
;
99 index
= (list
->virt_addr
- table
->table_base
) >> block_shift
;
101 for (j
= 0; j
< (list
->size
>> block_shift
); j
++) {
102 set_pgtable_section(table
->ptr
,
107 block_addr
+= block_size
;
115 * Find the corresponding table entry for the list.
117 static int find_table(const struct sys_mmu_table
*list
,
118 struct table_info
*table
, u64
*level0_table
)
120 u64 index
= 0, level
= 0;
121 u64
*level_table
= level0_table
;
122 u64 temp_base
= 0, block_size
= 0, block_shift
= 0;
126 block_size
= BLOCK_SIZE_L0
;
127 block_shift
= SECTION_SHIFT_L0
;
128 } else if (level
== 1) {
129 block_size
= BLOCK_SIZE_L1
;
130 block_shift
= SECTION_SHIFT_L1
;
131 } else if (level
== 2) {
132 block_size
= BLOCK_SIZE_L2
;
133 block_shift
= SECTION_SHIFT_L2
;
137 while (list
->virt_addr
>= temp_base
) {
139 temp_base
+= block_size
;
142 temp_base
-= block_size
;
144 if ((level_table
[index
- 1] & PTE_TYPE_MASK
) ==
146 level_table
= (u64
*)(level_table
[index
- 1] &
154 if ((list
->phys_addr
+ list
->size
) >
155 (temp_base
+ block_size
* NUM_OF_ENTRY
))
159 * Check the address and size of the list member is
160 * aligned with the block size.
162 if (((list
->phys_addr
& (block_size
- 1)) != 0) ||
163 ((list
->size
& (block_size
- 1)) != 0))
166 table
->ptr
= level_table
;
167 table
->table_base
= temp_base
-
168 ((index
- 1) << block_shift
);
169 table
->entry_size
= block_size
;
178 * To start MMU before DDR is available, we create MMU table in SRAM.
179 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
180 * levels of translation tables here to cover 40-bit address space.
181 * We use 4KB granule size, with 40 bits physical address, T0SZ=24
182 * Level 0 IA[39], table address @0
183 * Level 1 IA[38:30], table address @0x1000, 0x2000
184 * Level 2 IA[29:21], table address @0x3000, 0x4000
185 * Address above 0x5000 is free for other purpose.
187 static inline void early_mmu_setup(void)
190 u64
*level0_table
= (u64
*)CONFIG_SYS_FSL_OCRAM_BASE
;
191 u64
*level1_table0
= (u64
*)(CONFIG_SYS_FSL_OCRAM_BASE
+ 0x1000);
192 u64
*level1_table1
= (u64
*)(CONFIG_SYS_FSL_OCRAM_BASE
+ 0x2000);
193 u64
*level2_table0
= (u64
*)(CONFIG_SYS_FSL_OCRAM_BASE
+ 0x3000);
194 u64
*level2_table1
= (u64
*)(CONFIG_SYS_FSL_OCRAM_BASE
+ 0x4000);
196 struct table_info table
= {level0_table
, 0, BLOCK_SIZE_L0
};
198 /* Invalidate all table entries */
199 memset(level0_table
, 0, 0x5000);
201 /* Fill in the table entries */
202 set_pgtable_table(level0_table
, 0, level1_table0
);
203 set_pgtable_table(level0_table
, 1, level1_table1
);
204 set_pgtable_table(level1_table0
, 0, level2_table0
);
206 #ifdef CONFIG_FSL_LSCH3
207 set_pgtable_table(level1_table0
,
208 CONFIG_SYS_FLASH_BASE
>> SECTION_SHIFT_L1
,
210 #elif defined(CONFIG_FSL_LSCH2)
211 set_pgtable_table(level1_table0
, 1, level2_table1
);
213 /* Find the table and fill in the block entries */
214 for (i
= 0; i
< ARRAY_SIZE(early_mmu_table
); i
++) {
215 if (find_table(&early_mmu_table
[i
],
216 &table
, level0_table
) == 0) {
218 * If find_table() returns error, it cannot be dealt
219 * with here. Breakpoint can be added for debugging.
221 set_block_entry(&early_mmu_table
[i
], &table
);
223 * If set_block_entry() returns error, it cannot be
224 * dealt with here too.
231 set_ttbr_tcr_mair(el
, (u64
)level0_table
, LAYERSCAPE_TCR
,
233 set_sctlr(get_sctlr() | CR_M
);
236 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
238 * Called from final mmu setup. The phys_addr is new, non-existing
239 * address. A new sub table is created @level2_table_secure to cover
240 * size of CONFIG_SYS_MEM_RESERVE_SECURE memory.
242 static inline int final_secure_ddr(u64
*level0_table
,
243 u64
*level2_table_secure
,
244 phys_addr_t phys_addr
)
247 struct table_info table
= {};
248 struct sys_mmu_table ddr_entry
= {
249 0, 0, BLOCK_SIZE_L1
, MT_NORMAL
,
250 PTE_BLOCK_OUTER_SHARE
| PTE_BLOCK_NS
254 /* Need to create a new table */
255 ddr_entry
.virt_addr
= phys_addr
& ~(BLOCK_SIZE_L1
- 1);
256 ddr_entry
.phys_addr
= phys_addr
& ~(BLOCK_SIZE_L1
- 1);
257 ret
= find_table(&ddr_entry
, &table
, level0_table
);
260 index
= (ddr_entry
.virt_addr
- table
.table_base
) >> SECTION_SHIFT_L1
;
261 set_pgtable_table(table
.ptr
, index
, level2_table_secure
);
262 table
.ptr
= level2_table_secure
;
263 table
.table_base
= ddr_entry
.virt_addr
;
264 table
.entry_size
= BLOCK_SIZE_L2
;
265 ret
= set_block_entry(&ddr_entry
, &table
);
267 printf("MMU error: could not fill non-secure ddr block entries\n");
270 ddr_entry
.virt_addr
= phys_addr
;
271 ddr_entry
.phys_addr
= phys_addr
;
272 ddr_entry
.size
= CONFIG_SYS_MEM_RESERVE_SECURE
;
273 ddr_entry
.attribute
= PTE_BLOCK_OUTER_SHARE
;
274 ret
= find_table(&ddr_entry
, &table
, level0_table
);
276 printf("MMU error: could not find secure ddr table\n");
279 ret
= set_block_entry(&ddr_entry
, &table
);
281 printf("MMU error: could not set secure ddr block entry\n");
288 * The final tables look similar to early tables, but different in detail.
289 * These tables are in DRAM. Sub tables are added to enable cache for
292 * Put the MMU table in secure memory if gd->secure_ram is valid.
293 * OCRAM will be not used for this purpose so gd->secure_ram can't be 0.
295 * Level 1 table 0 contains 512 entries for each 1GB from 0 to 512GB.
296 * Level 1 table 1 contains 512 entries for each 1GB from 512GB to 1TB.
297 * Level 2 table 0 contains 512 entries for each 2MB from 0 to 1GB.
300 * Level 2 table 1 contains 512 entries for each 2MB from 32GB to 33GB.
302 * Level 2 table 1 contains 512 entries for each 2MB from 1GB to 2GB.
303 * Level 2 table 2 contains 512 entries for each 2MB from 20GB to 21GB.
305 static inline void final_mmu_setup(void)
307 unsigned int el
= current_el();
309 u64
*level0_table
= (u64
*)gd
->arch
.tlb_addr
;
314 #ifdef CONFIG_FSL_LSCH2
317 struct table_info table
= {NULL
, 0, BLOCK_SIZE_L0
};
319 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
320 u64
*level2_table_secure
;
324 * Only use gd->secure_ram if the address is recalculated
325 * Align to 4KB for MMU table
327 if (gd
->secure_ram
& MEM_RESERVE_SECURE_MAINTAINED
)
328 level0_table
= (u64
*)(gd
->secure_ram
& ~0xfff);
330 printf("MMU warning: gd->secure_ram is not maintained, disabled.\n");
333 level1_table0
= level0_table
+ 512;
334 level1_table1
= level1_table0
+ 512;
335 level2_table0
= level1_table1
+ 512;
336 level2_table1
= level2_table0
+ 512;
337 #ifdef CONFIG_FSL_LSCH2
338 level2_table2
= level2_table1
+ 512;
340 table
.ptr
= level0_table
;
342 /* Invalidate all table entries */
343 memset(level0_table
, 0, PGTABLE_SIZE
);
345 /* Fill in the table entries */
346 set_pgtable_table(level0_table
, 0, level1_table0
);
347 set_pgtable_table(level0_table
, 1, level1_table1
);
348 set_pgtable_table(level1_table0
, 0, level2_table0
);
349 #ifdef CONFIG_FSL_LSCH3
350 set_pgtable_table(level1_table0
,
351 CONFIG_SYS_FSL_QBMAN_BASE
>> SECTION_SHIFT_L1
,
353 #elif defined(CONFIG_FSL_LSCH2)
354 set_pgtable_table(level1_table0
, 1, level2_table1
);
355 set_pgtable_table(level1_table0
,
356 CONFIG_SYS_FSL_QBMAN_BASE
>> SECTION_SHIFT_L1
,
360 /* Find the table and fill in the block entries */
361 for (i
= 0; i
< ARRAY_SIZE(final_mmu_table
); i
++) {
362 if (find_table(&final_mmu_table
[i
],
363 &table
, level0_table
) == 0) {
364 if (set_block_entry(&final_mmu_table
[i
],
366 printf("MMU error: could not set block entry for %p\n",
367 &final_mmu_table
[i
]);
371 printf("MMU error: could not find the table for %p\n",
372 &final_mmu_table
[i
]);
375 /* Set the secure memory to secure in MMU */
376 #ifdef CONFIG_SYS_MEM_RESERVE_SECURE
377 if (el
== 3 && gd
->secure_ram
& MEM_RESERVE_SECURE_MAINTAINED
) {
378 #ifdef CONFIG_FSL_LSCH3
379 level2_table_secure
= level2_table1
+ 512;
380 #elif defined(CONFIG_FSL_LSCH2)
381 level2_table_secure
= level2_table2
+ 512;
383 if (!final_secure_ddr(level0_table
,
385 gd
->secure_ram
& ~0x3)) {
386 gd
->secure_ram
|= MEM_RESERVE_SECURE_SECURED
;
387 debug("Now MMU table is in secured memory at 0x%llx\n",
388 gd
->secure_ram
& ~0x3);
390 printf("MMU warning: Failed to secure DDR\n");
395 /* flush new MMU table */
396 flush_dcache_range((ulong
)level0_table
,
397 (ulong
)level0_table
+ gd
->arch
.tlb_size
);
399 /* point TTBR to the new table */
400 set_ttbr_tcr_mair(el
, (u64
)level0_table
, LAYERSCAPE_TCR_FINAL
,
403 * MMU is already enabled, just need to invalidate TLB to load the
404 * new table. The new table is compatible with the current table, if
405 * MMU somehow walks through the new table before invalidation TLB,
406 * it still works. So we don't need to turn off MMU here.
410 u64
get_page_table_size(void)
415 int arch_cpu_init(void)
418 __asm_invalidate_dcache_all();
419 __asm_invalidate_tlb_all();
421 set_sctlr(get_sctlr() | CR_C
);
/*
 * This function is called from lib/board.c.
 * It recreates MMU table in main memory. MMU and d-cache are enabled earlier.
 * There is no need to disable d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
437 static inline u32
initiator_type(u32 cluster
, int init_id
)
439 struct ccsr_gur
*gur
= (void *)(CONFIG_SYS_FSL_GUTS_ADDR
);
440 u32 idx
= (cluster
>> (init_id
* 8)) & TP_CLUSTER_INIT_MASK
;
443 type
= gur_in32(&gur
->tp_ityp
[idx
]);
444 if (type
& TP_ITYP_AV
)
452 struct ccsr_gur __iomem
*gur
= (void *)(CONFIG_SYS_FSL_GUTS_ADDR
);
453 int i
= 0, count
= 0;
454 u32 cluster
, type
, mask
= 0;
459 cluster
= gur_in32(&gur
->tp_cluster
[i
].lower
);
460 for (j
= 0; j
< TP_INIT_PER_CLUSTER
; j
++) {
461 type
= initiator_type(cluster
, j
);
463 if (TP_ITYP_TYPE(type
) == TP_ITYP_TYPE_ARM
)
469 } while ((cluster
& TP_CLUSTER_EOC
) == 0x0);
/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
482 int fsl_qoriq_core_to_cluster(unsigned int core
)
484 struct ccsr_gur __iomem
*gur
=
485 (void __iomem
*)(CONFIG_SYS_FSL_GUTS_ADDR
);
486 int i
= 0, count
= 0;
492 cluster
= gur_in32(&gur
->tp_cluster
[i
].lower
);
493 for (j
= 0; j
< TP_INIT_PER_CLUSTER
; j
++) {
494 if (initiator_type(cluster
, j
)) {
501 } while ((cluster
& TP_CLUSTER_EOC
) == 0x0);
503 return -1; /* cannot identify the cluster */
506 u32
fsl_qoriq_core_to_type(unsigned int core
)
508 struct ccsr_gur __iomem
*gur
=
509 (void __iomem
*)(CONFIG_SYS_FSL_GUTS_ADDR
);
510 int i
= 0, count
= 0;
516 cluster
= gur_in32(&gur
->tp_cluster
[i
].lower
);
517 for (j
= 0; j
< TP_INIT_PER_CLUSTER
; j
++) {
518 type
= initiator_type(cluster
, j
);
526 } while ((cluster
& TP_CLUSTER_EOC
) == 0x0);
528 return -1; /* cannot identify the cluster */
533 struct ccsr_gur __iomem
*gur
= (void *)(CONFIG_SYS_FSL_GUTS_ADDR
);
535 return gur_in32(&gur
->svr
);
538 #ifdef CONFIG_DISPLAY_CPUINFO
539 int print_cpuinfo(void)
541 struct ccsr_gur __iomem
*gur
= (void *)(CONFIG_SYS_FSL_GUTS_ADDR
);
542 struct sys_info sysinfo
;
544 unsigned int i
, core
;
545 u32 type
, rcw
, svr
= gur_in32(&gur
->svr
);
550 printf(" %s (0x%x)\n", buf
, svr
);
551 memset((u8
*)buf
, 0x00, ARRAY_SIZE(buf
));
552 get_sys_info(&sysinfo
);
553 puts("Clock Configuration:");
554 for_each_cpu(i
, core
, cpu_numcores(), cpu_mask()) {
557 type
= TP_ITYP_VER(fsl_qoriq_core_to_type(core
));
558 printf("CPU%d(%s):%-4s MHz ", core
,
559 type
== TY_ITYP_VER_A7
? "A7 " :
560 (type
== TY_ITYP_VER_A53
? "A53" :
561 (type
== TY_ITYP_VER_A57
? "A57" : " ")),
562 strmhz(buf
, sysinfo
.freq_processor
[core
]));
564 printf("\n Bus: %-4s MHz ",
565 strmhz(buf
, sysinfo
.freq_systembus
));
566 printf("DDR: %-4s MT/s", strmhz(buf
, sysinfo
.freq_ddrbus
));
567 #ifdef CONFIG_SYS_DPAA_FMAN
568 printf(" FMAN: %-4s MHz", strmhz(buf
, sysinfo
.freq_fman
[0]));
570 #ifdef CONFIG_SYS_FSL_HAS_DP_DDR
571 if (soc_has_dp_ddr()) {
572 printf(" DP-DDR: %-4s MT/s",
573 strmhz(buf
, sysinfo
.freq_ddrbus2
));
579 * Display the RCW, so that no one gets confused as to what RCW
580 * we're actually using for this boot.
582 puts("Reset Configuration Word (RCW):");
583 for (i
= 0; i
< ARRAY_SIZE(gur
->rcwsr
); i
++) {
584 rcw
= gur_in32(&gur
->rcwsr
[i
]);
586 printf("\n %08x:", i
* 4);
587 printf(" %08x", rcw
);
595 #ifdef CONFIG_FSL_ESDHC
596 int cpu_mmc_init(bd_t
*bis
)
598 return fsl_esdhc_mmc_init(bis
);
602 int cpu_eth_init(bd_t
*bis
)
606 #ifdef CONFIG_FSL_MC_ENET
607 error
= fsl_mc_ldpaa_init(bis
);
609 #ifdef CONFIG_FMAN_ENET
610 fm_standard_init(bis
);
/*
 * Post-relocation early init: apply errata workarounds, wake the
 * secondary cores, and bring up SerDes/FMAN where configured.
 */
int arch_early_init_r(void)
{
	int rv;

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	erratum_a009635();
#endif

	rv = fsl_layerscape_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
	return 0;
}
642 u32 __iomem
*cntcr
= (u32
*)CONFIG_SYS_FSL_TIMER_ADDR
;
643 #ifdef CONFIG_FSL_LSCH3
644 u32 __iomem
*cltbenr
= (u32
*)CONFIG_SYS_FSL_PMU_CLTBENR
;
646 #ifdef CONFIG_LS2080A
647 u32 __iomem
*pctbenr
= (u32
*)FSL_PMU_PCTBENR_OFFSET
;
649 #ifdef COUNTER_FREQUENCY_REAL
650 unsigned long cntfrq
= COUNTER_FREQUENCY_REAL
;
652 /* Update with accurate clock frequency */
653 asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq
) : "memory");
656 #ifdef CONFIG_FSL_LSCH3
657 /* Enable timebase for all clusters.
658 * It is safe to do so even some clusters are not enabled.
660 out_le32(cltbenr
, 0xf);
663 #ifdef CONFIG_LS2080A
665 * In certain Layerscape SoCs, the clock for each core's
666 * has an enable bit in the PMU Physical Core Time Base Enable
667 * Register (PCTBENR), which allows the watchdog to operate.
669 setbits_le32(pctbenr
, 0xff);
672 /* Enable clock for timer
673 * This is a global setting.
675 out_le32(cntcr
, 0x1);
680 void reset_cpu(ulong addr
)
682 u32 __iomem
*rstcr
= (u32
*)CONFIG_SYS_FSL_RST_ADDR
;
685 /* Raise RESET_REQ_B */
686 val
= scfg_in32(rstcr
);
688 scfg_out32(rstcr
, val
);
691 phys_size_t
board_reserve_ram_top(phys_size_t ram_size
)
693 phys_size_t ram_top
= ram_size
;
695 #ifdef CONFIG_SYS_MEM_TOP_HIDE
696 #error CONFIG_SYS_MEM_TOP_HIDE not to be used together with this function
698 /* Carve the Debug Server private DRAM block from the end of DRAM */
699 #ifdef CONFIG_FSL_DEBUG_SERVER
700 ram_top
-= debug_server_get_dram_block_size();
703 /* Carve the MC private DRAM block from the end of DRAM */
704 #ifdef CONFIG_FSL_MC_ENET
705 ram_top
-= mc_get_dram_block_size();
706 ram_top
&= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN
- 1);