// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <asm/io.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <fsl_immap.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#include <asm/armv8/sec_firmware.h>
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif
#include <asm/arch/clock.h>
#include <hwconfig.h>
#include <fsl_qbman.h>

DECLARE_GLOBAL_DATA_PTR;
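
/*
 * mem_map starts out pointing at the SRAM-resident early_map;
 * final_mmu_setup() switches it to final_map in DRAM.
 */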
struct mm_region *mem_map = early_map;

void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));
			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Addresses above EARLY_PGTABLE_SIZE (0x5000) are free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/*
	 * Point TTBR to the new table. Table walks are made non-cacheable
	 * here (TCR ORGN/IRGN cleared) because the D-cache is not on yet.
	 */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
#ifdef CONFIG_ARCH_LS2080A
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A) ||
	    (ver == SVR_LS2081A) || (ver == SVR_LS2041A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
#endif
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		if (el == 3) {
			/*
			 * Only use gd->arch.secure_ram if the address is
			 * recalculated. Align to 4KB for MMU table.
			 */
			/* put page tables in secure ram */
			index = ARRAY_SIZE(final_map) - 2;
			gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
			final_map[index].virt = gd->arch.secure_ram & ~0x3;
			final_map[index].phys = final_map[index].virt;
			final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
			final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
			tlb_addr_save = gd->arch.tlb_addr;
		} else {
			/* Use allocated (board_f.c) memory for TLB */
			tlb_addr_save = gd->arch.tlb_allocated;
			gd->arch.tlb_addr = tlb_addr_save;
		}
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	/* Fixed reservation for the final MMU tables */
	return 0x10000;
}

int arch_cpu_init(void)
{
	/*
	 * This function is called before U-Boot relocates itself to speed up
	 * system running. It is not necessary to run if performance is not
	 * critical. Skip if MMU is already enabled by SPL or other means.
	 */
	if (get_sctlr() & CR_M)
		return 0;

	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_SYS_DCACHE_OFF */
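
/*
 * Decode the initiator type at position init_id within a topology
 * cluster word: return the TP_ITYP register value when the initiator
 * is marked available, or 0 otherwise.
 */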
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
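
/*
 * Return a mask of ARM cores by physical position in the topology:
 * bit (i * TP_INIT_PER_CLUSTER + j) is set for every available ARM
 * initiator, so the mask may contain holes.
 */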
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}
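
/* Return a mask of available ARM cores, numbered consecutively */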
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
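
/* Map a logical core number to the index of its cluster */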
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}
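
/* Return the topology initiator-type word for a logical core number */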
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
u32 get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif
	return error;
}
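
/* Return 0 when secure firmware reports a valid PSCI version, 1 otherwise */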
static inline int check_psci(void)
{
	unsigned int psci_ver;

	psci_ver = sec_firmware_support_psci_version();
	if (psci_ver == PSCI_INVALID_VER)
		return 1;

	return 0;
}
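
/*
 * Optionally disable the hardware prefetcher on selected cores, driven
 * by the "core_prefetch:disable=<mask>" subargument of the hwconfig
 * environment variable. For example (illustrative value):
 *
 *   => setenv hwconfig 'core_prefetch:disable=0x6'
 *
 * requests prefetch disable on cores 1 and 2; bit 0 (core 0) must stay
 * cleared. The request is passed to secure firmware via an SiP SMC call.
 */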
static void config_core_prefetch(void)
{
	char *buf = NULL;
	char buffer[HWCONFIG_BUFFER_SIZE];
	const char *prefetch_arg = NULL;
	size_t arglen;
	unsigned int mask;
	struct pt_regs regs;

	if (env_get_f("hwconfig", buffer, sizeof(buffer)) > 0)
		buf = buffer;

	prefetch_arg = hwconfig_subarg_f("core_prefetch", "disable",
					 &arglen, buf);

	if (prefetch_arg) {
		mask = simple_strtoul(prefetch_arg, NULL, 0) & 0xff;
		if (mask & 0x1) {
			printf("Core0 prefetch can't be disabled\n");
			return;
		}

#define SIP_PREFETCH_DISABLE_64 0xC200FF13
		regs.regs[0] = SIP_PREFETCH_DISABLE_64;
		regs.regs[1] = mask;
		smc_call(&regs);

		if (regs.regs[0])
			printf("Prefetch disable config failed for mask ");
		else
			printf("Prefetch disable config passed for mask ");
		printf("0x%x\n", mask);
	}
}

int arch_early_init_r(void)
{
#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif
	if (check_psci()) {
		debug("PSCI: PSCI does not exist.\n");

		/* if PSCI does not exist, boot secondary cores here */
		if (fsl_layerscape_wake_seconday_cores())
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_FSL_HAS_RGMII
	fsl_rgmii_init();
#endif

	config_core_prefetch();

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif
#ifdef CONFIG_SYS_DPAA_QBMAN
	setup_qbman_portals();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	if (current_el() == 3)
		asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#if defined(CONFIG_ARCH_LS2080A) || defined(CONFIG_ARCH_LS1088A)
	/*
	 * In certain Layerscape SoCs, the clock for each core's watchdog
	 * has an enable bit in the PMU Physical Core Time Base Enable
	 * Register (PCTBENR), which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr();
	if (IS_SVR_DEV(svr_dev_id, SVR_DEV(SVR_LS2080A)))
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer. This is a global setting. */
	out_le32(cntcr, 0x1);

	return 0;
}

__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
	case EFI_RESET_PLATFORM_SPECIFIC:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

efi_status_t efi_reset_system_init(void)
{
	return efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

/*
 * Calculate reserved memory within the given memory bank.
 * Return the aligned reserved memory size on success,
 * or (ram_size + needed size) on failure.
 */
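/*
 * Worked example (illustrative values): with ram_size = 0x80000000 and
 * an MC block size of 0x20000000, ram_top first becomes 0x60000000 (the
 * reservation start), is then aligned down to CONFIG_SYS_MC_RSV_MEM_ALIGN,
 * and the function returns ram_size - ram_top, i.e. the size reserved at
 * the top of the bank.
 */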
phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	ram_top = mc_get_dram_block_size();
	if (ram_top > ram_size)
		return ram_size + ram_top;

	ram_top = ram_size - ram_top;
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. Secure memory needs to be
	 * allocated from the first region. If the memory extends to the second
	 * region (or the third region if applicable), Management Complex (MC)
	 * memory should be put into the highest region, i.e. the end of DDR
	 * memory. CONFIG_MAX_MEM_MAPPED is set to the size of the first region
	 * so U-Boot doesn't relocate itself into a higher address. Should DDR
	 * be configured to skip the first region, this function needs to be
	 * adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE)
		ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	else
		printf("Error: Not enough space for secure memory.\n");
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(ea_size))
			ea_size -= board_reserve_ram_top(ea_size);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

int dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
	 * the location of secure memory. gd->arch.resv_ram tracks the location
	 * of reserved memory for Management Complex (MC). Because gd->ram_size
	 * is reduced by this function if secure memory is reserved, checking
	 * gd->arch.secure_ram should be done to avoid running it repeatedly.
	 */
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		debug("No need to run again, skip %s\n", __func__);

		return 0;
	}
#endif

	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}
#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->bd->bi_dram[0].size >
				CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[0].size -=
				CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[0].start +
				      gd->bd->bi_dram[0].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#if defined(CONFIG_FSL_MC_ENET) && !defined(CONFIG_SPL_BUILD)
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			gd->bd->bi_dram[2].size -
			board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					CONFIG_DP_DDR_CTRL,
					CONFIG_DP_DDR_NUM_CTRLS,
					CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
		puts("\n");
	}
#endif

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	debug("%s is called. gd->ram_size is reduced to %lu\n",
	      __func__, (ulong)gd->ram_size);
#endif

	return 0;
}

#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	/* Add RAM */
	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	fsl_initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}