/*
 * Copyright 2014-2015 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <linux/errno.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/io.h>
#include <asm/arch/fsl_serdes.h>
#include <asm/arch/soc.h>
#include <asm/arch/cpu.h>
#include <asm/arch/speed.h>
#include <asm/arch/mp.h>
#include <efi_loader.h>
#include <fm_eth.h>
#include <fsl-mc/fsl_mc.h>
#ifdef CONFIG_FSL_ESDHC
#include <fsl_esdhc.h>
#endif
#ifdef CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT
#include <asm/armv8/sec_firmware.h>
#endif
#ifdef CONFIG_SYS_FSL_DDR
#include <fsl_ddr.h>
#endif

DECLARE_GLOBAL_DATA_PTR;

struct mm_region *mem_map = early_map;

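/*
 * Build a printable CPU name from the SVR: look up the SoC personality in
 * cpu_type_list with the E bit masked off, append "E" for parts with the
 * security engine, and add the major/minor revision.
 */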
void cpu_name(char *name)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	unsigned int i, svr, ver;

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	for (i = 0; i < ARRAY_SIZE(cpu_type_list); i++)
		if ((cpu_type_list[i].soc_ver & SVR_WO_E) == ver) {
			strcpy(name, cpu_type_list[i].name);

			if (IS_E_PROCESSOR(svr))
				strcat(name, "E");

			sprintf(name + strlen(name), " Rev%d.%d",
				SVR_MAJ(svr), SVR_MIN(svr));

			break;
		}

	if (i == ARRAY_SIZE(cpu_type_list))
		strcpy(name, "unknown");
}

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24.
 * Address space above EARLY_PGTABLE_SIZE (0x5000) is free for other purposes.
 * Note, the debug print in cache_v8.c is not usable for debugging
 * these early MMU tables because UART is not yet available.
 */
static inline void early_mmu_setup(void)
{
	unsigned int el = current_el();

	/* global data is already setup, no allocation yet */
	gd->arch.tlb_addr = CONFIG_SYS_FSL_OCRAM_BASE;
	gd->arch.tlb_fillptr = gd->arch.tlb_addr;
	gd->arch.tlb_size = EARLY_PGTABLE_SIZE;

	/* Create early page tables */
	setup_pgtables();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr,
			  get_tcr(el, NULL, NULL) &
			  ~(TCR_ORGN_MASK | TCR_IRGN_MASK),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

static void fix_pcie_mmu_map(void)
{
	unsigned int i;
	u32 svr, ver;
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	svr = gur_in32(&gur->svr);
	ver = SVR_SOC_VER(svr);

	/* Fix PCIE base and size for LS2088A */
	if ((ver == SVR_LS2088A) || (ver == SVR_LS2084A) ||
	    (ver == SVR_LS2048A) || (ver == SVR_LS2044A)) {
		for (i = 0; i < ARRAY_SIZE(final_map); i++) {
			switch (final_map[i].phys) {
			case CONFIG_SYS_PCIE1_PHYS_ADDR:
				final_map[i].phys = 0x2000000000ULL;
				final_map[i].virt = 0x2000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE2_PHYS_ADDR:
				final_map[i].phys = 0x2800000000ULL;
				final_map[i].virt = 0x2800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE3_PHYS_ADDR:
				final_map[i].phys = 0x3000000000ULL;
				final_map[i].virt = 0x3000000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			case CONFIG_SYS_PCIE4_PHYS_ADDR:
				final_map[i].phys = 0x3800000000ULL;
				final_map[i].virt = 0x3800000000ULL;
				final_map[i].size = 0x800000000ULL;
				break;
			default:
				break;
			}
		}
	}
}

/*
 * The final tables look similar to early tables, but different in detail.
 * These tables are in DRAM. Sub tables are added to enable cache for
 * QBMan and OCRAM.
 *
 * Put the MMU table in secure memory if gd->arch.secure_ram is valid.
 * OCRAM will not be used for this purpose, so gd->arch.secure_ram can't be 0.
 */
static inline void final_mmu_setup(void)
{
	u64 tlb_addr_save = gd->arch.tlb_addr;
	unsigned int el = current_el();
	int index;

	/* fix the final_map before filling in the block entries */
	fix_pcie_mmu_map();

	mem_map = final_map;

	/* Update mapping for DDR to actual size */
	for (index = 0; index < ARRAY_SIZE(final_map) - 2; index++) {
		/*
		 * Find the entry for DDR mapping and update the address and
		 * size. Zero-sized mapping will be skipped when creating MMU
		 * table.
		 */
		switch (final_map[index].virt) {
		case CONFIG_SYS_FSL_DRAM_BASE1:
			final_map[index].virt = gd->bd->bi_dram[0].start;
			final_map[index].phys = gd->bd->bi_dram[0].start;
			final_map[index].size = gd->bd->bi_dram[0].size;
			break;
#ifdef CONFIG_SYS_FSL_DRAM_BASE2
		case CONFIG_SYS_FSL_DRAM_BASE2:
#if (CONFIG_NR_DRAM_BANKS >= 2)
			final_map[index].virt = gd->bd->bi_dram[1].start;
			final_map[index].phys = gd->bd->bi_dram[1].start;
			final_map[index].size = gd->bd->bi_dram[1].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
#ifdef CONFIG_SYS_FSL_DRAM_BASE3
		case CONFIG_SYS_FSL_DRAM_BASE3:
#if (CONFIG_NR_DRAM_BANKS >= 3)
			final_map[index].virt = gd->bd->bi_dram[2].start;
			final_map[index].phys = gd->bd->bi_dram[2].start;
			final_map[index].size = gd->bd->bi_dram[2].size;
#else
			final_map[index].size = 0;
#endif
			break;
#endif
		default:
			break;
		}
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	if (gd->arch.secure_ram & MEM_RESERVE_SECURE_MAINTAINED) {
		/*
		 * Only use gd->arch.secure_ram if the address is
		 * recalculated. Align to 4KB for MMU table.
		 */
		/* put page tables in secure ram */
		index = ARRAY_SIZE(final_map) - 2;
		gd->arch.tlb_addr = gd->arch.secure_ram & ~0xfff;
		final_map[index].virt = gd->arch.secure_ram & ~0x3;
		final_map[index].phys = final_map[index].virt;
		final_map[index].size = CONFIG_SYS_MEM_RESERVE_SECURE;
		final_map[index].attrs = PTE_BLOCK_OUTER_SHARE;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_SECURED;
		tlb_addr_save = gd->arch.tlb_addr;
	} else {
		/* Use allocated (board_f.c) memory for TLB */
		tlb_addr_save = gd->arch.tlb_allocated;
		gd->arch.tlb_addr = tlb_addr_save;
	}
#endif

	/* Reset the fill ptr */
	gd->arch.tlb_fillptr = tlb_addr_save;

	/* Create normal system page tables */
	setup_pgtables();

	/* Create emergency page tables */
	gd->arch.tlb_addr = gd->arch.tlb_fillptr;
	gd->arch.tlb_emerg = gd->arch.tlb_addr;
	setup_pgtables();
	gd->arch.tlb_addr = tlb_addr_save;

	/* Disable cache and MMU */
	dcache_disable();	/* TLBs are invalidated */
	invalidate_icache_all();

	/* point TTBR to the new table */
	set_ttbr_tcr_mair(el, gd->arch.tlb_addr, get_tcr(el, NULL, NULL),
			  MEMORY_ATTRIBUTES);

	set_sctlr(get_sctlr() | CR_M);
}

u64 get_page_table_size(void)
{
	return 0x10000;
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}

/*
 * This function is called from common/board_r.c.
 * It recreates the MMU table in main memory.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
	icache_enable();
	dcache_enable();
}
#endif	/* !CONFIG_SYS_DCACHE_OFF */

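/*
 * Decode the initiator type for position init_id within a topology cluster;
 * returns the type word when its "available" bit is set, otherwise 0.
 */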
u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type;

	type = gur_in32(&gur->tp_ityp[idx]);
	if (type & TP_ITYP_AV)
		return type;

	return 0;
}

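/*
 * Walk the cluster topology registers and return a bitmask of the physical
 * core positions that hold ARM cores.
 */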
u32 cpu_pos_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type && (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM))
				mask |= 1 << (i * TP_INIT_PER_CLUSTER + j);
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

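/*
 * Return a mask with one bit per available initiator, set when that
 * initiator is an ARM core; used with cpu_numcores() and for_each_cpu().
 */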
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

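/* Map a logical core number to the index of the cluster containing it. */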
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = gur_in32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) == 0x0);

	return -1;	/* cannot identify the cluster */
}

#ifndef CONFIG_FSL_LSCH3
uint get_svr(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);

	return gur_in32(&gur->svr);
}
#endif

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type, rcw, svr = gur_in32(&gur->svr);

	puts("SoC: ");

	cpu_name(buf);
	printf(" %s (0x%x)\n", buf, svr);
	memset((u8 *)buf, 0x00, ARRAY_SIZE(buf));
	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" :
		       (type == TY_ITYP_VER_A72 ? "A72" : "   "))),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	/* Display platform clock as Bus frequency. */
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus / CONFIG_SYS_FSL_PCLK_DIV));
	printf("DDR:      %-4s MT/s", strmhz(buf, sysinfo.freq_ddrbus));
#ifdef CONFIG_SYS_DPAA_FMAN
	printf("  FMAN:     %-4s MHz", strmhz(buf, sysinfo.freq_fman[0]));
#endif
#ifdef CONFIG_SYS_FSL_HAS_DP_DDR
	if (soc_has_dp_ddr()) {
		printf("     DP-DDR:   %-4s MT/s",
		       strmhz(buf, sysinfo.freq_ddrbus2));
	}
#endif
	puts("\n");

	/*
	 * Display the RCW, so that no one gets confused as to what RCW
	 * we're actually using for this boot.
	 */
	puts("Reset Configuration Word (RCW):");
	for (i = 0; i < ARRAY_SIZE(gur->rcwsr); i++) {
		rcw = gur_in32(&gur->rcwsr[i]);

		if ((i % 4) == 0)
			printf("\n       %08x:", i * 4);
		printf(" %08x", rcw);
	}
	puts("\n");

	return 0;
}
#endif

#ifdef CONFIG_FSL_ESDHC
int cpu_mmc_init(bd_t *bis)
{
	return fsl_esdhc_mmc_init(bis);
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
#ifdef CONFIG_FMAN_ENET
	fm_standard_init(bis);
#endif

	return error;
}

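/*
 * Early post-relocation init: apply DDR/SoC errata workarounds and, when
 * the secure firmware reports no PSCI support (the version reads back as
 * 0xffffffff), wake the secondary cores directly from U-Boot.
 */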
int arch_early_init_r(void)
{
	int rv = 1;
	u32 psci_ver = 0xffffffff;

#ifdef CONFIG_SYS_FSL_ERRATUM_A009635
	u32 svr_dev_id;
	/*
	 * erratum A009635 is valid only for LS2080A SoC and
	 * its personalities
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		erratum_a009635();
#endif
#if defined(CONFIG_SYS_FSL_ERRATUM_A009942) && defined(CONFIG_SYS_FSL_DDR)
	erratum_a009942_check_cpo();
#endif

#if defined(CONFIG_ARMV8_SEC_FIRMWARE_SUPPORT) && \
	defined(CONFIG_SEC_FIRMWARE_ARMV8_PSCI)
	/* Check the psci version to determine if psci is supported */
	psci_ver = sec_firmware_support_psci_version();
#endif
	if (psci_ver == 0xffffffff) {
		rv = fsl_layerscape_wake_seconday_cores();
		if (rv)
			printf("Did not wake secondary cores\n");
	}

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
#ifdef CONFIG_FMAN_ENET
	fman_enet_init();
#endif

	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
#ifdef CONFIG_FSL_LSCH3
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#endif
#ifdef CONFIG_LS2080A
	u32 __iomem *pctbenr = (u32 *)FSL_PMU_PCTBENR_OFFSET;
	u32 svr_dev_id;
#endif
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

#ifdef CONFIG_FSL_LSCH3
	/* Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);
#endif

#ifdef CONFIG_LS2080A
	/*
	 * In certain Layerscape SoCs, the clock for each core has an enable
	 * bit in the PMU Physical Core Time Base Enable Register (PCTBENR),
	 * which allows the watchdog to operate.
	 */
	setbits_le32(pctbenr, 0xff);
	/*
	 * For the LS2080A SoC and its personalities, the timer controller
	 * offset is different.
	 */
	svr_dev_id = get_svr() >> 16;
	if (svr_dev_id == SVR_DEV_LS2080A)
		cntcr = (u32 *)SYS_FSL_LS2080A_LS2085A_TIMER_ADDR;
#endif

	/* Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}

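/*
 * The reset request register pointer and reset_cpu() are marked
 * __efi_runtime so the EFI reset handler below can still use them after
 * U-Boot hands over control.
 */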
__efi_runtime_data u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;

void __efi_runtime reset_cpu(ulong addr)
{
	u32 val;

	/* Raise RESET_REQ_B */
	val = scfg_in32(rstcr);
	val |= 0x02;
	scfg_out32(rstcr, val);
}

#ifdef CONFIG_EFI_LOADER

void __efi_runtime EFIAPI efi_reset_system(
		       enum efi_reset_type reset_type,
		       efi_status_t reset_status,
		       unsigned long data_size, void *reset_data)
{
	switch (reset_type) {
	case EFI_RESET_COLD:
	case EFI_RESET_WARM:
		reset_cpu(0);
		break;
	case EFI_RESET_SHUTDOWN:
		/* Nothing we can do */
		break;
	}

	while (1) { }
}

void efi_reset_system_init(void)
{
	efi_add_runtime_mmio(&rstcr, sizeof(*rstcr));
}

#endif

phys_size_t board_reserve_ram_top(phys_size_t ram_size)
{
	phys_size_t ram_top = ram_size;

#ifdef CONFIG_FSL_MC_ENET
	/* The start address of MC reserved memory needs to be aligned. */
	ram_top -= mc_get_dram_block_size();
	ram_top &= ~(CONFIG_SYS_MC_RSV_MEM_ALIGN - 1);
#endif

	return ram_size - ram_top;
}

phys_size_t get_effective_memsize(void)
{
	phys_size_t ea_size, rem = 0;

	/*
	 * For ARMv8 SoCs, DDR memory is split into two or three regions. The
	 * first region is 2GB space at 0x8000_0000. If the memory extends to
	 * the second region (or the third region if applicable), the secure
	 * memory and Management Complex (MC) memory should be put into the
	 * highest region, i.e. the end of DDR memory. CONFIG_MAX_MEM_MAPPED
	 * is set to the size of the first region so U-Boot doesn't relocate
	 * itself into a higher address. Should DDR be configured to skip the
	 * first region, this function needs to be adjusted.
	 */
	if (gd->ram_size > CONFIG_MAX_MEM_MAPPED) {
		ea_size = CONFIG_MAX_MEM_MAPPED;
		rem = gd->ram_size - ea_size;
	} else {
		ea_size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
	/* Check if we have enough space for secure memory */
	if (rem > CONFIG_SYS_MEM_RESERVE_SECURE) {
		rem -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else {
		if (ea_size > CONFIG_SYS_MEM_RESERVE_SECURE) {
			ea_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
			rem = 0;	/* Presume MC requires more memory */
		} else {
			printf("Error: Not enough space for secure memory.\n");
		}
	}
#endif
	/* Check if we have enough memory for MC */
	if (rem < board_reserve_ram_top(rem)) {
		/* Not enough memory in high region to reserve */
		if (ea_size > board_reserve_ram_top(rem))
			ea_size -= board_reserve_ram_top(rem);
		else
			printf("Error: Not enough space for reserved memory.\n");
	}

	return ea_size;
}

void dram_init_banksize(void)
{
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
	phys_size_t dp_ddr_size;
#endif

	/*
	 * gd->ram_size has the total size of DDR memory, less reserved secure
	 * memory. The DDR extends from low region to high region(s) presuming
	 * no hole is created with DDR configuration. gd->arch.secure_ram tracks
	 * the location of secure memory. gd->arch.resv_ram tracks the location
	 * of reserved memory for Management Complex (MC).
	 */
	gd->bd->bi_dram[0].start = CONFIG_SYS_SDRAM_BASE;
	if (gd->ram_size > CONFIG_SYS_DDR_BLOCK1_SIZE) {
		gd->bd->bi_dram[0].size = CONFIG_SYS_DDR_BLOCK1_SIZE;
		gd->bd->bi_dram[1].start = CONFIG_SYS_DDR_BLOCK2_BASE;
		gd->bd->bi_dram[1].size = gd->ram_size -
					  CONFIG_SYS_DDR_BLOCK1_SIZE;
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
		if (gd->bd->bi_dram[1].size > CONFIG_SYS_DDR_BLOCK2_SIZE) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DDR_BLOCK3_BASE;
			gd->bd->bi_dram[2].size = gd->bd->bi_dram[1].size -
						  CONFIG_SYS_DDR_BLOCK2_SIZE;
			gd->bd->bi_dram[1].size = CONFIG_SYS_DDR_BLOCK2_SIZE;
		}
#endif
	} else {
		gd->bd->bi_dram[0].size = gd->ram_size;
	}

#ifdef CONFIG_SYS_MEM_RESERVE_SECURE
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
		gd->bd->bi_dram[2].size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		gd->arch.secure_ram = gd->bd->bi_dram[2].start +
				      gd->bd->bi_dram[2].size;
		gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
		gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >= CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[1].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[1].start +
					      gd->bd->bi_dram[1].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		} else if (gd->bd->bi_dram[0].size >
					CONFIG_SYS_MEM_RESERVE_SECURE) {
			gd->bd->bi_dram[0].size -=
					CONFIG_SYS_MEM_RESERVE_SECURE;
			gd->arch.secure_ram = gd->bd->bi_dram[0].start +
					      gd->bd->bi_dram[0].size;
			gd->arch.secure_ram |= MEM_RESERVE_SECURE_MAINTAINED;
			gd->ram_size -= CONFIG_SYS_MEM_RESERVE_SECURE;
		}
	}
#endif	/* CONFIG_SYS_MEM_RESERVE_SECURE */

#ifdef CONFIG_FSL_MC_ENET
	/* Assign memory for MC */
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
	if (gd->bd->bi_dram[2].size >=
	    board_reserve_ram_top(gd->bd->bi_dram[2].size)) {
		gd->arch.resv_ram = gd->bd->bi_dram[2].start +
			gd->bd->bi_dram[2].size -
			board_reserve_ram_top(gd->bd->bi_dram[2].size);
	} else
#endif
	{
		if (gd->bd->bi_dram[1].size >=
		    board_reserve_ram_top(gd->bd->bi_dram[1].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[1].start +
				gd->bd->bi_dram[1].size -
				board_reserve_ram_top(gd->bd->bi_dram[1].size);
		} else if (gd->bd->bi_dram[0].size >
			   board_reserve_ram_top(gd->bd->bi_dram[0].size)) {
			gd->arch.resv_ram = gd->bd->bi_dram[0].start +
				gd->bd->bi_dram[0].size -
				board_reserve_ram_top(gd->bd->bi_dram[0].size);
		}
	}
#endif	/* CONFIG_FSL_MC_ENET */

#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
	if (soc_has_dp_ddr()) {
		/* initialize DP-DDR here */
		puts("DP-DDR:  ");
		/*
		 * The DDR controller uses 0 as the base address for binding.
		 * It is mapped to CONFIG_SYS_DP_DDR_BASE for the core to
		 * access.
		 */
		dp_ddr_size = fsl_other_ddr_sdram(CONFIG_SYS_DP_DDR_BASE_PHY,
					CONFIG_DP_DDR_CTRL,
					CONFIG_DP_DDR_NUM_CTRLS,
					CONFIG_DP_DDR_DIMM_SLOTS_PER_CTLR,
					NULL, NULL, NULL);
		if (dp_ddr_size) {
			gd->bd->bi_dram[2].start = CONFIG_SYS_DP_DDR_BASE;
			gd->bd->bi_dram[2].size = dp_ddr_size;
		} else {
			puts("Not detected");
		}
	}
#endif
}

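/*
 * Report each DRAM bank to the EFI memory map, skipping DP-DDR and trimming
 * any bank that contains the MC reserved region (gd->arch.resv_ram).
 */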
#if defined(CONFIG_EFI_LOADER) && !defined(CONFIG_SPL_BUILD)
void efi_add_known_memory(void)
{
	int i;
	phys_addr_t ram_start, start;
	phys_size_t ram_size;
	u64 pages;

	for (i = 0; i < CONFIG_NR_DRAM_BANKS; i++) {
#ifdef CONFIG_SYS_DP_DDR_BASE_PHY
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#error "This SoC shouldn't have DP DDR"
#endif
		if (i == 2)
			continue;	/* skip DP-DDR */
#endif
		ram_start = gd->bd->bi_dram[i].start;
		ram_size = gd->bd->bi_dram[i].size;
#ifdef CONFIG_RESV_RAM
		if (gd->arch.resv_ram >= ram_start &&
		    gd->arch.resv_ram < ram_start + ram_size)
			ram_size = gd->arch.resv_ram - ram_start;
#endif
		start = (ram_start + EFI_PAGE_MASK) & ~EFI_PAGE_MASK;
		pages = (ram_size + EFI_PAGE_MASK) >> EFI_PAGE_SHIFT;

		efi_add_memory_map(start, pages, EFI_CONVENTIONAL_MEMORY,
				   false);
	}
}
#endif

/*
 * Before the DDR size is known, the early MMU table has DDR mapped as device
 * memory to avoid speculative access. To relocate U-Boot to DDR, "normal
 * memory" needs to be set for these mappings.
 * If a special case configures DDR with holes in the mapping, the holes need
 * to be marked as invalid. This is not implemented in this function.
 */
void update_early_mmu_table(void)
{
	if (!gd->arch.tlb_addr)
		return;

	if (gd->ram_size <= CONFIG_SYS_FSL_DRAM_SIZE1) {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					gd->ram_size,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
	} else {
		mmu_change_region_attr(
					CONFIG_SYS_SDRAM_BASE,
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
#ifdef CONFIG_SYS_DDR_BLOCK3_BASE
#ifndef CONFIG_SYS_DDR_BLOCK2_SIZE
#error "Missing CONFIG_SYS_DDR_BLOCK2_SIZE"
#endif
		if (gd->ram_size - CONFIG_SYS_DDR_BLOCK1_SIZE >
		    CONFIG_SYS_DDR_BLOCK2_SIZE) {
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK3_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE -
					CONFIG_SYS_DDR_BLOCK2_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		} else
#endif
		{
			mmu_change_region_attr(
					CONFIG_SYS_DDR_BLOCK2_BASE,
					gd->ram_size -
					CONFIG_SYS_DDR_BLOCK1_SIZE,
					PTE_BLOCK_MEMTYPE(MT_NORMAL)	|
					PTE_BLOCK_OUTER_SHARE		|
					PTE_BLOCK_NS			|
					PTE_TYPE_VALID);
		}
	}
}

__weak int dram_init(void)
{
	gd->ram_size = initdram();
#if !defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD)
	/* This will break-before-make MMU for DDR */
	update_early_mmu_table();
#endif

	return 0;
}