1 // SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2012-2015 Panasonic Corporation
 * Copyright (C) 2015-2017 Socionext Inc.
 *   Author: Masahiro Yamada <yamada.masahiro@socionext.com>
 */
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/global_data.h>
#include <asm/u-boot.h>

#include "init.h"
#include "sg-regs.h"
#include "soc-info.h"
DECLARE_GLOBAL_DATA_PTR;
/*
 * One DRAM channel as decoded from the MEMCONF register:
 * physical base address and size in bytes.
 */
struct uniphier_dram_map {
	unsigned long base;
	unsigned long size;
};
28 static int uniphier_memconf_decode(struct uniphier_dram_map
*dram_map
,
29 unsigned long sparse_ch1_base
, bool have_ch2
)
34 val
= readl(sg_base
+ SG_MEMCONF
);
37 dram_map
[0].base
= 0x80000000;
39 switch (val
& SG_MEMCONF_CH0_SZ_MASK
) {
40 case SG_MEMCONF_CH0_SZ_64M
:
43 case SG_MEMCONF_CH0_SZ_128M
:
46 case SG_MEMCONF_CH0_SZ_256M
:
49 case SG_MEMCONF_CH0_SZ_512M
:
52 case SG_MEMCONF_CH0_SZ_1G
:
56 pr_err("error: invalid value is set to MEMCONF ch0 size\n");
60 if ((val
& SG_MEMCONF_CH0_NUM_MASK
) == SG_MEMCONF_CH0_NUM_2
)
63 dram_map
[0].size
= size
;
66 dram_map
[1].base
= dram_map
[0].base
+ size
;
68 if (val
& SG_MEMCONF_SPARSEMEM
) {
69 if (dram_map
[1].base
> sparse_ch1_base
) {
70 pr_warn("Sparse mem is enabled, but ch0 and ch1 overlap\n");
71 pr_warn("Only ch0 is available\n");
76 dram_map
[1].base
= sparse_ch1_base
;
79 switch (val
& SG_MEMCONF_CH1_SZ_MASK
) {
80 case SG_MEMCONF_CH1_SZ_64M
:
83 case SG_MEMCONF_CH1_SZ_128M
:
86 case SG_MEMCONF_CH1_SZ_256M
:
89 case SG_MEMCONF_CH1_SZ_512M
:
92 case SG_MEMCONF_CH1_SZ_1G
:
96 pr_err("error: invalid value is set to MEMCONF ch1 size\n");
100 if ((val
& SG_MEMCONF_CH1_NUM_MASK
) == SG_MEMCONF_CH1_NUM_2
)
103 dram_map
[1].size
= size
;
105 if (!have_ch2
|| val
& SG_MEMCONF_CH2_DISABLE
)
109 dram_map
[2].base
= dram_map
[1].base
+ size
;
111 switch (val
& SG_MEMCONF_CH2_SZ_MASK
) {
112 case SG_MEMCONF_CH2_SZ_64M
:
115 case SG_MEMCONF_CH2_SZ_128M
:
118 case SG_MEMCONF_CH2_SZ_256M
:
121 case SG_MEMCONF_CH2_SZ_512M
:
124 case SG_MEMCONF_CH2_SZ_1G
:
128 pr_err("error: invalid value is set to MEMCONF ch2 size\n");
132 if ((val
& SG_MEMCONF_CH2_NUM_MASK
) == SG_MEMCONF_CH2_NUM_2
)
135 dram_map
[2].size
= size
;
140 static int uniphier_ld4_dram_map_get(struct uniphier_dram_map dram_map
[])
142 return uniphier_memconf_decode(dram_map
, 0xc0000000, false);
145 static int uniphier_pro4_dram_map_get(struct uniphier_dram_map dram_map
[])
147 return uniphier_memconf_decode(dram_map
, 0xa0000000, false);
150 static int uniphier_pxs2_dram_map_get(struct uniphier_dram_map dram_map
[])
152 return uniphier_memconf_decode(dram_map
, 0xc0000000, true);
155 struct uniphier_dram_init_data
{
157 int (*dram_map_get
)(struct uniphier_dram_map dram_map
[]);
160 static const struct uniphier_dram_init_data uniphier_dram_init_data
[] = {
162 .soc_id
= UNIPHIER_LD4_ID
,
163 .dram_map_get
= uniphier_ld4_dram_map_get
,
166 .soc_id
= UNIPHIER_PRO4_ID
,
167 .dram_map_get
= uniphier_pro4_dram_map_get
,
170 .soc_id
= UNIPHIER_SLD8_ID
,
171 .dram_map_get
= uniphier_ld4_dram_map_get
,
174 .soc_id
= UNIPHIER_PRO5_ID
,
175 .dram_map_get
= uniphier_ld4_dram_map_get
,
178 .soc_id
= UNIPHIER_PXS2_ID
,
179 .dram_map_get
= uniphier_pxs2_dram_map_get
,
182 .soc_id
= UNIPHIER_LD6B_ID
,
183 .dram_map_get
= uniphier_pxs2_dram_map_get
,
186 .soc_id
= UNIPHIER_LD11_ID
,
187 .dram_map_get
= uniphier_ld4_dram_map_get
,
190 .soc_id
= UNIPHIER_LD20_ID
,
191 .dram_map_get
= uniphier_pxs2_dram_map_get
,
194 .soc_id
= UNIPHIER_PXS3_ID
,
195 .dram_map_get
= uniphier_pxs2_dram_map_get
,
198 UNIPHIER_DEFINE_SOCDATA_FUNC(uniphier_get_dram_init_data
,
199 uniphier_dram_init_data
)
201 static int uniphier_dram_map_get(struct uniphier_dram_map
*dram_map
)
203 const struct uniphier_dram_init_data
*data
;
205 data
= uniphier_get_dram_init_data();
207 pr_err("unsupported SoC\n");
211 return data
->dram_map_get(dram_map
);
216 struct uniphier_dram_map dram_map
[3] = {};
217 bool valid_bank_found
= false;
218 unsigned long prev_top
;
223 ret
= uniphier_dram_map_get(dram_map
);
227 for (i
= 0; i
< ARRAY_SIZE(dram_map
); i
++) {
228 unsigned long max_size
;
230 if (!dram_map
[i
].size
)
234 * U-Boot relocates itself to the tail of the memory region,
235 * but it does not expect sparse memory. We use the first
236 * contiguous chunk here.
238 if (valid_bank_found
&& prev_top
< dram_map
[i
].base
)
242 * Do not use memory that exceeds 32bit address range. U-Boot
243 * relocates itself to the end of the effectively available RAM.
244 * This could be a problem for DMA engines that do not support
245 * 64bit address (SDMA of SDHCI, UniPhier AV-ether, etc.)
247 if (dram_map
[i
].base
>= 1ULL << 32)
250 max_size
= (1ULL << 32) - dram_map
[i
].base
;
252 gd
->ram_size
= min(dram_map
[i
].size
, max_size
);
254 if (!valid_bank_found
)
255 gd
->ram_base
= dram_map
[i
].base
;
257 prev_top
= dram_map
[i
].base
+ dram_map
[i
].size
;
258 valid_bank_found
= true;
262 * LD20 uses the last 64 byte for each channel for dynamic
265 if (uniphier_get_soc_id() == UNIPHIER_LD20_ID
)
271 int dram_init_banksize(void)
273 struct uniphier_dram_map dram_map
[3] = {};
274 unsigned long base
, top
;
275 bool valid_bank_found
= false;
278 ret
= uniphier_dram_map_get(dram_map
);
282 for (i
= 0; i
< ARRAY_SIZE(dram_map
); i
++) {
283 if (i
< ARRAY_SIZE(gd
->bd
->bi_dram
)) {
284 gd
->bd
->bi_dram
[i
].start
= dram_map
[i
].base
;
285 gd
->bd
->bi_dram
[i
].size
= dram_map
[i
].size
;
288 if (!dram_map
[i
].size
)
291 if (!valid_bank_found
)
292 base
= dram_map
[i
].base
;
293 top
= dram_map
[i
].base
+ dram_map
[i
].size
;
294 valid_bank_found
= true;
297 if (!valid_bank_found
)
300 /* map all the DRAM regions */
301 uniphier_mem_map_init(base
, top
- base
);