/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#include <asm/arch/fsl_serdes.h>
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create the MMU table
 * in SRAM. The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We
 * use three levels of translation tables here to cover the 40-bit
 * address space.
 * We use a 4KB granule size, with 40-bit physical addresses, T0SZ=24.
 *   Level 0 IA[39],    table address @0
 *   Level 1 IA[38:30], table address @0x1000, 0x2000
 *   Level 2 IA[29:21], table address @0x3000, 0x4000
 * Addresses above 0x5000 are free for other purposes.
 */
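
/*
 * Worked example of the index math above (a reference sketch derived
 * from the 4KB-granule layout, not from a TRM): a 40-bit VA splits as
 *   L0 index = VA >> 39                 (bit 39, 2 entries used)
 *   L1 index = (VA >> 30) & 0x1ff       (1GB per entry)
 *   L2 index = (VA >> 21) & 0x1ff       (2MB per entry)
 * e.g. VA 0x30000000 (the IFC base below) -> L0 0, L1 0, L2 0x180.
 */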

#define SECTION_SHIFT_L0	39UL
#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE	0x30000000
#define CONFIG_SYS_IFC_SIZE	0x10000000
#define CONFIG_SYS_IFC_BASE2	0x500000000
#define CONFIG_SYS_IFC_SIZE2	0x100000000
#define TCR_EL2_PS_40BIT	(2 << 16)
#define LSCH3_VA_BITS		(40)
#define LSCH3_TCR	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_NON		| \
			TCR_ORGN_NC		| \
			TCR_IRGN_NC		| \
			TCR_T0SZ(LSCH3_VA_BITS))
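
/*
 * Field-by-field reading of LSCH3_TCR (a sketch; the macro encodings
 * are assumed to follow asm/armv8/mmu.h): TCR_TG0_4K selects the 4KB
 * granule, TCR_EL2_PS_40BIT (2 << 16) sets a 40-bit physical address
 * size, the walk is configured non-shared and non-cacheable for this
 * early stage, and TCR_T0SZ(40) yields T0SZ = 64 - 40 = 24, matching
 * the T0SZ=24 noted in the layout comment above.
 */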

/*
 * Final MMU
 * Let's start from the same layout as the early MMU and modify it as
 * needed. The IFC regions will be cache-inhibited.
 */
#define FINAL_QBMAN_CACHED_MEM	0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE	0x4000000

static inline void early_mmu_setup(void)
{
	int el;
	u64 i;
	u64 section_l1t0, section_l1t1, section_l2t0, section_l2t1;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);
	u64 *level2_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * Set level 1 table 0 to cache-inhibited, covering 0 to 512GB.
	 * Set level 1 table 1 to cache-enabled, covering 512GB to 1TB.
	 * Set level 2 table 0 to cache-inhibited, covering 0 to 1GB.
	 * Set level 2 table 1 to cache-inhibited, covering the region
	 * at CONFIG_SYS_FLASH_BASE.
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;
	section_l2t0 = 0;
	section_l2t1 = CONFIG_SYS_FLASH_BASE;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level2_table_1, i, section_l2t1,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2t0 += BLOCK_SIZE_L2;
		section_l2t1 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[1] =
		0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_DEVICE_NGNRNE);
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
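
	/*
	 * The three 1GB block entries above use index = VA >> 30:
	 * entry 1 covers 0x40000000-0x7fffffff as device memory, and
	 * entries 2 and 3 cover 0x80000000-0xffffffff as normal
	 * (cacheable) memory. Which peripherals or DDR live in those
	 * windows is SoC/board-specific and assumed here.
	 */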

	/* Rewrite the table to enable cache for OCRAM */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

#if defined(CONFIG_SYS_NOR0_CSPR_EARLY) && defined(CONFIG_SYS_NOR_AMASK_EARLY)
	/* Rewrite table to enable cache for two entries (4MB) */
	section_l2t1 = CONFIG_SYS_IFC_BASE;
	set_pgtable_section(level2_table_0,
			    section_l2t1 >> SECTION_SHIFT_L2,
			    section_l2t1,
			    MT_NORMAL);
	section_l2t1 += BLOCK_SIZE_L2;
	set_pgtable_section(level2_table_0,
			    section_l2t1 >> SECTION_SHIFT_L2,
			    section_l2t1,
			    MT_NORMAL);
#endif

	/* Map the 256MB IFC region at the final flash location */
	level1_table_0[CONFIG_SYS_FLASH_BASE >> SECTION_SHIFT_L1] =
		(u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2t1 = CONFIG_SYS_IFC_BASE;
	for (i = 0; i < 0x10000000 >> SECTION_SHIFT_L2; i++) {
		set_pgtable_section(level2_table_1, i,
				    section_l2t1, MT_DEVICE_NGNRNE);
		section_l2t1 += BLOCK_SIZE_L2;
	}
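
	/*
	 * Size check for the remap loop above: 0x10000000 >> 21 = 128
	 * iterations of 2MB blocks, i.e. the full 256MB IFC window is
	 * aliased at CONFIG_SYS_FLASH_BASE with device attributes.
	 */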

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * The final table looks similar to the early table, but differs in
 * detail. These tables are in regular memory, and cache on IFC is
 * disabled. One sub-table is added to enable cache for QBMan.
 */
static inline void final_mmu_setup(void)
{
	int el;
	u64 i, tbl_base, tbl_limit, section_base;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * Set level 1 table 0 to cache-inhibited, covering 0 to 512GB.
	 * Set level 1 table 1 to cache-enabled, covering 512GB to 1TB.
	 * Set level 2 table 0 to cache-inhibited, covering 0 to 1GB.
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite the table to enable cache for OCRAM */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

	/*
	 * Fill in other parts of the tables where cache is needed.
	 * If finer granularity than 1GB is needed, a sub-table
	 * should be created.
	 */
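
	/*
	 * Worked example with the values defined above (a sketch):
	 * FINAL_QBMAN_CACHED_MEM = 0x818000000 aligns down to
	 * section_base = 0x800000000, so level1_table_0[32] gains a
	 * sub-table. Within it, tbl_base = 0x18000000 and tbl_limit =
	 * 0x1c000000 select L2 entries 192..223, i.e. 32 entries x
	 * 2MB = 64MB (FINAL_QBMAN_CACHED_SIZE) remapped as normal
	 * memory.
	 */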
	section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
	i = section_base >> SECTION_SHIFT_L1;
	level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2 = section_base;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level2_table_1, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l2 += BLOCK_SIZE_L2;
	}
	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
	tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
		    (BLOCK_SIZE_L1 - 1);
	for (i = tbl_base >> SECTION_SHIFT_L2;
	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
		section_l2 = section_base + (i << SECTION_SHIFT_L2);
		set_pgtable_section(level2_table_1, i,
				    section_l2, MT_NORMAL);
	}

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	asm volatile("dsb sy");
	if (el == 1) {
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 2) {
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 3) {
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else {
		hang();
	}
	asm volatile("isb");

	/*
	 * The MMU is already enabled; we just need to invalidate the TLB
	 * to load the new table. The new table is compatible with the
	 * current one, so even if the MMU walks through the new table
	 * before the TLB is invalidated, everything still works. We
	 * therefore don't need to turn off the MMU here.
	 */
}

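/*
 * Note on ordering (inferred from the calls below, not from separate
 * documentation): the i-cache is enabled first, stale d-cache and TLB
 * contents are invalidated, the early tables in OCRAM are installed
 * with the MMU bit (CR_M), and only then is the d-cache enabled
 * (CR_C), so no walk is performed against stale cached attributes.
 */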
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);
	return 0;
}

/*
 * This function is called from lib/board.c. It recreates the MMU table
 * in main memory. The MMU and d-cache are enabled earlier, so there is
 * no need to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_le32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
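
/*
 * Decoding note (a reading of the code above, not a TRM reference):
 * each cluster word packs one 8-bit field per initiator. For initiator
 * init_id, the field is masked with TP_CLUSTER_INIT_MASK and used as
 * an index into gur->tp_ityp[]; the type word is returned only when
 * its "available" bit (TP_ITYP_AV) is set, and 0 means "no such
 * initiator".
 */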

u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the core type */
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type;

	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:    %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:    %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus));
	printf("     DP-DDR: %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus2));
	puts("\n");

	return 0;
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif
	return error;
}

int arch_early_init_r(void)
{
	int rv;
	rv = fsl_lsch3_wake_seconday_cores();

	if (rv)
		printf("Did not wake secondary cores\n");

#ifdef CONFIG_SYS_HAS_SERDES
	fsl_serdes_init();
#endif
	return 0;
}

int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with the accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

	/*
	 * Enable the timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);

	/*
	 * Enable the clock for the timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);
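	/*
	 * Assumed encoding: bit 0 of CNTCR in the generic timer's
	 * memory-mapped control frame is the counter enable, so the
	 * 0x1 written above starts the system counter.
	 */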

	return 0;
}

void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = in_le32(rstcr);
	val |= 0x02;
	out_le32(rstcr, val);
}