]> git.ipfire.org Git - people/ms/u-boot.git/blame - arch/arm/cpu/armv8/fsl-layerscape/lowlevel.S
armv8: fsl-lsch3: Make CCN-504 related code conditional
[people/ms/u-boot.git] / arch / arm / cpu / armv8 / fsl-layerscape / lowlevel.S
CommitLineData
2f78eae5 1/*
9f3183d2 2 * (C) Copyright 2014-2015 Freescale Semiconductor
2f78eae5
YS
3 *
4 * SPDX-License-Identifier: GPL-2.0+
5 *
6 * Extracted from armv8/start.S
7 */
8
9#include <config.h>
10#include <linux/linkage.h>
40f8dec5 11#include <asm/gic.h>
2f78eae5 12#include <asm/macro.h>
fa18ed76 13#include <asm/arch-fsl-layerscape/soc.h>
9f3183d2
MH
14#ifdef CONFIG_MP
15#include <asm/arch/mp.h>
16#endif
f6a70b3a
PJ
17#ifdef CONFIG_FSL_LSCH3
18#include <asm/arch-fsl-layerscape/immap_lsch3.h>
19#endif
ec6617c3 20#include <asm/u-boot.h>
2f78eae5 21
fa18ed76
WS
/* Get GIC offset
 * For LS1043a rev1.0, the GIC base address aligns with 4K.
 * For LS1043a rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT]
 * is set, the GIC base address aligns with 4K, otherwise it
 * aligns with 64K.
 * output:
 *	x0: the base address of GICD
 *	x1: the base address of GICC (CONFIG_GICV2 only)
 * clobbers: x2, x3, x4, flags
 */
ENTRY(get_gic_offset)
	ldr	x0, =GICD_BASE		/* default: 4K-aligned GICD */
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE
#endif
#ifdef CONFIG_HAS_FEATURE_GIC64K_ALIGN
	ldr	x2, =DCFG_CCSR_SVR
	ldr	w2, [x2]
	rev	w2, w2			/* DCFG register is big-endian; byte-swap */
	mov	w3, w2
	ands	w3, w3, #SVR_WO_E << 8	/* SoC id field, endianness bit masked off */
	mov	w4, #SVR_LS1043A << 8
	cmp	w3, w4
	b.ne	1f			/* not LS1043A: keep 4K-aligned bases */
	ands	w2, w2, #0xff		/* low byte = silicon revision */
	cmp	w2, #REV1_0
	b.eq	1f			/* rev1.0 always 4K-aligned */
	ldr	x2, =SCFG_GIC400_ALIGN
	ldr	w2, [x2]
	rev	w2, w2
	tbnz	w2, #GIC_ADDR_BIT, 1f	/* bit set: stay with 4K-aligned bases */
	ldr	x0, =GICD_BASE_64K	/* bit clear: switch to 64K-aligned bases */
#ifdef CONFIG_GICV2
	ldr	x1, =GICC_BASE_64K
#endif
1:
#endif
	ret
ENDPROC(get_gic_offset)
60
/*
 * Wake all secondary cores by sending SGI 0 through the GIC.
 * get_gic_offset supplies the GICD (x0) / GICC (x1) bases consumed by
 * gic_kick_secondary_cpus. No-op when no GIC is configured.
 * clobbers: x0, x1 (and whatever the callees clobber); preserves lr via x29
 */
ENTRY(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	mov	x29, lr			/* Save LR */
	bl	get_gic_offset
	bl	gic_kick_secondary_cpus
	mov	lr, x29			/* Restore LR */
#endif
	ret
ENDPROC(smp_kick_all_cpus)
71
72
2f78eae5
YS
/*
 * Early SoC init run from start.S on all cores.
 * EL3-only work (CCN-504 setup, SMMU page size, secure GIC init, TZPC/TZASC,
 * L2 latency) is skipped at lower ELs via switch_el. Secondary cores branch
 * off to secondary_boot_func and never return here.
 * clobbers: x0-x4 and callee clobbers; preserves lr via x29
 */
ENTRY(lowlevel_init)
	mov	x29, lr			/* Save LR */

	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:

#if defined (CONFIG_SYS_FSL_HAS_CCN504)

	/* Set Wuo bit for RN-I 20 */
#ifdef CONFIG_ARCH_LS2080A
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000010
	bl	ccn504_set_aux

	/*
	 * Set forced-order mode in RNI-6, RNI-20
	 * This is required for performance optimization on LS2088A
	 * LS2080A family does not support setting forced-order mode,
	 * so skip this operation for LS2080A family
	 */
	bl	get_svr
	lsr	w0, w0, #16		/* keep SVR[31:16] = device id */
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f
	ldr	x0, =CCI_AUX_CONTROL_BASE(6)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
	ldr	x0, =CCI_AUX_CONTROL_BASE(20)
	ldr	x1, =0x00000020
	bl	ccn504_set_aux
1:
#endif

	/* Add fully-coherent masters to DVM domain */
	ldr	x0, =CCI_MN_BASE
	ldr	x1, =CCI_MN_RNF_NODEID_LIST
	ldr	x2, =CCI_MN_DVM_DOMAIN_CTL_SET
	bl	ccn504_add_masters_to_dvm

	/*
	 * Set all RN-I ports to QoS of 15.
	 * One ccn504_set_qos call per slave port (S0/S1/S2) of each RN-I
	 * node present on this SoC (nodes 0, 2, 6, 12, 16, 20); 0x00FF000C
	 * is the QoS control value written to each.
	 */
	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(0)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(2)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(6)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(12)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(16)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos

	ldr	x0, =CCI_S0_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S1_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
	ldr	x0, =CCI_S2_QOS_CONTROL_BASE(20)
	ldr	x1, =0x00FF000C
	bl	ccn504_set_qos
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */

#ifdef SMMU_BASE
	/* Set the SMMU page size in the sACR register */
	ldr	x1, =SMMU_BASE
	ldr	w0, [x1, #0x10]
	orr	w0, w0, #1 << 16	/* set sACR.pagesize to indicate 64K page */
	str	w0, [x1, #0x10]
#endif

	/* Initialize GIC Secure Bank Status */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f		/* only the boot core does global init */
	bl	get_gic_offset
	bl	gic_init_secure
1:
#ifdef CONFIG_GICV3
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	bl	gic_init_secure_percpu
#endif
#endif

100:
	branch_if_master x0, x1, 2f	/* master continues below at 2: */

#if defined(CONFIG_MP) && defined(CONFIG_ARMV8_MULTIENTRY)
	/* secondary cores park in the spin table and do not return */
	ldr	x0, =secondary_boot_func
	blr	x0
#endif

2:
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
#ifdef CONFIG_FSL_TZPC_BP147
	/* Set Non Secure access for all devices protected via TZPC */
	ldr	x1, =TZPCDECPROT_0_SET_BASE /* Decode Protection-0 Set Reg */
	/*
	 * NOTE(review): w0 is or-ed without first being loaded from the
	 * register; presumably harmless because this is a write-1-to-set
	 * register and only bit 3 matters — confirm against the TZPC RM.
	 */
	orr	w0, w0, #1 << 3		/* DCFG_RESET is accessible from NS world */
	str	w0, [x1]

	isb
	dsb	sy
#endif

#ifdef CONFIG_FSL_TZASC_400
	/*
	 * LS2080 and its personalities do not support TZASC,
	 * so skip the TZASC related operations
	 */
	bl	get_svr
	lsr	w0, w0, #16
	ldr	w1, =SVR_DEV_LS2080A
	cmp	w0, w1
	b.eq	1f

	/* Set TZASC so that:
	 * a. We use only Region0 whose global secure write/read is EN
	 * b. We use only Region0 whose NSAID write/read is EN
	 *
	 * NOTE: As per the CCSR map doc, TZASC 3 and TZASC 4 are just
	 *	 placeholders.
	 */
#ifdef CONFIG_FSL_TZASC_1
	ldr	x1, =TZASC_GATE_KEEPER(0)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(0)
	ldr	w0, [x1]		/* Region-0 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(0)
	ldr	w0, [x1]		/* Region-0 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
#ifdef CONFIG_FSL_TZASC_2
	ldr	x1, =TZASC_GATE_KEEPER(1)
	ldr	w0, [x1]		/* Filter 0 Gate Keeper Register */
	orr	w0, w0, #1 << 0		/* Set open_request for Filter 0 */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ATTRIBUTES_0(1)
	ldr	w0, [x1]		/* Region-1 Attributes Register */
	orr	w0, w0, #1 << 31	/* Set Sec global write en, Bit[31] */
	orr	w0, w0, #1 << 30	/* Set Sec global read en, Bit[30] */
	str	w0, [x1]

	ldr	x1, =TZASC_REGION_ID_ACCESS_0(1)
	ldr	w0, [x1]		/* Region-1 Access Register */
	mov	w0, #0xFFFFFFFF		/* Set nsaid_wr_en and nsaid_rd_en */
	str	w0, [x1]
#endif
	isb
	dsb	sy
#endif
100:
1:
#ifdef CONFIG_ARCH_LS1046A
	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
1:
	/* Initialize the L2 RAM latency (L2CTLR_EL1, S3_1_c11_c0_2) */
	mrs	x1, S3_1_c11_c0_2
	mov	x0, #0x1C7
	/* Clear L2 Tag RAM latency and L2 Data RAM latency */
	bic	x1, x1, x0
	/* Set L2 data ram latency bits [2:0] */
	orr	x1, x1, #0x2
	/* set L2 tag ram latency bits [8:6] */
	orr	x1, x1, #0x80
	msr	S3_1_c11_c0_2, x1
	isb
100:
#endif

#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
	bl	fsl_ocram_init
#endif

	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)
301
3b6bf811
HZ
302#if defined(CONFIG_FSL_LSCH2) && !defined(CONFIG_SPL_BUILD)
/*
 * Initialize OCRAM: zero-fill it, then clear any ECC error status the
 * fill may have latched. Uses x28 (not x29) to preserve lr because the
 * caller, lowlevel_init, already keeps its own lr in x29.
 */
ENTRY(fsl_ocram_init)
	mov	x28, lr			/* Save LR */
	bl	fsl_clear_ocram
	bl	fsl_ocram_clear_ecc_err
	mov	lr, x28			/* Restore LR */
	ret
ENDPROC(fsl_ocram_init)
310
/*
 * Zero-fill the whole OCRAM, 8 bytes per store, from
 * CONFIG_SYS_FSL_OCRAM_BASE up to (but not including) base + size.
 * clobbers: x0, x1, flags
 */
ENTRY(fsl_clear_ocram)
	ldr	x0, =CONFIG_SYS_FSL_OCRAM_BASE	/* x0 = write cursor */
	ldr	x1, =(CONFIG_SYS_FSL_OCRAM_BASE + CONFIG_SYS_FSL_OCRAM_SIZE)
.Lzero_next:
	str	xzr, [x0], #8			/* *cursor = 0; cursor += 8 */
	cmp	x0, x1
	b.lo	.Lzero_next			/* until cursor reaches the end */
	ret
ENDPROC(fsl_clear_ocram)
323
/*
 * Clear the OCRAM1/2 ECC error status bits (0x60 = both banks' bits)
 * in the single-bit (SBEESR2) and multi-bit (MBEESR2) ECC error status
 * registers; presumably write-1-to-clear — confirm against the RM.
 * clobbers: x0, w1
 */
ENTRY(fsl_ocram_clear_ecc_err)
	/* OCRAM1/2 ECC status bit */
	mov	w1, #0x60
	ldr	x0, =DCSR_DCFG_SBEESR2
	str	w1, [x0]
	ldr	x0, =DCSR_DCFG_MBEESR2
	str	w1, [x0]
	ret
/*
 * Fix: was ENDPROC(fsl_ocram_init), which emits ".size fsl_ocram_init"
 * here and leaves this function's own symbol without size information.
 */
ENDPROC(fsl_ocram_clear_ecc_err)
333#endif
334
b7f2bbff 335#ifdef CONFIG_FSL_LSCH3
f6a70b3a
PJ
	/*
	 * Return the SoC version register (SVR) value in w0,
	 * read from FSL_LSCH3_SVR. clobbers: x1
	 */
	.globl get_svr
get_svr:
	ldr	x1, =FSL_LSCH3_SVR
	ldr	w0, [x1]
	ret
c055cee1 341#endif
f6a70b3a 342
c055cee1 343#ifdef CONFIG_SYS_FSL_HAS_CCN504
dcd468b8
YS
/*
 * Poll all 8 CCN-504 HN-F nodes until each reports the requested
 * power state, or until the generic timer advances 1200 ticks
 * (stated as 100 us; assumes a 12 MHz counter — TODO confirm).
 * The PSTATE_STATUS registers sit 0x10000 apart starting at
 * 0x4200018.
 */
hnf_pstate_poll:
	/* x0 has the desired status, return 0 for success, 1 for timeout
	 * clobber x1, x2, x3, x4, x6, x7
	 */
	mov	x1, x0			/* x1 = status value to wait for */
	mov	x7, #0			/* flag for timeout */
	mrs	x3, cntpct_el0		/* read timer */
	add	x3, x3, #1200		/* timeout after 100 microseconds */
	mov	x0, #0x18
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_STATUS */
	mov	w6, #8			/* HN-F node count */
1:
	ldr	x2, [x0]
	cmp	x2, x1			/* check status */
	b.eq	2f			/* this node is done; advance */
	mrs	x4, cntpct_el0
	cmp	x4, x3
	b.ls	1b			/* deadline not reached: poll again */
	mov	x7, #1			/* timeout */
	b	3f
2:
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b
3:
	mov	x0, x7			/* 0 = all nodes reached state */
	ret
371
/*
 * Write the requested power state into the low 2 bits of each of the
 * 8 HN-F PSTATE_REQ registers (0x4200010, stride 0x10000), preserving
 * the other bits of each register.
 */
hnf_set_pstate:
	/* x0 has the desired state, clobber x1, x2, x6 */
	mov	x1, x0			/* x1 = requested pstate */
	/* power state to SFONLY */
	mov	w6, #8			/* HN-F node count */
	mov	x0, #0x10
	movk	x0, #0x420, lsl #16	/* HNF0_PSTATE_REQ */
1:	/* set pstate to sfonly */
	ldr	x2, [x0]
	and	x2, x2, #0xfffffffffffffffc /* & HNFPSTAT_MASK */
	orr	x2, x2, x1
	str	x2, [x0]
	add	x0, x0, #0x10000	/* move to next node */
	subs	w6, w6, #1
	cbnz	w6, 1b

	ret
389
1ab557a0 390ENTRY(__asm_flush_l3_dcache)
dcd468b8
YS
391 /*
392 * Return status in x0
393 * success 0
399e2bb6 394 * timeout 1 for setting SFONLY, 2 for FAM, 3 for both
dcd468b8
YS
395 */
396 mov x29, lr
397 mov x8, #0
398
399e2bb6
YS
399 switch_el x0, 1f, 100f, 100f /* skip if not in EL3 */
400
4011:
dcd468b8
YS
402 dsb sy
403 mov x0, #0x1 /* HNFPSTAT_SFONLY */
404 bl hnf_set_pstate
405
406 mov x0, #0x4 /* SFONLY status */
407 bl hnf_pstate_poll
408 cbz x0, 1f
409 mov x8, #1 /* timeout */
4101:
411 dsb sy
412 mov x0, #0x3 /* HNFPSTAT_FAM */
413 bl hnf_set_pstate
414
415 mov x0, #0xc /* FAM status */
416 bl hnf_pstate_poll
417 cbz x0, 1f
418 add x8, x8, #0x2
399e2bb6 419100:
dcd468b8
YS
4201:
421 mov x0, x8
422 mov lr, x29
423 ret
1ab557a0 424ENDPROC(__asm_flush_l3_dcache)
c055cee1 425#endif /* CONFIG_SYS_FSL_HAS_CCN504 */
dcd468b8 426
9f3183d2 427#ifdef CONFIG_MP
40f8dec5
YS
	/* Keep literals not used by the secondary boot code outside it */
	.ltorg

	/* Using 64 bit alignment since the spin table is accessed as data */
	.align 4
	.global secondary_boot_code
	/* Secondary Boot Code starts here */
secondary_boot_code:
	/* One SPIN_TABLE_ELEM_SIZE-byte entry per CPU */
	.global __spin_table
__spin_table:
	.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE

	/* 4-byte alignment for the code that follows the table */
	.align 2
/*
 * Entry point for secondary cores. Computes the core's spin-table slot
 * from MPIDR_EL1, publishes its LPID and STATUS there, waits for the
 * master to post an entry address into the slot, then switches EL and
 * jumps to it. Never returns.
 */
ENTRY(secondary_boot_func)
	/*
	 * MPIDR_EL1 Fields:
	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
	 * MPIDR[7:2] = AFF0_RES
	 * MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
	 * MPIDR[23:16] = AFF2_CLUSTERID
	 * MPIDR[24] = MT
	 * MPIDR[29:25] = RES0
	 * MPIDR[30] = U
	 * MPIDR[31] = ME
	 * MPIDR[39:32] = AFF3
	 *
	 * Linear Processor ID (LPID) calculation from MPIDR_EL1:
	 * (We only use AFF0_CPUID and AFF1_CLUSTERID for now
	 * until AFF2_CLUSTERID and AFF3 have non-zero values)
	 *
	 * LPID = MPIDR[15:8] | MPIDR[1:0]
	 */
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1 cluster id */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0 core id */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
	ubfm	x9, x0, #0, #15		/* x9 contains MPIDR[15:0] */
	/*
	 * offset of the spin table element for this core from start of spin
	 * table (each elem is padded to 64 bytes)
	 */
	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	ldr	x0, =__real_cntfrq
	ldr	x0, [x0]
	msr	cntfrq_el0, x0		/* set with real frequency */
	str	x9, [x11, #16]		/* LPID */
	mov	x4, #1
	str	x4, [x11, #8]		/* STATUS */
	dsb	sy			/* publish slot before waiting */
#if defined(CONFIG_GICV3)
	gic_wait_for_interrupt_m x0
#elif defined(CONFIG_GICV2)
	bl	get_gic_offset
	mov	x0, x1			/* x0 = GICC base for the wait macro */
	gic_wait_for_interrupt_m x0, w1
#endif

slave_cpu:
	wfe
	ldr	x0, [x11]		/* entry address posted by the master */
	cbz	x0, slave_cpu		/* spurious wakeup: keep waiting */
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
	mrs	x1, sctlr_el2
#else
	mrs	x1, sctlr_el1
#endif
	tbz	x1, #25, cpu_is_le	/* SCTLR.EE: data endianness */
	rev	x0, x0			/* BE to LE conversion */
cpu_is_le:
	ldr	x5, [x11, #24]		/* slot arch field: 0 = AArch64 */
	cbz	x5, 1f

#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
	ldr	x5, =ES_TO_AARCH64
#else
	ldr	x4, [x11]
	ldr	x5, =ES_TO_AARCH32
#endif
	bl	secondary_switch_to_el2

1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, secondary_switch_to_el1
#else
	ldr	x4, [x11]
#endif
	ldr	x5, =ES_TO_AARCH64
	bl	secondary_switch_to_el2

ENDPROC(secondary_boot_func)
523
/*
 * Drop from EL3 to EL2 and jump to x4 (execution state selected by x5).
 * If not currently in EL3 this simply returns; from EL3 the
 * armv8_switch_to_el2_m macro does not return here. Scratch: x6.
 */
ENTRY(secondary_switch_to_el2)
	switch_el x6, 1f, 0f, 0f
0:	ret
1:	armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)
529
/*
 * Runs at EL2 (entered via secondary_switch_to_el2 with x4 pointing
 * here). Recomputes this core's spin-table slot, reloads the entry
 * address into x4, picks the target execution state from the slot's
 * arch field (offset 24: non-zero = AArch32), and drops to EL1.
 */
ENTRY(secondary_switch_to_el1)
	mrs	x0, mpidr_el1
	ubfm	x1, x0, #8, #15		/* x1 = AFF1 cluster id */
	ubfm	x2, x0, #0, #1		/* x2 = AFF0 core id */
	orr	x10, x2, x1, lsl #2	/* x10 has LPID */

	lsl	x1, x10, #6
	ldr	x0, =__spin_table
	/* physical address of this cpus spin table element */
	add	x11, x1, x0

	ldr	x4, [x11]		/* x4 = entry address */

	ldr	x5, [x11, #24]
	cbz	x5, 2f			/* 0: enter EL1 in AArch64 */

	ldr	x5, =ES_TO_AARCH32
	bl	switch_to_el1		/* does not return from EL2 */

2:	ldr	x5, =ES_TO_AARCH64
	/* fall through into switch_to_el1 */

switch_to_el1:
	switch_el x6, 0f, 1f, 0f
0:	ret
1:	armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)
556
	/* Ensure that the literals used by the secondary boot code are
	 * assembled within it (this is required so that we can protect
	 * this area with a single memreserve region)
	 */
	.ltorg

	/* 64 bit alignment for elements accessed as data */
	.align 4
	/* Actual counter frequency, read by each secondary core */
	.global __real_cntfrq
__real_cntfrq:
	.quad COUNTER_FREQUENCY
	/* Total size of the relocatable secondary boot region */
	.globl __secondary_boot_code_size
	.type __secondary_boot_code_size, %object
	/* Secondary Boot Code ends here */
__secondary_boot_code_size:
	.quad .-secondary_boot_code
9f3183d2 573#endif