/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
	b	reset

#ifdef CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot; allow them to provide that in their boot0.h file and
 * then use it here.
 */
#include <asm/arch/boot0.h>
ARM_SOC_BOOT0_HOOK
#endif
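/*
 * What ARM_SOC_BOOT0_HOOK expands to is entirely SoC-specific; typically
 * it reserves space for, or emits, a boot-ROM image header (magic values,
 * checksums, padding) ahead of the real reset code.
 */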

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start
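/*
 * The offsets above are relative to _start, so they stay valid no matter
 * where the image is loaded or relocated to.
 */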

reset:
#ifdef CONFIG_SYS_RESET_SCTRL
	bl	reset_sctrl
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
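	/*
	 * The switch_el macro (asm/macro.h) reads CurrentEL and branches to
	 * the 3f/2f/1f label matching the exception level we were entered
	 * in; each path installs the vector table and enables FP/SIMD for
	 * that EL.
	 */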
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
2:	msr	vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	msr	vbar_el1, x0
	mov	x0, #3 << 20
	msr	cpacr_el1, x0			/* Enable FP/SIMD (CPACR_EL1.FPEN = 0b11) */
0:

	/* Apply ARM core specific errata workarounds */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * The I-cache is invalidated before it is enabled in icache_enable(),
	 * the TLB is invalidated before the MMU is enabled in dcache_enable(),
	 * and the D-cache is invalidated before it is enabled in dcache_enable().
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

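/*
 * With CONFIG_ARMV8_MULTIENTRY all CPUs enter here together. The master
 * continues into _main; the slaves park in a WFE loop on the spin table,
 * waiting until an entry address is written to CPU_RELEASE_ADDR and an
 * event is sent, then branch to that address.
 */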
#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
master_cpu:
	/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */

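	/*
	 * _main (arch/arm/lib/crt0_64.S) sets up the initial stack, runs
	 * board_init_f(), relocates U-Boot and continues in board_init_r();
	 * it does not return here.
	 */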
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
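	/*
	 * Mask 0xfdfffffa clears SCTLR.EE (bit 25), SCTLR.C (bit 2) and
	 * SCTLR.M (bit 0): little-endian exceptions, D-cache off, MMU off.
	 */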
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all	/* tail call; returns to our caller via lr */
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A57 specific errata only */

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

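/*
 * CPUACTLR_EL1 has no architectural mnemonic, hence the raw
 * S3_1_c15_c2_0 encoding below; it is an IMPLEMENTATION DEFINED
 * Cortex-A57 register and is normally only writable from EL3.
 */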
apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the "Disable Indirect Predictor" bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the "Enable Invalidates of BTB" bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

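	/*
	 * GICD_BASE/GICC_BASE/GICR_BASE come from the board configuration.
	 * The master CPU initializes the distributor once; every CPU then
	 * initializes its own CPU interface (GICv2) or redistributor (GICv3).
	 */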
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to finish clearing the spin
	 * table.  This sync prevents slaves from observing a stale
	 * spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	bl	armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	bl	armv8_switch_to_el1
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

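/*
 * smp_kick_all_cpus is declared weak so that SoCs with their own CPU
 * wake-up mechanism can override it with an SoC-specific implementation.
 */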
WEAK(smp_kick_all_cpus)
	/* Kick the secondary CPUs with an SGI 0 interrupt */
	mov	x29, lr			/* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	bl	gic_kick_secondary_cpus
#endif
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

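/*
 * c_runtime_cpu_setup is called from crt0_64.S after relocation; the
 * vector table has moved with the rest of the image, so VBAR_ELx must
 * be re-pointed at its relocated address.
 */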
ENTRY(c_runtime_cpu_setup)
	/* Relocate VBAR to the (now relocated) vector table */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:

	ret
ENDPROC(c_runtime_cpu_setup)