arch/arm64/kvm/hyp/hyp-entry.S
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm
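
/*
 * Note: this mirrors the host-side calling convention (see __kvm_call_hyp):
 * the host issues "hvc #0" with x0 holding the hyp function pointer and
 * x1-x3 holding up to three arguments.
 */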

el1_sync:				// Guest trapped into EL2

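	/*
	 * Extract the exception class from ESR_EL2 and check it against both
	 * HVC64 and HVC32: if the first compare does not match, ccmp performs
	 * the second compare; if it does match, ccmp sets NZCV to #4 (Z set),
	 * so the b.ne below is only taken when neither HVC encoding matches.
	 */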
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
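	/*
	 * kimage_voffset is the offset between the kernel image's virtual
	 * and physical addresses, so subtracting it converts the
	 * constant-pool kernel VA into the physical (and hence idmap)
	 * address.
	 */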
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret
	sb

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
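	/*
	 * The eor/cbz pairs below test the guest's x0 against the SMCCC
	 * function IDs without a scratch register: x0 ^ WORKAROUND_1 is zero
	 * only for WORKAROUND_1, and xoring that result again with
	 * (WORKAROUND_1 ^ WORKAROUND_2) is zero only for WORKAROUND_2.
	 */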
	ldr	x1, [sp]				// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
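	// The callback above is expected to patch the "b wa2_end" into a NOP
	// when the SSBD mitigation is dynamic, enabling the handling below.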
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

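	/*
	 * clz returns 32 only when w1 == 0; shifting that right by 5 leaves
	 * 1 for zero input and 0 otherwise, and the final eor inverts it,
	 * i.e. w1 = !!w1 computed without touching PSTATE.NZCV.
	 */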
	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
	sb

el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

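/*
 * An exception taken from EL2 itself. The only case we try to recover from
 * is an illegal exception return (SPSR_EL2.IL set), which is reported back
 * through the guest exit path as ARM_EXCEPTION_IL; anything else is fatal.
 */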
el2_sync:
	/* Check for illegal exception return, otherwise panic */
	mrs	x0, spsr_el2

	/* if this was something else, then panic! */
	tst	x0, #PSR_IL_BIT
	b.eq	__hyp_panic

	/* Let's attempt a recovery from the illegal exception return */
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IL
	b	__guest_exit


el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 *    contain anything meaningful at that stage. We can reuse them
	 *    as temp registers.
	 * For (2), who cares?
	 */
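	/*
	 * ELR_EL2 is checked against both abort_guest_exit_start and
	 * abort_guest_exit_end: if the first compare mismatches, ccmp runs
	 * the second one, otherwise it forces the flags to "equal" (#4).
	 * The panic is therefore only avoided when ELR points at one of the
	 * two labels bracketing case (1) above.
	 */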
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
	sb

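/*
 * Fake an exception return to EL1h with all of DAIF masked and ELR pointing
 * at the kernel's panic(), so that the eret below lands the host in panic().
 */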
SYM_FUNC_START(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
	sb
SYM_FUNC_END(__hyp_do_panic)

SYM_CODE_START(__hyp_panic)
	get_host_ctxt	x0, x1
	b	hyp_panic
SYM_CODE_END(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
SYM_CODE_START(\label)
	b	\target
SYM_CODE_END(\label)
.endm

/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11

.macro check_preamble_length start, end
/* kvm_patch_vector_branch() generates code that jumps over the preamble. */
.if ((\end-\start) != KVM_VECTOR_PREAMBLE)
	.error "KVM vector preamble length mismatch"
.endif
.endm

.macro valid_vect target
	.align 7
661:
	esb
	stp	x0, x1, [sp, #-16]!
662:
	b	\target

check_preamble_length 661b, 662b
.endm

.macro invalid_vect target
	.align 7
661:
	b	\target
	nop
662:
	ldp	x0, x1, [sp], #16
	b	\target

check_preamble_length 661b, 662b
.endm
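
/*
 * The indirect (hardened) vectors below push x0/x1 themselves and then
 * branch past the KVM_VECTOR_PREAMBLE bytes of the target vector. That is
 * why valid_vect does its stp inside the preamble, while invalid_vect must
 * restore x0/x1 after the preamble before heading to the panic target.
 */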

SYM_CODE_START(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	valid_vect	el2_sync		// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
SYM_CODE_END(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
1:	esb
	.rept 26
	nop
	.endr
	/*
	 * The default sequence is to directly branch to the KVM vectors,
	 * using the computed offset. This applies for VHE as well as
	 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the
	 * preamble.
	 *
	 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
	 * with:
	 *
	 * stp	x0, x1, [sp, #-16]!
	 * movz	x0, #(addr & 0xffff)
	 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
	 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
	 * br	x0
	 *
	 * Where:
	 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
	 * See kvm_patch_vector_branch for details.
	 */
alternative_cb	kvm_patch_vector_branch
	stp	x0, x1, [sp, #-16]!
	b	__kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
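
/*
 * Each slot generated above is 16 vectors * 128 bytes = SZ_2K, which is
 * what the .org directive enforces; BP_HARDEN_EL2_SLOTS copies of it make
 * up __bp_harden_hyp_vecs.
 */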

	.align	11
SYM_CODE_START(__bp_harden_hyp_vecs)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
1:	.org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
	.org 1b
SYM_CODE_END(__bp_harden_hyp_vecs)

	.popsection

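/*
 * SMCCC WORKAROUND_1 sequence, intended to be copied at runtime into the
 * branch-predictor hardening vector slots above by the host's BP hardening
 * setup. It preserves x0-x3 around the firmware call.
 */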
SYM_CODE_START(__smccc_workaround_1_smc)
	esb
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
1:	.org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
	.org 1b
SYM_CODE_END(__smccc_workaround_1_smc)
#endif