1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * Copyright (C) 2015-2018 - ARM Ltd
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
7 #include <linux/arm-smccc.h>
8 #include <linux/linkage.h>
10 #include <asm/alternative.h>
11 #include <asm/assembler.h>
12 #include <asm/cpufeature.h>
13 #include <asm/kvm_arm.h>
14 #include <asm/kvm_asm.h>
15 #include <asm/kvm_mmu.h>
// Hypervisor text: code here runs at EL2.
19 .pushsection .hyp.text, "ax"

// Orphaned comment fragment for the EL2 call helper (its body is elided
// from this extract): arguments are shuffled before branching to the
// function pointer in x0, parameters expected in x1-x3.
23 * Shuffle the parameters before calling the function
24 * pointed to in x0. Assumes parameters in x[1,2,3].

// Synchronous exception entry at EL2: classify the trap and route it.
// NOTE(review): several instructions between the visible lines are elided
// in this extract; the code below is byte-preserved as seen.
35 el1_sync: // Guest trapped into EL2

// Extract the exception class (the ESR_EL2 read is on an elided line) and
// check for HVC from AArch64, or — via ccmp when the first compare fails —
// HVC from AArch32.
38 lsr x0, x0, #ESR_ELx_EC_SHIFT
39 cmp x0, #ESR_ELx_EC_HVC64
40 ccmp x0, #ESR_ELx_EC_HVC32, #4, ne

// A non-zero VTTBR_EL2 means a guest stage-2 context is installed, so the
// HVC came from a guest; otherwise the caller was the host.
43 mrs x1, vttbr_el2 // If vttbr is valid, the guest
44 cbnz x1, el1_hvc_guest // called HVC

46 /* Here, we're pretty sure the host called HVC. */

// Stub hypercalls occupy the low function-ID range [0, HVC_STUB_HCALL_NR).
// The conditional branch consuming this compare is elided from this view.
49 /* Check for a stub HVC call */
50 cmp x0, #HVC_STUB_HCALL_NR

54 * Compute the idmap address of __kvm_handle_stub_hvc and
55 * jump there. Since we use kimage_voffset, do not use the
56 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
57 * (by loading it from the constant pool).
59 * Preserve x0-x4, which may contain stub parameters.

// Load the kernel VA of the stub handler from the literal pool, and
// kimage_voffset to rebase it; the subtraction and branch are elided here.
61 ldr x5, =__kvm_handle_stub_hvc
62 ldr_l x6, kimage_voffset
// Orphaned comment fragment: the "Perform the EL2 call" sequence itself is
// elided from this extract.
70 * Perform the EL2 call

// Guest HVC handling: fast paths for the Spectre/SSBD SMCCC workarounds.
// The el1_hvc_guest label and surrounding branches are elided in this view.
80 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
81 * The workaround has already been applied on the host,
82 * so let's quickly get back to the guest. We don't bother
83 * restoring x1, as it can be clobbered anyway.

// The vector preamble saved the guest's x0/x1 pair on the stack; compare
// the guest's function ID against WORKAROUND_1 (w1 == 0 on match).
85 ldr x1, [sp] // Guest's x0
86 eor w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1

89 /* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
// Reuse the XORed value: folding in (WA1 ^ WA2) turns the WA1 residue into
// a test for WORKAROUND_2 without reloading the guest's x0.
90 eor w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
91 ARM_SMCCC_ARCH_WORKAROUND_2)

94 #ifdef CONFIG_ARM64_SSBD
// Runtime-patched alternative: arm64_enable_wa2_handling can disable this
// path when WA2 handling is not required on this system.
95 alternative_cb arm64_enable_wa2_handling

// NOTE(review): x2 presumably holds the vcpu pointer at this point (its
// setup is on an elided line) — confirm against the full source.
99 ldr x0, [x2, #VCPU_WORKAROUND_FLAGS]

101 // Sanitize the argument and update the guest flags
// clz/lsr/eor computes w1 = !!w1 without consuming PSTATE flags:
// clz yields 32 only for w1 == 0 (lsr #5 -> 1, eor #1 -> 0); any non-zero
// input yields clz < 32 (lsr #5 -> 0, eor #1 -> 1).
102 ldr x1, [sp, #8] // Guest's x1
103 clz w1, w1 // Murphy's device:
104 lsr w1, w1, #5 // w1 = !!w1 without using
105 eor w1, w1, #1 // the flags...
// Record the sanitized enable bit in the vcpu's workaround flags.
106 bfi x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
107 str x0, [x2, #VCPU_WORKAROUND_FLAGS]

109 /* Check that we actually need to perform the call */
// Per-CPU flag: only issue the firmware call where mitigation is dynamic.
110 hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2

// Forward the request to EL3 (the smc instruction is elided in this view).
113 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_2

116 /* Don't leak data from the SMC call */
// Exit reasons handed back to the hypervisor exit path in x0. The labels
// and ret/branch instructions between these lines are elided in this view.
131 mov x0, #ARM_EXCEPTION_TRAP
136 mov x0, #ARM_EXCEPTION_IRQ
141 mov x0, #ARM_EXCEPTION_EL1_SERROR

145 /* Check for illegal exception return, otherwise panic */
148 /* if this was something else, then panic! */

152 /* Let's attempt a recovery from the illegal exception return */
154 mov x0, #ARM_EXCEPTION_IL
// Restore the x0/x1 pair the vector preamble pushed on entry.
159 ldp x0, x1, [sp], #16

// SError taken while exiting a guest:
162 * Only two possibilities:
163 * 1) Either we come from the exit path, having just unmasked
164 * PSTATE.A: change the return code to an EL2 fault, and
165 * carry on, as we're already in a sane state to handle it.
166 * 2) Or we come from anywhere else, and that's a bug: we panic.
168 * For (1), x0 contains the original return code and x1 doesn't
169 * contain anything meaningful at that stage. We can reuse them
171 * For (2), who cares?

// Range-check ELR_EL2 against the abort window (the comparisons and the
// window's bounds checks are on elided lines).
174 adr x1, abort_guest_exit_start
176 adr x1, abort_guest_exit_end

// Tag the existing return code as having taken an SError on the way out.
179 mov x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
// Panic from EL2: builds a PSTATE value with D/A/I/F masked in lr for the
// exception return. The remainder of the body (SPSR/ELR writes, eret) is
// elided from this extract.
183 SYM_FUNC_START(__hyp_do_panic)
184 mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
191 SYM_FUNC_END(__hyp_do_panic)
// Common panic entry for invalid vectors; its body (elided in this view)
// sits between the START/END markers.
193 SYM_CODE_START(__hyp_panic)
196 SYM_CODE_END(__hyp_panic)
// Emit a named handler for an exception that should never occur; it
// transfers to \target (default __hyp_panic). The macro body between
// SYM_CODE_START and its .endm is elided from this extract.
198 .macro invalid_vector label, target = __hyp_panic
200 SYM_CODE_START(\label)

// Instantiate handlers for every impossible vector: anything arriving on
// the EL2t stack, EL2h sync, and FIQs (KVM does not route FIQs to EL2).
206 /* None of these should ever happen */
207 invalid_vector el2t_sync_invalid
208 invalid_vector el2t_irq_invalid
209 invalid_vector el2t_fiq_invalid
210 invalid_vector el2t_error_invalid
211 invalid_vector el2h_sync_invalid
212 invalid_vector el2h_irq_invalid
213 invalid_vector el2h_fiq_invalid
214 invalid_vector el1_fiq_invalid
// Assemble-time assertion: the code emitted between \start and \end must
// be exactly KVM_VECTOR_PREAMBLE bytes, since the runtime patcher jumps
// over it by that fixed offset. (.endif/.endm are elided in this extract.)
220 .macro check_preamble_length start, end
221 /* kvm_patch_vector_branch() generates code that jumps over the preamble. */
222 .if ((\end-\start) != KVM_VECTOR_PREAMBLE)
223 .error "KVM vector preamble length mismatch"
// Vector slot for a handled exception: the preamble (bracketed by the
// 661/662 local labels, partly elided here) saves the guest's x0/x1 pair
// before branching to \target, and its size is verified at assembly time.
227 .macro valid_vect target
231 stp x0, x1, [sp, #-16]!
235 check_preamble_length 661b, 662b
// Vector slot for an impossible exception: undoes the preamble's x0/x1
// save before reaching \target (branch and surrounding lines elided), and
// is size-checked like valid_vect so patching offsets stay correct.
238 .macro invalid_vect target
244 ldp x0, x1, [sp], #16
247 check_preamble_length 661b, 662b
// EL2 exception vector table (VBAR_EL2). Four groups of four slots, in
// the architectural order Sync/IRQ/FIQ/Error, one group per source:
// EL2t, EL2h, lower-EL AArch64, lower-EL AArch32. Slot order must not be
// changed — it is fixed by the ARMv8 vector table layout.
250 SYM_CODE_START(__kvm_hyp_vector)
// Current EL with SP0 (EL2t): never used by KVM.
251 invalid_vect el2t_sync_invalid // Synchronous EL2t
252 invalid_vect el2t_irq_invalid // IRQ EL2t
253 invalid_vect el2t_fiq_invalid // FIQ EL2t
254 invalid_vect el2t_error_invalid // Error EL2t

// Current EL with SPx (EL2h): only sync and SError are expected.
256 valid_vect el2_sync // Synchronous EL2h
257 invalid_vect el2h_irq_invalid // IRQ EL2h
258 invalid_vect el2h_fiq_invalid // FIQ EL2h
259 valid_vect el2_error // Error EL2h

// Lower EL, AArch64 guest: FIQs are not routed to EL2.
261 valid_vect el1_sync // Synchronous 64-bit EL1
262 valid_vect el1_irq // IRQ 64-bit EL1
263 invalid_vect el1_fiq_invalid // FIQ 64-bit EL1
264 valid_vect el1_error // Error 64-bit EL1

// Lower EL, AArch32 guest: same routing as the AArch64 group.
266 valid_vect el1_sync // Synchronous 32-bit EL1
267 valid_vect el1_irq // IRQ 32-bit EL1
268 invalid_vect el1_fiq_invalid // FIQ 32-bit EL1
269 valid_vect el1_error // Error 32-bit EL1
270 SYM_CODE_END(__kvm_hyp_vector)
272 #ifdef CONFIG_KVM_INDIRECT_VECTORS

// Patchable indirect-vector slot. By default it saves x0/x1 and branches
// straight into __kvm_hyp_vector past the preamble; kvm_patch_vector_branch
// may rewrite it with the movz/movk address sequence described below for
// hardened configurations. (The surrounding macro/label lines are elided
// from this extract.)
280 * The default sequence is to directly branch to the KVM vectors,
281 * using the computed offset. This applies for VHE as well as
282 * !ARM64_HARDEN_EL2_VECTORS. The first vector must always run the preamble.
284 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
287 * stp x0, x1, [sp, #-16]!
288 * movz x0, #(addr & 0xffff)
289 * movk x0, #((addr >> 16) & 0xffff), lsl #16
290 * movk x0, #((addr >> 32) & 0xffff), lsl #32
294 * addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + KVM_VECTOR_PREAMBLE.
295 * See kvm_patch_vector_branch for details.

// Default (pre-patch) sequence: preamble save, then skip the target
// table's own preamble since it was just executed here.
297 alternative_cb kvm_patch_vector_branch
298 stp x0, x1, [sp, #-16]!
299 b __kvm_hyp_vector + (1b - 0b + KVM_VECTOR_PREAMBLE)
// Emit one full 2 KiB vector table per hardening slot; the macro body
// between these lines is elided from this extract. The .org directives
// assert the emitted size never overflows the reserved space.
306 .macro generate_vectors
311 .org 0b + SZ_2K // Safety measure

// Branch-predictor-hardened vector tables: BP_HARDEN_EL2_SLOTS copies,
// each patched per-CPU with its own mitigation sequence.
315 SYM_CODE_START(__bp_harden_hyp_vecs)
316 .rept BP_HARDEN_EL2_SLOTS
// Size check: the whole array must be exactly __BP_HARDEN_HYP_VECS_SZ.
319 1: .org __bp_harden_hyp_vecs + __BP_HARDEN_HYP_VECS_SZ
321 SYM_CODE_END(__bp_harden_hyp_vecs)
// Template copied into vector slots: issues the SMCCC WORKAROUND_1 call
// to firmware while preserving x0-x3 on the stack. The stack adjustment
// and the smc instruction itself are on elided lines in this extract.
325 SYM_CODE_START(__smccc_workaround_1_smc)
326 stp x2, x3, [sp, #(8 * 0)]
329 stp x0, x1, [sp, #(8 * 2)]
330 mov w0, #ARM_SMCCC_ARCH_WORKAROUND_1
332 ldp x2, x3, [sp, #(8 * 0)]
333 ldp x0, x1, [sp, #(8 * 2)]
// Assemble-time size check: the template must be exactly
// __SMCCC_WORKAROUND_1_SMC_SZ bytes so it can be memcpy'd into a slot.
335 1: .org __smccc_workaround_1_smc + __SMCCC_WORKAROUND_1_SMC_SZ
337 SYM_CODE_END(__smccc_workaround_1_smc)