// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1.  Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

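/* Trap controls shared by the VHE and non-VHE world-switch paths */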
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

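/* Configure CPACR_EL1 and the EL1 exception vectors for guest entry (VHE) */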
static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;
	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

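/* Configure CPTR_EL2 traps for guest entry on a non-VHE system */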
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}

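/* Program HCR_EL2 and the mode-specific trap controls before entering the guest */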
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

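/* Restore the host's EL1 trap configuration and exception vectors (VHE) */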
static void deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

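/*
 * On VHE, the common trap configuration is applied at vcpu load/put time
 * rather than on every world switch.
 */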
void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

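/* Install and tear down the guest's stage 2 translation regime */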
static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

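/*
 * Patched at boot: __check_arm_834220()() returns true only on CPUs
 * affected by erratum 834220.
 */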
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

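/* Walk stage 1 by hand with AT S1E1R and convert the resulting PA to HPFAR format */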
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

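/*
 * Record FAR_EL2/HPFAR_EL2 for a guest data or instruction abort, resolving
 * the IPA by hand when the hardware HPFAR value cannot be trusted.
 */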
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 hsr_ec;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
	    hsr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap.  Switch the context: */

	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

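/* True if this vcpu has asked for the Spectre-v4 (SSBD) mitigation to be off */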
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

/**
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs above. We must now call __activate_vm
	 * before __activate_traps, because __activate_vm configures
	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
	 * (among other things).
	 */
	__activate_vm(vcpu->kvm);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON);
		dsb(sy);
	}

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	__activate_vm(kern_hyp_va(vcpu->kvm));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_switch_to_host(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

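/* Entry point from the hyp vectors when something has gone irretrievably wrong */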
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}