From ce66109cec867c9afd2ee1ecf1aff8d73cdb2f89 Mon Sep 17 00:00:00 2001 From: Oliver Upton Date: Tue, 8 Jul 2025 10:25:23 -0700 Subject: [PATCH] KVM: arm64: nv: Take "masked" aborts to EL2 when HCRX_EL2.TMEA is set HCRX_EL2.TMEA further modifies the external abort behavior where unmasked aborts are taken to EL1 and masked aborts are taken to EL2. It's rather weird when you consider that SEAs are, well, *synchronous* and therefore not actually maskable. However, for the purposes of exception routing, they're considered "masked" if the A flag is set. This gets a bit hairier when considering the fact that TMEA also enables vSErrors, i.e. KVM has delegated the HW vSError context to the guest hypervisor. We can keep the vSError context delegation as-is by taking advantage of a couple properties: - If SErrors are unmasked, the 'physical' SError can be taken in-context immediately. In other words, KVM can emulate the EL1 SError while preserving vEL2's ownership of the vSError context. - If SErrors are masked, the 'physical' SError is taken to EL2 immediately and needs the usual nested exception entry. Note that the new in-context handling has the benign effect where unmasked SError injections are emulated even for non-nested VMs. 
Reviewed-by: Marc Zyngier Link: https://lore.kernel.org/r/20250708172532.1699409-19-oliver.upton@linux.dev Signed-off-by: Oliver Upton --- arch/arm64/kvm/inject_fault.c | 34 ++++++++++++++++++++++++++++++++-- 1 file changed, 32 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c index cab14a926bc6c..91efc3ed17747 100644 --- a/arch/arm64/kvm/inject_fault.c +++ b/arch/arm64/kvm/inject_fault.c @@ -204,7 +204,14 @@ static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr) static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu) { - return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA); + if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA)) + return true; + + if (!vcpu_mode_priv(vcpu)) + return false; + + return (*vcpu_cpsr(vcpu) & PSR_A_BIT) && + (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA); } int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr) @@ -258,9 +265,20 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu) inject_undef64(vcpu); } +static bool serror_is_masked(struct kvm_vcpu *vcpu) +{ + return *vcpu_cpsr(vcpu) & PSR_A_BIT; +} + static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu) { - return is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu); + if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu)) + return true; + + if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA)) + return false; + + return serror_is_masked(vcpu); } static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu) @@ -281,6 +299,18 @@ int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr) return 1; } + /* + * Emulate the exception entry if SErrors are unmasked. This is useful if + * the vCPU is in a nested context w/ vSErrors enabled then we've already + * delegated the hardware vSError context (i.e. HCR_EL2.VSE, VSESR_EL2, + * VDISR_EL2) to the guest hypervisor. 
+ */ + if (!serror_is_masked(vcpu)) { + pend_serror_exception(vcpu); + vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu)); + return 1; + } + vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK); *vcpu_hcr(vcpu) |= HCR_VSE; return 1; -- 2.47.2