From: Oliver Upton
Date: Tue, 8 Jul 2025 17:25:21 +0000 (-0700)
Subject: KVM: arm64: nv: Ensure Address size faults affect correct ESR
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=178ec0ae35f8a2181a60a2e7c31d8671cbb56f3a;p=thirdparty%2Flinux.git

KVM: arm64: nv: Ensure Address size faults affect correct ESR

For historical reasons, Address size faults are first injected into the
guest as an SEA and ESR_EL1 is subsequently modified to reflect the
correct FSC. Of course, when dealing with a vEL2 this should poke
ESR_EL2.

Reviewed-by: Marc Zyngier
Link: https://lore.kernel.org/r/20250708172532.1699409-17-oliver.upton@linux.dev
Signed-off-by: Oliver Upton
---

diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 4df42a41d0abd..88bc85ecdbb04 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -41,6 +41,22 @@ static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
 	}
 }
 
+static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
+{
+	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
+		return ESR_EL2;
+
+	return ESR_EL1;
+}
+
+static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
+{
+	if (exception_target_el(vcpu) == PSR_MODE_EL2h)
+		return FAR_EL2;
+
+	return FAR_EL1;
+}
+
 static void pend_sync_exception(struct kvm_vcpu *vcpu)
 {
 	if (exception_target_el(vcpu) == PSR_MODE_EL1h)
@@ -49,11 +65,6 @@ static void pend_sync_exception(struct kvm_vcpu *vcpu)
 		kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
 }
 
-static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
-{
-	return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
-}
-
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -83,13 +94,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
 	esr |= ESR_ELx_FSC_EXTABT;
 
-	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
-		vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
-		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
-	} else {
-		vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
-		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
-	}
+	vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
+	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
 }
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
@@ -105,10 +111,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	if (kvm_vcpu_trap_il_is32bit(vcpu))
 		esr |= ESR_ELx_IL;
 
-	if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
-		vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
-	else
-		vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
+	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
 }
 
 #define DFSR_FSC_EXTABT_LPAE	0x10
@@ -199,9 +202,9 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
 	    !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
 		return;
 
-	esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+	esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
 	esr &= ~GENMASK_ULL(5, 0);
-	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+	vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
 }
 
 /**
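
Illustrative note (not part of the patch): below is a minimal, standalone C sketch of the idea the change relies on. The target exception level selects whether ESR_EL1 or ESR_EL2 gets poked, and clearing ESR[5:0] (the FSC field) leaves 0b000000, the encoding for an Address size fault at level 0. The demo_vcpu type, demo_esr_elx() helper, TARGET_EL* values, and the sample ESR value are assumptions invented for this example; only GENMASK_ULL mirrors the kernel macro of the same name.

/* Standalone demo, compiles with a plain C compiler; not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Same bit-range mask as the kernel's GENMASK_ULL(). */
#define GENMASK_ULL(h, l) \
	((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Simplified stand-in for "which EL the exception is routed to". */
enum target_el { TARGET_EL1, TARGET_EL2 };

struct demo_vcpu {
	enum target_el target;	/* vEL1 guest vs. vEL2 (nested) guest */
	uint64_t esr_el1;
	uint64_t esr_el2;
};

/* Mirrors the intent of exception_esr_elx(): pick the target EL's ESR. */
static uint64_t *demo_esr_elx(struct demo_vcpu *vcpu)
{
	return vcpu->target == TARGET_EL2 ? &vcpu->esr_el2 : &vcpu->esr_el1;
}

int main(void)
{
	struct demo_vcpu vcpu = {
		.target  = TARGET_EL2,
		/* Pretend an external abort (FSC = 0x10) was injected. */
		.esr_el2 = 0x96000010ULL,
	};
	uint64_t *esr = demo_esr_elx(&vcpu);

	/*
	 * Clear ESR[5:0]: the FSC becomes 0b000000 ("Address size
	 * fault, level 0"), and the fixup lands in the ESR of the EL
	 * the exception was actually delivered to (here, ESR_EL2).
	 */
	*esr &= ~GENMASK_ULL(5, 0);
	printf("ESR_EL2 after fixup: 0x%llx\n", (unsigned long long)*esr);
	return 0;
}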