git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: nv: Ensure Address size faults affect correct ESR
author: Oliver Upton <oliver.upton@linux.dev>
Tue, 8 Jul 2025 17:25:21 +0000 (10:25 -0700)
committer: Oliver Upton <oliver.upton@linux.dev>
Tue, 8 Jul 2025 18:36:35 +0000 (11:36 -0700)
For historical reasons, Address size faults are first injected into the
guest as an SEA and ESR_EL1 is subsequently modified to reflect the
correct FSC. Of course, when dealing with a vEL2 this should poke
ESR_EL2.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-17-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/inject_fault.c

index 4df42a41d0abd696bcf8f3ce600ed375d7c46e49..88bc85ecdbb04b21f6712750e574235a64e31972 100644 (file)
@@ -41,6 +41,22 @@ static unsigned int exception_target_el(struct kvm_vcpu *vcpu)
        }
 }
 
+static enum vcpu_sysreg exception_esr_elx(struct kvm_vcpu *vcpu)
+{
+       if (exception_target_el(vcpu) == PSR_MODE_EL2h)
+               return ESR_EL2;
+
+       return ESR_EL1;
+}
+
+static enum vcpu_sysreg exception_far_elx(struct kvm_vcpu *vcpu)
+{
+       if (exception_target_el(vcpu) == PSR_MODE_EL2h)
+               return FAR_EL2;
+
+       return FAR_EL1;
+}
+
 static void pend_sync_exception(struct kvm_vcpu *vcpu)
 {
        if (exception_target_el(vcpu) == PSR_MODE_EL1h)
@@ -49,11 +65,6 @@ static void pend_sync_exception(struct kvm_vcpu *vcpu)
                kvm_pend_exception(vcpu, EXCEPT_AA64_EL2_SYNC);
 }
 
-static bool match_target_el(struct kvm_vcpu *vcpu, unsigned long target)
-{
-       return (vcpu_get_flag(vcpu, EXCEPT_MASK) == target);
-}
-
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
        unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -83,13 +94,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 
        esr |= ESR_ELx_FSC_EXTABT;
 
-       if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC))) {
-               vcpu_write_sys_reg(vcpu, addr, FAR_EL1);
-               vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
-       } else {
-               vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
-               vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
-       }
+       vcpu_write_sys_reg(vcpu, addr, exception_far_elx(vcpu));
+       vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
 }
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
@@ -105,10 +111,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
        if (kvm_vcpu_trap_il_is32bit(vcpu))
                esr |= ESR_ELx_IL;
 
-       if (match_target_el(vcpu, unpack_vcpu_flag(EXCEPT_AA64_EL1_SYNC)))
-               vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
-       else
-               vcpu_write_sys_reg(vcpu, esr, ESR_EL2);
+       vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
 }
 
 #define DFSR_FSC_EXTABT_LPAE   0x10
@@ -199,9 +202,9 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
            !(vcpu_read_sys_reg(vcpu, TCR_EL1) & TTBCR_EAE))
                return;
 
-       esr = vcpu_read_sys_reg(vcpu, ESR_EL1);
+       esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
        esr &= ~GENMASK_ULL(5, 0);
-       vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
+       vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
 }
 
 /**