git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: arm64: nv: Respect exception routing rules for SEAs
author: Oliver Upton <oliver.upton@linux.dev>
Tue, 8 Jul 2025 17:25:10 +0000 (10:25 -0700)
committer: Oliver Upton <oliver.upton@linux.dev>
Tue, 8 Jul 2025 18:35:54 +0000 (11:35 -0700)
Synchronous external aborts are taken to EL2 if ELIsInHost() or
HCR_EL2.TEA=1. Rework the SEA injection plumbing to respect the imposed
routing of the guest hypervisor and opportunistically rephrase things to
make their function a bit more obvious.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20250708172532.1699409-6-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/guest.c
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/mmio.c
arch/arm64/kvm/mmu.c

index 8ba991b4bcfdead493aaff9da10d3f1963015f60..3a27ed4de9ac183d44078656e5823db45d673d2b 100644 (file)
@@ -46,15 +46,25 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
+static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+       return kvm_inject_sea(vcpu, false, addr);
+}
+
+static inline int kvm_inject_sea_iabt(struct kvm_vcpu *vcpu, u64 addr)
+{
+       return kvm_inject_sea(vcpu, true, addr);
+}
+
 void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);
 
 void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
 int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
 int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
 
 static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
 {
index 1de4a9001d9d529c00c8ec15171a57a0cb378834..65a2471c5638f995a935306b73f46de7e0c4ce44 100644 (file)
@@ -2811,3 +2811,13 @@ int kvm_inject_nested_irq(struct kvm_vcpu *vcpu)
        /* esr_el2 value doesn't matter for exits due to irqs. */
        return kvm_inject_nested(vcpu, 0, except_type_irq);
 }
+
+int kvm_inject_nested_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+       u64 esr = FIELD_PREP(ESR_ELx_EC_MASK,
+                            iabt ? ESR_ELx_EC_IABT_LOW : ESR_ELx_EC_DABT_LOW);
+       esr |= ESR_ELx_FSC_EXTABT | ESR_ELx_IL;
+
+       vcpu_write_sys_reg(vcpu, FAR_EL2, addr);
+       return kvm_inject_nested_sync(vcpu, esr);
+}
index 2196979a24a325311d6111404e4d089287c41bfe..8983a43fb45e9a475fcf8fb3cc8de40b3f86a189 100644 (file)
@@ -839,6 +839,7 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
        bool serror_pending = events->exception.serror_pending;
        bool has_esr = events->exception.serror_has_esr;
        bool ext_dabt_pending = events->exception.ext_dabt_pending;
+       int ret = 0;
 
        if (serror_pending && has_esr) {
                if (!cpus_have_final_cap(ARM64_HAS_RAS_EXTN))
@@ -853,9 +854,9 @@ int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
        }
 
        if (ext_dabt_pending)
-               kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
+               ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
-       return 0;
+       return (ret < 0) ? ret : 0;
 }
 
 u32 __attribute_const__ kvm_target_cpu(void)
index a640e839848e601f622c7eeacec7b3600608dbbd..d9fa4046b602170b37e1d9b59d19c63adac64974 100644 (file)
@@ -155,36 +155,28 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt, u32 addr)
        vcpu_write_sys_reg(vcpu, far, FAR_EL1);
 }
 
-/**
- * kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the data abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static void __kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
 {
        if (vcpu_el1_is_32bit(vcpu))
-               inject_abt32(vcpu, false, addr);
+               inject_abt32(vcpu, iabt, addr);
        else
-               inject_abt64(vcpu, false, addr);
+               inject_abt64(vcpu, iabt, addr);
 }
 
-/**
- * kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the prefetch abort
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
 {
-       if (vcpu_el1_is_32bit(vcpu))
-               inject_abt32(vcpu, true, addr);
-       else
-               inject_abt64(vcpu, true, addr);
+       return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA);
+}
+
+int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
+{
+       lockdep_assert_held(&vcpu->mutex);
+
+       if (is_nested_ctxt(vcpu) && kvm_sea_target_is_el2(vcpu))
+               return kvm_inject_nested_sea(vcpu, iabt, addr);
+
+       __kvm_inject_sea(vcpu, iabt, addr);
+       return 1;
 }
 
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
@@ -194,10 +186,7 @@ void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
        addr  = kvm_vcpu_get_fault_ipa(vcpu);
        addr |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
-       if (kvm_vcpu_trap_is_iabt(vcpu))
-               kvm_inject_pabt(vcpu, addr);
-       else
-               kvm_inject_dabt(vcpu, addr);
+       __kvm_inject_sea(vcpu, kvm_vcpu_trap_is_iabt(vcpu), addr);
 
        /*
         * If AArch64 or LPAE, set FSC to 0 to indicate an Address
index ab365e839874e580ba60df273e623ba489599e5a..573a6ade2f4e9bca0dcf915fa5608dee2b81f6d3 100644 (file)
@@ -169,10 +169,8 @@ int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
                trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
                                    kvm_vcpu_get_hfar(vcpu), fault_ipa);
 
-               if (vcpu_is_protected(vcpu)) {
-                       kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                       return 1;
-               }
+               if (vcpu_is_protected(vcpu))
+                       return kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 
                if (test_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
                             &vcpu->kvm->arch.flags)) {
index 2942ec92c5a4a2746a94837c1e98a178ff98ff6f..f05d70dd6d51dd9dd3f3ad36f11b3178e2eb0432 100644 (file)
@@ -1836,11 +1836,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                if (fault_ipa >= BIT_ULL(VTCR_EL2_IPA(vcpu->arch.hw_mmu->vtcr))) {
                        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);
 
-                       if (is_iabt)
-                               kvm_inject_pabt(vcpu, fault_ipa);
-                       else
-                               kvm_inject_dabt(vcpu, fault_ipa);
-                       return 1;
+                       return kvm_inject_sea(vcpu, is_iabt, fault_ipa);
                }
        }
 
@@ -1912,8 +1908,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
                }
 
                if (kvm_vcpu_abt_iss1tw(vcpu)) {
-                       kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-                       ret = 1;
+                       ret = kvm_inject_sea_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
                        goto out_unlock;
                }
 
@@ -1958,10 +1953,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        if (ret == 0)
                ret = 1;
 out:
-       if (ret == -ENOEXEC) {
-               kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
-               ret = 1;
-       }
+       if (ret == -ENOEXEC)
+               ret = kvm_inject_sea_iabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 out_unlock:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
        return ret;