KVM: arm64: Handle DABT caused by LS64* instructions on unsupported memory
author    Yicong Yang <yangyicong@hisilicon.com>
          Mon, 19 Jan 2026 02:29:24 +0000 (10:29 +0800)
committer Will Deacon <will@kernel.org>
          Thu, 22 Jan 2026 13:24:49 +0000 (13:24 +0000)
If FEAT_LS64WB is not supported, the FEAT_LS64* instructions can only
access Device/Uncacheable memory; an access to any other memory type
generates a data abort for unsupported exclusive or atomic access
(0x35, UAoEF) per the architecture. The exception level the abort is
taken to is implementation defined, and an implementation may route
it to EL2 for a VHE VM, according to DDI0487L.b Section C3.2.6
"Single-copy atomic 64-byte load/store".

If the implementation generates the DABT at the final enabled stage
of translation (stage-2), inject the UAoEF back into the guest after
checking that the memslot is valid.
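
The injected syndrome mirrors what kvm_inject_nested_excl_atomic()
below composes: the EC for a lower-EL data abort, the IL bit, and
DFSC 0x35. A rough standalone sketch (hypothetical helper name;
field values taken from the Arm ARM):

    #include <stdint.h>

    #define ESR_EC_DABT_LOW     0x24ULL         /* data abort from a lower EL */
    #define ESR_EC_SHIFT        26
    #define ESR_IL              (1ULL << 25)    /* 32-bit instruction length */
    #define DFSC_EXCL_ATOMIC    0x35ULL         /* UAoEF */

    /* Hypothetical helper: compose the ESR for the injected data abort. */
    static uint64_t make_uaoef_esr(void)
    {
            return (ESR_EC_DABT_LOW << ESR_EC_SHIFT) | ESR_IL | DFSC_EXCL_ATOMIC;
    }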

Acked-by: Arnd Bergmann <arnd@arndb.de>
Acked-by: Oliver Upton <oupton@kernel.org>
Signed-off-by: Yicong Yang <yangyicong@hisilicon.com>
Signed-off-by: Zhou Wang <wangzhou1@hisilicon.com>
Signed-off-by: Will Deacon <will@kernel.org>
arch/arm64/include/asm/esr.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/kvm/inject_fault.c
arch/arm64/kvm/mmu.c

diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 4975a92cbd17c85f632b19f382de6ce520eb73e9..7e86d400864e031e0c328516134dc1779151c3a8 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
 #define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
 #define ESR_ELx_FSC_SECC       (0x18)
 #define ESR_ELx_FSC_SECC_TTW(n)        (0x1c + (n))
+#define ESR_ELx_FSC_EXCL_ATOMIC        (0x35)
 #define ESR_ELx_FSC_ADDRSZ     (0x00)
 
 /*
@@ -488,6 +489,13 @@ static inline bool esr_fsc_is_access_flag_fault(unsigned long esr)
               (esr == ESR_ELx_FSC_ACCESS_L(0));
 }
 
+static inline bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
+{
+       esr &= ESR_ELx_FSC;
+
+       return esr == ESR_ELx_FSC_EXCL_ATOMIC;
+}
+
 static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
 {
        esr &= ESR_ELx_FSC;
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index c9eab316398e2ea4c1a5b937ff9d476927fbcd58..bab967d657157f3b971b0ed1510cbc6436d6a370 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -47,6 +47,7 @@ void kvm_skip_instr32(struct kvm_vcpu *vcpu);
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
 int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
 
 static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index dfcd66c6551799774c130eafddb9424ca00f72f0..6cc7ad84d7d8ee0462582760936bfdb3920a08bd 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -253,6 +253,40 @@ int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
        return 1;
 }
 
+static int kvm_inject_nested_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+       u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
+                 FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
+                 ESR_ELx_IL;
+
+       vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
+       return kvm_inject_nested_sync(vcpu, esr);
+}
+
+/**
+ * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
+ *                              or atomic access
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+       u64 esr;
+
+       if (is_nested_ctxt(vcpu) && (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM))
+               return kvm_inject_nested_excl_atomic(vcpu, addr);
+
+       __kvm_inject_sea(vcpu, false, addr);
+       esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
+       esr &= ~ESR_ELx_FSC;
+       esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+       vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+       return 1;
+}
+
 void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
 {
        unsigned long addr, esr;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 48d7c372a4cd02da5ea7a7cc0f685e8617a682dd..edc348431d713d5b2222bd8d7629d113d228695d 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1845,6 +1845,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                return ret;
        }
 
+       /*
+        * The guest performed an atomic/exclusive operation on memory with
+        * unsupported attributes (e.g. ld64b/st64b on Normal memory without
+        * FEAT_LS64WB), triggering this exception. Since the memslot is
+        * valid, inject the fault back to the guest.
+        */
+       if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
+               kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+               return 1;
+       }
+
        if (nested)
                adjust_nested_fault_perms(nested, &prot, &writable);
 
@@ -2082,7 +2093,8 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
        /* Check the stage-2 fault is trans. fault or write fault */
        if (!esr_fsc_is_translation_fault(esr) &&
            !esr_fsc_is_permission_fault(esr) &&
-           !esr_fsc_is_access_flag_fault(esr)) {
+           !esr_fsc_is_access_flag_fault(esr) &&
+           !esr_fsc_is_excl_atomic_fault(esr)) {
                kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
                        kvm_vcpu_trap_get_class(vcpu),
                        (unsigned long)kvm_vcpu_trap_get_fault(vcpu),