#define ESR_ELx_FSC_SEA_TTW(n) (0x14 + (n))
#define ESR_ELx_FSC_SECC (0x18)
#define ESR_ELx_FSC_SECC_TTW(n) (0x1c + (n))
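+/* IMPLEMENTATION DEFINED fault: unsupported exclusive or atomic access */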
+#define ESR_ELx_FSC_EXCL_ATOMIC (0x35)
#define ESR_ELx_FSC_ADDRSZ (0x00)
/*
(esr == ESR_ELx_FSC_ACCESS_L(0));
}
+static inline bool esr_fsc_is_excl_atomic_fault(unsigned long esr)
+{
+ esr &= ESR_ELx_FSC;
+
+ return esr == ESR_ELx_FSC_EXCL_ATOMIC;
+}
+
static inline bool esr_fsc_is_addr_sz_fault(unsigned long esr)
{
esr &= ESR_ELx_FSC;
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
int kvm_inject_serror_esr(struct kvm_vcpu *vcpu, u64 esr);
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr);
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);
static inline int kvm_inject_sea_dabt(struct kvm_vcpu *vcpu, u64 addr)
return 1;
}
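+/*
+ * Forward an unsupported exclusive/atomic data abort to the guest
+ * hypervisor's virtual EL2 as a synchronous data abort.
+ */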
+static int kvm_inject_nested_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+ u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_DABT_LOW) |
+ FIELD_PREP(ESR_ELx_FSC, ESR_ELx_FSC_EXCL_ATOMIC) |
+ ESR_ELx_IL;
+
+ vcpu_write_sys_reg(vcpu, addr, FAR_EL2);
+ return kvm_inject_nested_sync(vcpu, esr);
+}
+
+/**
+ * kvm_inject_dabt_excl_atomic - inject a data abort for unsupported exclusive
+ * or atomic access
+ * @vcpu: The VCPU to receive the data abort
+ * @addr: The address to report in the DFAR
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ */
+int kvm_inject_dabt_excl_atomic(struct kvm_vcpu *vcpu, u64 addr)
+{
+ u64 esr;
+
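+ /*
+ * If the guest hypervisor has its stage-2 translation enabled, the
+ * abort is taken to the virtual EL2 instead.
+ */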
+ if (is_nested_ctxt(vcpu) && (vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM))
+ return kvm_inject_nested_excl_atomic(vcpu, addr);
+
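+ /*
+ * Otherwise reuse the external data abort injection path, then rewrite
+ * the FSC to report the unsupported exclusive/atomic access.
+ */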
+ __kvm_inject_sea(vcpu, false, addr);
+ esr = vcpu_read_sys_reg(vcpu, exception_esr_elx(vcpu));
+ esr &= ~ESR_ELx_FSC;
+ esr |= ESR_ELx_FSC_EXCL_ATOMIC;
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+ return 1;
+}
+
void kvm_inject_size_fault(struct kvm_vcpu *vcpu)
{
unsigned long addr, esr;
return ret;
}
+ /*
+ * The guest performed an atomic or exclusive access to memory with
+ * unsupported attributes (e.g. ld64b/st64b on Normal memory without
+ * FEAT_LS64WB), which triggered this fault. Since the memslot is
+ * valid, inject the fault back into the guest.
+ */
+ if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
+ kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
+ return 1;
+ }
+
if (nested)
adjust_nested_fault_perms(nested, &prot, &writable);
/* Check the stage-2 fault is trans. fault or write fault */
if (!esr_fsc_is_translation_fault(esr) &&
!esr_fsc_is_permission_fault(esr) &&
- !esr_fsc_is_access_flag_fault(esr)) {
+ !esr_fsc_is_access_flag_fault(esr) &&
+ !esr_fsc_is_excl_atomic_fault(esr)) {
kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
kvm_vcpu_trap_get_class(vcpu),
(unsigned long)kvm_vcpu_trap_get_fault(vcpu),