bool is_write;
int len;
u8 data_buf[8];
+ u64 esr;
+
+ esr = kvm_vcpu_get_esr(vcpu);
/*
 * No valid syndrome? Ask userspace for help if it has
 * volunteered to do so, and bail out otherwise.
 *
 * In the protected VM case, there isn't much userspace can do
 * though, so directly deliver an exception to the guest.
 */
if (!kvm_vcpu_dabt_isvalid(vcpu)) {
- trace_kvm_mmio_nisv(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu),
+ trace_kvm_mmio_nisv(*vcpu_pc(vcpu), esr,
kvm_vcpu_get_hfar(vcpu), fault_ipa);
	if (vcpu_is_protected(vcpu)) {
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	return -ENOSYS;
}

+ /*
+ * When (DFSC == 0b00xxxx || DFSC == 0b10101x) && DFSC != 0b0000xx,
+ * ESR_EL2[12:11] describe the Load/Store Type. This allows us to
+ * punt the LD64B/ST64B/ST64BV/ST64BV0 instructions to userspace,
+ * which will have to provide a full emulation of these 4
+ * instructions. No, we don't expect this to be fast.
+ *
+ * We rely on traps being set if the corresponding features are not
+ * enabled, so if we get here, userspace has already promised to
+ * handle this access.
+ */
+ switch (kvm_vcpu_trap_get_fault(vcpu)) {
+ case 0b000100 ... 0b001111:
+ case 0b101010 ... 0b101011:
+ if (FIELD_GET(GENMASK(12, 11), esr)) {
+ run->exit_reason = KVM_EXIT_ARM_LDST64B;
+ run->arm_nisv.esr_iss = esr & ~(u64)ESR_ELx_FSC;
+ run->arm_nisv.fault_ipa = fault_ipa;
+ return 0;
+ }
+ }
+
/*
 * Prepare MMIO operation. First decode the syndrome data we get
 * from the CPU. Then try if some in-kernel emulation feels
 * lucky. If not, fall back to userspace.
 */
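
With the kernel side in place, a VMM that exposed FEAT_LS64* to its guest has to fully emulate LD64B/ST64B/ST64BV/ST64BV0 whenever they fault on unsupported memory. Below is a minimal sketch of that userspace path, assuming the uapi additions that follow; emulate_ls64_access() is a hypothetical stand-in for the VMM's actual 64-byte access emulation:

#include <stdint.h>
#include <linux/kvm.h>

/* Hypothetical helper performing the 64-byte access for the guest. */
extern int emulate_ls64_access(unsigned int lst, uint64_t fault_ipa);

static int handle_ldst64b(struct kvm_run *run)
{
	/*
	 * The kernel cleared ESR_ELx_FSC before exiting, so esr_iss is
	 * the remaining ISS. Bits [12:11] carry the Load/Store Type
	 * identifying which of the four instructions faulted.
	 */
	unsigned int lst = (run->arm_nisv.esr_iss >> 11) & 0x3;

	return emulate_ls64_access(lst, run->arm_nisv.fault_ipa);
}

The uapi side allocates the new exit reason and reuses the existing NISV payload:
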
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
+#define KVM_EXIT_ARM_LDST64B 42
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
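
Since both exit reasons carry the same payload (the arm_nisv struct in the next hunk), a VMM can decode them in one place and branch only on exit_reason. A sketch, with the vmm_handle_*() helpers as hypothetical stand-ins for the VMM's own logic:

#include <stdint.h>
#include <linux/kvm.h>

/* Hypothetical handlers supplied by the VMM. */
extern int vmm_handle_nisv(uint64_t esr_iss, uint64_t fault_ipa);
extern int vmm_handle_ldst64b(uint64_t esr_iss, uint64_t fault_ipa);

static int dispatch_mmio_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_ARM_NISV:
		return vmm_handle_nisv(run->arm_nisv.esr_iss,
				       run->arm_nisv.fault_ipa);
	case KVM_EXIT_ARM_LDST64B:
		return vmm_handle_ldst64b(run->arm_nisv.esr_iss,
					  run->arm_nisv.fault_ipa);
	default:
		return -1;
	}
}
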
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;