kvm_skip_host_instr();
}
+/*
+ * Inject an Undefined Instruction exception into the host.
+ *
+ * This is open-coded to allow control over PSTATE construction without
+ * complicating the generic exception entry helpers.
+ */
+static void inject_undef64(void)
+{
+ u64 spsr_mask, vbar, sctlr, old_spsr, new_spsr, esr, offset;
+
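+ /* PSTATE bits inherited from the interrupted host context. */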
+ spsr_mask = PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT | PSR_DIT_BIT | PSR_PAN_BIT;
+
+ vbar = read_sysreg_el1(SYS_VBAR);
+ sctlr = read_sysreg_el1(SYS_SCTLR);
+ old_spsr = read_sysreg_el2(SYS_SPSR);
+
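+ /* Run the handler at EL1h (SP_EL1) with all of DAIF masked. */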
+ new_spsr = old_spsr & spsr_mask;
+ new_spsr |= PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT;
+ new_spsr |= PSR_MODE_EL1h;
+
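+ /* PAN is set on entry to EL1 unless SCTLR_EL1.SPAN is set. */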
+ if (!(sctlr & SCTLR_EL1_SPAN))
+ new_spsr |= PSR_PAN_BIT;
+
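+ /* SSBS is set on entry if SCTLR_EL1.DSSBS is set, cleared otherwise. */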
+ if (sctlr & SCTLR_ELx_DSSBS)
+ new_spsr |= PSR_SSBS_BIT;
+
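+ /* TCO is set on exception entry when MTE is implemented, masking tag checks. */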
+ if (system_supports_mte())
+ new_spsr |= PSR_TCO_BIT;
+
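+ /* "Unknown reason" ESR, delivered via the current-EL SP_ELx synchronous vector. */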
+ esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT) | ESR_ELx_IL;
+ offset = CURRENT_EL_SP_ELx_VECTOR + except_type_sync;
+
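+ /*
+ * EL1 sees the original context (return PC, PSTATE and the synthetic
+ * ESR), while ELR_EL2/SPSR_EL2 make the next exception return land in
+ * the host's vectors with the new PSTATE.
+ */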
+ write_sysreg_el1(esr, SYS_ESR);
+ write_sysreg_el1(read_sysreg_el2(SYS_ELR), SYS_ELR);
+ write_sysreg_el1(old_spsr, SYS_SPSR);
+ write_sysreg_el2(vbar + offset, SYS_ELR);
+ write_sysreg_el2(new_spsr, SYS_SPSR);
+}
+
+static bool handle_host_mte(u64 esr)
+{
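+ /* Only claim sysreg traps caused by the host's MTE trap configuration. */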
+ switch (esr_sys64_to_sysreg(esr)) {
+ case SYS_RGSR_EL1:
+ case SYS_GCR_EL1:
+ case SYS_TFSR_EL1:
+ case SYS_TFSRE0_EL1:
+ /* These registers only trap when HCR_EL2.ATA is clear; anything else is a bug. */
+ if (read_sysreg(hcr_el2) & HCR_ATA)
+ return false;
+ break;
+ case SYS_GMID_EL1:
+ /* GMID_EL1 only traps when HCR_EL2.TID5 is set; anything else is a bug. */
+ if (!(read_sysreg(hcr_el2) & HCR_TID5))
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ inject_undef64();
+ return true;
+}
+
void handle_trap(struct kvm_cpu_context *host_ctxt)
{
u64 esr = read_sysreg_el2(SYS_ESR);
case ESR_ELx_EC_DABT_LOW:
handle_host_mem_abort(host_ctxt);
break;
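+ /* MTE sysreg traps get an UNDEF injected; any other sysreg trap is fatal. */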
+ case ESR_ELx_EC_SYS64:
+ if (handle_host_mte(esr))
+ break;
+ fallthrough;
default:
BUG();
}