static bool kvm_sea_target_is_el2(struct kvm_vcpu *vcpu)
{
- return __vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA);
+ if (__vcpu_sys_reg(vcpu, HCR_EL2) & (HCR_TGE | HCR_TEA))
+ return true;
+
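+ /*
+ * HCRX_EL2.TMEA (FEAT_DoubleFault2) routes masked external aborts taken
+ * from EL1 to EL2.
+ */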
+ if (!vcpu_mode_priv(vcpu))
+ return false;
+
+ return (*vcpu_cpsr(vcpu) & PSR_A_BIT) &&
+ (__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA);
}
int kvm_inject_sea(struct kvm_vcpu *vcpu, bool iabt, u64 addr)
inject_undef64(vcpu);
}
+static bool serror_is_masked(struct kvm_vcpu *vcpu)
+{
+ return *vcpu_cpsr(vcpu) & PSR_A_BIT;
+}
+
static bool kvm_serror_target_is_el2(struct kvm_vcpu *vcpu)
{
- return is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu);
+ if (is_hyp_ctxt(vcpu) || vcpu_el2_amo_is_set(vcpu))
+ return true;
+
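+ /*
+ * Otherwise, HCRX_EL2.TMEA (FEAT_DoubleFault2) routes masked SErrors
+ * to EL2.
+ */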
+ if (!(__vcpu_sys_reg(vcpu, HCRX_EL2) & HCRX_EL2_TMEA))
+ return false;
+
+ return serror_is_masked(vcpu);
}
static bool kvm_serror_undeliverable_at_el2(struct kvm_vcpu *vcpu)
return 1;
}
+ /*
+ * Emulate the exception entry if SErrors are unmasked. This matters when
+ * the vCPU is in a nested context w/ vSErrors enabled, as we've already
+ * delegated the hardware vSError context (i.e. HCR_EL2.VSE, VSESR_EL2,
+ * VDISR_EL2) to the guest hypervisor.
+ */
+ if (!serror_is_masked(vcpu)) {
+ pend_serror_exception(vcpu);
+ vcpu_write_sys_reg(vcpu, esr, exception_esr_elx(vcpu));
+ return 1;
+ }
+
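+ /* Pend a vSError in hardware: HCR_EL2.VSE w/ the ESR in VSESR_EL2. */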
vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
*vcpu_hcr(vcpu) |= HCR_VSE;
return 1;