svm->vmcb->control.insn_len);
}
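+/* Forward declaration; the definition comes later in the file, but npf_interception() needs it. */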
+static int svm_check_emulate_instruction(struct kvm_vcpu *vcpu, int emul_type,
+ void *insn, int insn_len);
+
static int npf_interception(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);
if (WARN_ON_ONCE(error_code & PFERR_SYNTHETIC_MASK))
error_code &= ~PFERR_SYNTHETIC_MASK;
+ /*
+ * Expedite fast MMIO kicks if the next RIP is known and KVM is allowed
+ * to emulate in response to the page fault, e.g. skipping the current
+ * instruction is wrong if the #NPF occurred while vectoring an event.
+ */
+ if ((error_code & PFERR_RSVD_MASK) && !is_guest_mode(vcpu)) {
+ const int emul_type = EMULTYPE_PF | EMULTYPE_NO_DECODE;
+
+ if (svm_check_emulate_instruction(vcpu, emul_type, NULL, 0))
+ return 1;
+
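+ /*
+ * A zero return from kvm_io_bus_write() means a wildcard (zero-length)
+ * ioeventfd on the fast MMIO bus claimed the access, i.e. the "write"
+ * only needed to signal the eventfd, so the instruction can be skipped
+ * without full emulation.
+ */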
+ if (nrips && svm->vmcb->control.next_rip &&
+ !kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, gpa, 0, NULL)) {
+ trace_kvm_fast_mmio(gpa);
+ return kvm_skip_emulated_instruction(vcpu);
+ }
+ }
+
if (sev_snp_guest(vcpu->kvm) && (error_code & PFERR_GUEST_ENC_MASK))
error_code |= PFERR_PRIVATE_ACCESS;