vmcb12->control.exit_info_1 = vmcb02->control.exit_info_1;
vmcb12->control.exit_info_2 = vmcb02->control.exit_info_2;
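+ /*
+ * On a failed VMRUN, exit state beyond the error code itself is not
+ * meaningful, so skip saving a pending event into vmcb12.
+ */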
- if (vmcb12->control.exit_code != SVM_EXIT_ERR)
+ if (!svm_is_vmrun_failure(vmcb12->control.exit_code))
nested_save_pending_event_to_vmcb12(svm, vmcb12);
if (guest_cpu_cap_has(vcpu, X86_FEATURE_NRIPS))
u32 exit_code = svm->vmcb->control.exit_code;
int vmexit = NESTED_EXIT_HOST;
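+ /*
+ * A failed VMRUN can never be handled by the host; reflect it to L1
+ * as a nested #VMEXIT right away.
+ */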
+ if (svm_is_vmrun_failure(exit_code))
+ return NESTED_EXIT_DONE;
+
switch (exit_code) {
case SVM_EXIT_MSR:
vmexit = nested_svm_exit_handled_msr(svm);
break;
case SVM_EXIT_IOIO:
vmexit = nested_svm_intercept_ioio(svm);
break;
- case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
+ case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f:
/*
* Host-intercepted exceptions have been checked already in
* nested_svm_exit_special. There is nothing to do here;
* the vmexit is injected by svm_check_nested_events.
*/
vmexit = NESTED_EXIT_DONE;
break;
- }
- case SVM_EXIT_ERR: {
- vmexit = NESTED_EXIT_DONE;
- break;
- }
- default: {
+ default:
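+ /* Everything else is reflected to L1 only if vmcb12 intercepts it. */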
if (vmcb12_is_intercept(&svm->nested.ctl, exit_code))
vmexit = NESTED_EXIT_DONE;
- }
+ break;
}
return vmexit;
return 1;
}
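+ /*
+ * A hardware entry failure is reported to userspace as
+ * KVM_EXIT_FAIL_ENTRY, with the raw exit code as the reason.
+ */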
- if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
+ if (svm_is_vmrun_failure(svm->vmcb->control.exit_code)) {
kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
kvm_run->fail_entry.hardware_entry_failure_reason
= svm->vmcb->control.exit_code;
/* Track VMRUNs that have made it past consistency checking */
if (svm->nested.nested_run_pending &&
- svm->vmcb->control.exit_code != SVM_EXIT_ERR)
+ !svm_is_vmrun_failure(svm->vmcb->control.exit_code))
++vcpu->stat.nested_run;
svm->nested.nested_run_pending = 0;
return container_of(vcpu, struct vcpu_svm, vcpu);
}
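+ /*
+ * SVM_EXIT_ERR is -1; exit codes are sometimes carried in u64 fields,
+ * so compare only the low 32 bits to match a sign-extended -1 as well.
+ */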
+static inline bool svm_is_vmrun_failure(u64 exit_code)
+{
+ return (u32)exit_code == (u32)SVM_EXIT_ERR;
+}
+
/*
* Only the PDPTRs are loaded on demand into the shadow MMU. All other
* fields are synchronized on VM-Exit, because accessing the VMCB is cheap.