 int nested_svm_vmrun(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	int ret, err;
+	int ret;
 	u64 vmcb12_gpa;
 	struct vmcb *vmcb01 = svm->vmcb01.ptr;
 		return -EINVAL;
 	vmcb12_gpa = svm->vmcb->save.rax;
-	err = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
-	if (err == -EFAULT) {
-		kvm_inject_gp(vcpu, 0);
-		return 1;
+
+	ret = nested_svm_copy_vmcb12_to_cache(vcpu, vmcb12_gpa);
+	if (ret) {
+		if (ret == -EFAULT) {
+			kvm_inject_gp(vcpu, 0);
+			return 1;
+		}
+
+		/* Advance RIP past VMRUN as part of the nested #VMEXIT. */
+		return kvm_skip_emulated_instruction(vcpu);
 	}
-	/*
-	 * Advance RIP if #GP or #UD are not injected, but otherwise stop if
-	 * copying and checking vmcb12 failed.
-	 */
+	/* At this point, VMRUN is guaranteed to not fault; advance RIP. */
 	ret = kvm_skip_emulated_instruction(vcpu);
-	if (err)
-		return ret;
 	/*
 	 * Since vmcb01 is not in use, we can use it to store some of the L1