* L1 re-enters L2, the same instruction will trigger a VM-Exit and the
 * entire cycle starts over.
*/
- if (vmcb02->save.rip && (svm->nested.ctl.bus_lock_rip == vmcb02->save.rip))
+ if (vmcb02->save.rip && (svm->nested.last_bus_lock_rip == vmcb02->save.rip))
vmcb02->control.bus_lock_counter = 1;
else
vmcb02->control.bus_lock_counter = 0;
}
/*
- * Invalidate bus_lock_rip unless KVM is still waiting for the guest
- * to make forward progress before re-enabling bus lock detection.
+ * Invalidate last_bus_lock_rip unless KVM is still waiting for the
+ * guest to make forward progress before re-enabling bus lock detection.
*/
if (!vmcb02->control.bus_lock_counter)
- svm->nested.ctl.bus_lock_rip = INVALID_GPA;
+ svm->nested.last_bus_lock_rip = INVALID_GPA;
nested_svm_copy_common_state(svm->nested.vmcb02.ptr, svm->vmcb01.ptr);
vcpu->arch.complete_userspace_io = complete_userspace_buslock;
if (is_guest_mode(vcpu))
- svm->nested.ctl.bus_lock_rip = vcpu->arch.cui_linear_rip;
+ svm->nested.last_bus_lock_rip = vcpu->arch.cui_linear_rip;
return 0;
}
u64 nested_cr3;
u64 virt_ext;
u32 clean;
- u64 bus_lock_rip;
union {
#if IS_ENABLED(CONFIG_HYPERV) || IS_ENABLED(CONFIG_KVM_HYPERV)
struct hv_vmcb_enlightenments hv_enlightenments;
u64 vm_cr_msr;
u64 vmcb12_gpa;
u64 last_vmcb12_gpa;
+ u64 last_bus_lock_rip;
/*
* The MSR permissions map used for vmcb02, which is the merge result