 }
 
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
-				  u64 guest_val, u64 host_val, bool entry_only)
+				  u64 guest_val, u64 host_val)
 {
 	int i, j = 0;
 	struct msr_autoload *m = &vmx->msr_autoload;
@@ ... @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	}
 
 	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
-	if (!entry_only)
-		j = vmx_find_loadstore_msr_slot(&m->host, msr);
+	j = vmx_find_loadstore_msr_slot(&m->host, msr);
 
 	if ((i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS) ||
 	    (j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS)) {
@@ ... @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
 	m->guest.val[i].index = msr;
 	m->guest.val[i].value = guest_val;
 
-	if (entry_only)
-		return;
-
 	if (j < 0) {
 		j = m->host.nr++;
 		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
@@ ... @@ static bool update_transition_efer(struct vcpu_vmx *vmx)
 		if (!(guest_efer & EFER_LMA))
 			guest_efer &= ~EFER_LME;
 		if (guest_efer != kvm_host.efer)
-			add_atomic_switch_msr(vmx, MSR_EFER,
-					      guest_efer, kvm_host.efer, false);
+			add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, kvm_host.efer);
 		else
 			clear_atomic_switch_msr(vmx, MSR_EFER);
 		return false;
@@ ... @@ static void atomic_switch_perf_msrs(struct vcpu_vmx *vmx)
 			clear_atomic_switch_msr(vmx, msrs[i].msr);
 		else
 			add_atomic_switch_msr(vmx, msrs[i].msr, msrs[i].guest,
-					msrs[i].host, false);
+					msrs[i].host);
 }
 
 static void vmx_update_hv_timer(struct kvm_vcpu *vcpu, bool force_immediate_exit)