static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
-	int i, j = 0;
	struct msr_autoload *m = &vmx->msr_autoload;
	struct kvm *kvm = vmx->vcpu.kvm;
+	int i;
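
	/*
	 * m->guest and m->host back the VM-entry and VM-exit MSR-load areas
	 * referenced by the VMCS; each entry pairs an MSR index with the
	 * value to load.
	 */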

	switch (msr) {
	case MSR_EFER:
		/* ... */
	}
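
	/*
	 * The switch special-cases MSRs that the hardware can switch via
	 * dedicated VMCS controls (MSR_EFER, where supported), handling the
	 * update there and returning early instead of consuming a slot in
	 * the autoload lists.
	 */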

	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
-	j = vmx_find_loadstore_msr_slot(&m->host, msr);
-
-	if (KVM_BUG_ON(i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm) ||
-	    KVM_BUG_ON(j < 0 && m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
-		return;
-
	if (i < 0) {
+		if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm))
+			return;
+
		i = m->guest.nr++;
		m->guest.val[i].index = msr;
		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
	}
	m->guest.val[i].value = guest_val;
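
	/* The guest slot index is dead here, so 'i' is reused for the host list. */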
-	if (j < 0) {
-		j = m->host.nr++;
-		m->host.val[j].index = msr;
+	i = vmx_find_loadstore_msr_slot(&m->host, msr);
+	if (i < 0) {
+		if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
+			return;
+
+		i = m->host.nr++;
+		m->host.val[i].index = msr;
		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
	}
-	m->host.val[j].value = host_val;
+	m->host.val[i].value = host_val;
}
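
/*
 * Usage sketch (illustrative; the variable names are hypothetical): a caller
 * that needs an MSR swapped atomically around VM-entry/VM-exit would do:
 *
 *	add_atomic_switch_msr(vmx, MSR_EFER, guest_efer, host_efer);
 *
 * The CPU then loads the guest value from the VM-entry MSR-load list on
 * every entry and restores the host value from the VM-exit MSR-load list on
 * every exit.
 */
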
static bool update_transition_efer(struct vcpu_vmx *vmx)