	vm_exit_controls_setbit(vmx, exit);
}
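+/*
+ * Add or update @msr in the autoload/autostore list @m.  If @msr is not
+ * already present, consume a new slot and update the VMCS count field
+ * @vmcs_count_field to reflect the new list size.
+ */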
+static void vmx_add_auto_msr(struct vmx_msrs *m, u32 msr, u64 value,
+			     unsigned long vmcs_count_field, struct kvm *kvm)
+{
+	int i;
+
+	i = vmx_find_loadstore_msr_slot(m, msr);
+	if (i < 0) {
+		if (KVM_BUG_ON(m->nr == MAX_NR_LOADSTORE_MSRS, kvm))
+			return;
+
+		i = m->nr++;
+		m->val[i].index = msr;
+		vmcs_write32(vmcs_count_field, m->nr);
+	}
+	m->val[i].value = value;
+}
+
static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
				  u64 guest_val, u64 host_val)
{
	struct msr_autoload *m = &vmx->msr_autoload;
	struct kvm *kvm = vmx->vcpu.kvm;
-	int i;
	switch (msr) {
	case MSR_EFER:
		wrmsrq(MSR_IA32_PEBS_ENABLE, 0);
	}
-	i = vmx_find_loadstore_msr_slot(&m->guest, msr);
-	if (i < 0) {
-		if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm))
-			return;
-
-		i = m->guest.nr++;
-		m->guest.val[i].index = msr;
-		vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
-	}
-	m->guest.val[i].value = guest_val;
-
-	i = vmx_find_loadstore_msr_slot(&m->host, msr);
-	if (i < 0) {
-		if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
-			return;
-
-		i = m->host.nr++;
-		m->host.val[i].index = msr;
-		vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
-	}
-	m->host.val[i].value = host_val;
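+	/* Guest values are loaded on VM-Entry; host values on VM-Exit. */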
+	vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm);
+	vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm);
}
static bool update_transition_efer(struct vcpu_vmx *vmx)