git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: VMX: Dedup code for adding MSR to VMCS's auto list
authorSean Christopherson <seanjc@google.com>
Sat, 6 Dec 2025 00:17:18 +0000 (16:17 -0800)
committerSean Christopherson <seanjc@google.com>
Thu, 8 Jan 2026 19:52:22 +0000 (11:52 -0800)
Add a helper to add an MSR to a VMCS's "auto" list to deduplicate the code
in add_atomic_switch_msr(), and so that the functionality can be used in
the future for managing the MSR auto-store list.

No functional change intended.

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-43-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmx.c

index 018e01daab68c71fa8b40592892d8752208f981a..3f64d4b1b19c529073b5e2ac489ec2b85060a84c 100644 (file)
@@ -1093,12 +1093,28 @@ static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
        vm_exit_controls_setbit(vmx, exit);
 }
 
+static void vmx_add_auto_msr(struct vmx_msrs *m, u32 msr, u64 value,
+                            unsigned long vmcs_count_field, struct kvm *kvm)
+{
+       int i;
+
+       i = vmx_find_loadstore_msr_slot(m, msr);
+       if (i < 0) {
+               if (KVM_BUG_ON(m->nr == MAX_NR_LOADSTORE_MSRS, kvm))
+                       return;
+
+               i = m->nr++;
+               m->val[i].index = msr;
+               vmcs_write32(vmcs_count_field, m->nr);
+       }
+       m->val[i].value = value;
+}
+
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                                  u64 guest_val, u64 host_val)
 {
        struct msr_autoload *m = &vmx->msr_autoload;
        struct kvm *kvm = vmx->vcpu.kvm;
-       int i;
 
        switch (msr) {
        case MSR_EFER:
@@ -1132,27 +1148,8 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                wrmsrq(MSR_IA32_PEBS_ENABLE, 0);
        }
 
-       i = vmx_find_loadstore_msr_slot(&m->guest, msr);
-       if (i < 0) {
-               if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm))
-                       return;
-
-               i = m->guest.nr++;
-               m->guest.val[i].index = msr;
-               vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
-       }
-       m->guest.val[i].value = guest_val;
-
-       i = vmx_find_loadstore_msr_slot(&m->host, msr);
-       if (i < 0) {
-               if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
-                       return;
-
-               i = m->host.nr++;
-               m->host.val[i].index = msr;
-               vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
-       }
-       m->host.val[i].value = host_val;
+       vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm);
+       vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm);
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx)