KVM: VMX: Compartmentalize adding MSRs to host vs. guest auto-load list
author     Sean Christopherson <seanjc@google.com>
           Sat, 6 Dec 2025 00:17:17 +0000 (16:17 -0800)
committer  Sean Christopherson <seanjc@google.com>
           Thu, 8 Jan 2026 19:52:21 +0000 (11:52 -0800)
Undo the bundling of the "host" and "guest" MSR auto-load list logic so
that the code can be deduplicated by factoring out the logic to a separate
helper.  Now that "list full" situations are treated as fatal to the VM,
there is no need to pre-check both lists.

For all intents and purposes, this reverts the add_atomic_switch_msr()
changes made by commit 3190709335dd ("x86/KVM/VMX: Separate the VMX
AUTOLOAD guest/host number accounting").
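
For context, the deduplication this change sets up might look something like
the sketch below.  The helper name vmx_add_loadstore_msr and its exact
signature are assumptions for illustration, not part of this commit:

    /*
     * Sketch only: with the guest and host paths now symmetric, the common
     * logic could be pulled into one helper operating on either list.
     */
    static void vmx_add_loadstore_msr(struct vcpu_vmx *vmx,
                                      struct vmx_msrs *list, u32 vmcs_field,
                                      unsigned int msr, u64 val)
    {
            struct kvm *kvm = vmx->vcpu.kvm;
            int i;

            i = vmx_find_loadstore_msr_slot(list, msr);
            if (i < 0) {
                    /* Overflowing the list is now fatal to the VM. */
                    if (KVM_BUG_ON(list->nr == MAX_NR_LOADSTORE_MSRS, kvm))
                            return;

                    i = list->nr++;
                    list->val[i].index = msr;
                    vmcs_write32(vmcs_field, list->nr);
            }
            list->val[i].value = val;
    }

The two copies in add_atomic_switch_msr() would then collapse to:

    vmx_add_loadstore_msr(vmx, &m->guest, VM_ENTRY_MSR_LOAD_COUNT,
                          msr, guest_val);
    vmx_add_loadstore_msr(vmx, &m->host, VM_EXIT_MSR_LOAD_COUNT,
                          msr, host_val);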

Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Tested-by: Manali Shukla <manali.shukla@amd.com>
Link: https://patch.msgid.link/20251206001720.468579-42-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/vmx/vmx.c

index be2a2580e8f1bb4c455204cab2f7185f881846fa..018e01daab68c71fa8b40592892d8752208f981a 100644
@@ -1096,9 +1096,9 @@ static __always_inline void add_atomic_switch_msr_special(struct vcpu_vmx *vmx,
 static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
                                  u64 guest_val, u64 host_val)
 {
-       int i, j = 0;
        struct msr_autoload *m = &vmx->msr_autoload;
        struct kvm *kvm = vmx->vcpu.kvm;
+       int i;
 
        switch (msr) {
        case MSR_EFER:
@@ -1133,25 +1133,26 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
        }
 
        i = vmx_find_loadstore_msr_slot(&m->guest, msr);
-       j = vmx_find_loadstore_msr_slot(&m->host, msr);
-
-       if (KVM_BUG_ON(i < 0 && m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm) ||
-           KVM_BUG_ON(j < 0 &&  m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
-               return;
-
        if (i < 0) {
+               if (KVM_BUG_ON(m->guest.nr == MAX_NR_LOADSTORE_MSRS, kvm))
+                       return;
+
                i = m->guest.nr++;
                m->guest.val[i].index = msr;
                vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, m->guest.nr);
        }
        m->guest.val[i].value = guest_val;
 
-       if (j < 0) {
-               j = m->host.nr++;
-               m->host.val[j].index = msr;
+       i = vmx_find_loadstore_msr_slot(&m->host, msr);
+       if (i < 0) {
+               if (KVM_BUG_ON(m->host.nr == MAX_NR_LOADSTORE_MSRS, kvm))
+                       return;
+
+               i = m->host.nr++;
+               m->host.val[i].index = msr;
                vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, m->host.nr);
        }
-       m->host.val[j].value = host_val;
+       m->host.val[i].value = host_val;
 }
 
 static bool update_transition_efer(struct vcpu_vmx *vmx)