KVM: x86: Move INIT_RECEIVED vs. INIT/SIPI blocked check to KVM_RUN
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 5 Jun 2025 19:50:17 +0000 (12:50 -0700)
Commit:     Sean Christopherson <seanjc@google.com>
CommitDate: Fri, 20 Jun 2025 20:08:00 +0000 (13:08 -0700)
Check for the should-be-impossible scenario of a vCPU being in
Wait-For-SIPI with INIT/SIPI blocked during KVM_RUN instead of trying to
detect and prevent illegal combinations in every ioctl that sets relevant
state.  Attempting to handle every possible "set" path is a losing game of
whack-a-mole, and risks breaking userspace.  E.g. INIT/SIPI are blocked on
Intel if the vCPU is in VMX Root mode (post-VMXON), and on AMD if GIF=0.
Handling those scenarios would require potentially breaking changes to
{vmx,svm}_set_nested_state().

Moving the check to KVM_RUN fixes a syzkaller-induced splat due to the
aforementioned VMXON case, and in theory should close the hole once and for
all.

Note, kvm_x86_vcpu_pre_run() already handles SIPI_RECEIVED; only the WFS
case needs additional attention.

Reported-by: syzbot+c1cbaedc2613058d5194@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?id=490ae63d8d89cb82c5d462d16962cf371df0e476
Link: https://lore.kernel.org/r/20250605195018.539901-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
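
[Editor's illustration, not part of the patch: a minimal userspace sketch of
the ioctl flow this change affects, assuming a bare vCPU with no guest code
loaded.  KVM_SET_MP_STATE now accepts KVM_MP_STATE_INIT_RECEIVED
unconditionally, and the impossible-state check fires at KVM_RUN instead.
The nested-virt setup actually needed to block INIT/SIPI (post-VMXON on
Intel, GIF=0 on AMD) is elided; error handling is likewise omitted.]

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	int main(void)
	{
		int kvm = open("/dev/kvm", O_RDWR);
		int vm = ioctl(kvm, KVM_CREATE_VM, 0);
		int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);

		/*
		 * Force the vCPU into wait-for-SIPI.  With this patch, the
		 * "set" ioctl succeeds even if INIT/SIPI happen to be blocked.
		 */
		struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_INIT_RECEIVED };
		ioctl(vcpu, KVM_SET_MP_STATE, &mp);

		/*
		 * If INIT/SIPI are blocked, KVM_RUN now fails with EINVAL;
		 * otherwise the vCPU simply parks in wait-for-SIPI until a
		 * SIPI (or a signal) arrives.
		 */
		if (ioctl(vcpu, KVM_RUN, 0) < 0)
			perror("KVM_RUN");
		return 0;
	}

[The upshot of the patch is that validation happens at the single choke
point all runnable vCPUs pass through (KVM_RUN) rather than in each of the
many state-setting ioctls, which is why the two checks below are removed.]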
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f2db09e8a13e8d706b4b43a685b4bc3d6dd43ae5..f7eaff64ed0195d22f9857bfd7d69a760c52e79a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5487,12 +5487,6 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
            (events->exception.nr > 31 || events->exception.nr == NMI_VECTOR))
                return -EINVAL;
 
-       /* INITs are latched while in SMM */
-       if (events->flags & KVM_VCPUEVENT_VALID_SMM &&
-           (events->smi.smm || events->smi.pending) &&
-           vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
-               return -EINVAL;
-
        process_nmi(vcpu);
 
        /*
@@ -11579,6 +11573,14 @@ static int kvm_x86_vcpu_pre_run(struct kvm_vcpu *vcpu)
        if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED))
                return -EINVAL;
 
+       /*
+        * Disallow running the vCPU if userspace forced it into an impossible
+        * MP_STATE, e.g. if the vCPU is in WFS but SIPI is blocked.
+        */
+       if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED &&
+           !kvm_apic_init_sipi_allowed(vcpu))
+               return -EINVAL;
+
        return kvm_x86_call(vcpu_pre_run)(vcpu);
 }
 
@@ -11927,16 +11929,6 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                goto out;
        }
 
-       /*
-        * Pending INITs are reported using KVM_SET_VCPU_EVENTS, disallow
-        * forcing the guest into INIT/SIPI if those events are supposed to be
-        * blocked.
-        */
-       if (!kvm_apic_init_sipi_allowed(vcpu) &&
-           (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
-            mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
-               goto out;
-
        if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
                kvm_set_mp_state(vcpu, KVM_MP_STATE_INIT_RECEIVED);
                set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);