git.ipfire.org Git - thirdparty/linux.git/commitdiff
KVM: x86: WARN and reject KVM_RUN if vCPU's MP_STATE is SIPI_RECEIVED
author: Sean Christopherson <seanjc@google.com>
Thu, 5 Jun 2025 19:50:16 +0000 (12:50 -0700)
committer: Sean Christopherson <seanjc@google.com>
Fri, 20 Jun 2025 20:07:59 +0000 (13:07 -0700)
WARN if KVM_RUN is reached with a vCPU's mp_state set to SIPI_RECEIVED, as
KVM no longer uses SIPI_RECEIVED internally, and should morph SIPI_RECEIVED
into INIT_RECEIVED with a pending SIPI if userspace forces SIPI_RECEIVED.

See commit 66450a21f996 ("KVM: x86: Rework INIT and SIPI handling") for
more history and details.

Link: https://lore.kernel.org/r/20250605195018.539901-3-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/x86.c

index f183951a8ea8b60842e8b0281d43fd99217a264f..f2db09e8a13e8d706b4b43a685b4bc3d6dd43ae5 100644 (file)
@@ -11568,6 +11568,20 @@ static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
        trace_kvm_fpu(0);
 }
 
+static int kvm_x86_vcpu_pre_run(struct kvm_vcpu *vcpu)
+{
+       /*
+        * SIPI_RECEIVED is obsolete; KVM leaves the vCPU in Wait-For-SIPI and
+        * tracks the pending SIPI separately.  SIPI_RECEIVED is still accepted
+        * by KVM_SET_VCPU_EVENTS for backwards compatibility, but should be
+        * converted to INIT_RECEIVED.
+        */
+       if (WARN_ON_ONCE(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED))
+               return -EINVAL;
+
+       return kvm_x86_call(vcpu_pre_run)(vcpu);
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 {
        struct kvm_queued_exception *ex = &vcpu->arch.exception;
@@ -11670,7 +11684,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
                goto out;
        }
 
-       r = kvm_x86_call(vcpu_pre_run)(vcpu);
+       r = kvm_x86_vcpu_pre_run(vcpu);
        if (r <= 0)
                goto out;