git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: SEV: Disallow LAUNCH_FINISH if vCPUs are actively being created
Author: Sean Christopherson <seanjc@google.com>
Tue, 10 Mar 2026 23:48:12 +0000 (16:48 -0700)
Committer: Sean Christopherson <seanjc@google.com>
Fri, 3 Apr 2026 16:37:36 +0000 (09:37 -0700)
Reject LAUNCH_FINISH for SEV-ES and SNP VMs if KVM is actively creating
one or more vCPUs, as KVM needs to process and encrypt each vCPU's VMSA.
Letting userspace create vCPUs while LAUNCH_FINISH is in-progress is
"fine", at least in the current code base, as kvm_for_each_vcpu() operates
on online_vcpus, LAUNCH_FINISH (all SEV+ sub-ioctls) holds kvm->mutex, and
fully onlining a vCPU in kvm_vm_ioctl_create_vcpu() is done under
kvm->mutex.  I.e. there's no difference between an in-progress vCPU and a
vCPU that is created entirely after LAUNCH_FINISH.

However, given that concurrent LAUNCH_FINISH and vCPU creation can't
possibly work (for any reasonable definition of "work"), since userspace
can't guarantee whether a particular vCPU will be encrypted or not,
disallow the combination as a hardening measure, to reduce the probability
of introducing bugs in the future, and to avoid having to reason about the
safety of future changes related to LAUNCH_FINISH.

Cc: Jethro Beekman <jethro@fortanix.com>
Closes: https://lore.kernel.org/all/b31f7c6e-2807-4662-bcdd-eea2c1e132fa@fortanix.com
Cc: stable@vger.kernel.org
Link: https://patch.msgid.link/20260310234829.2608037-5-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/sev.c
include/linux/kvm_host.h

index 9265ebd9aa1867386aa12a0fb540300fb463b279..10b12db7f902ddc36a75d05280a5f08a86ae20e2 100644 (file)
@@ -1032,6 +1032,9 @@ static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (!sev_es_guest(kvm))
                return -ENOTTY;
 
+       if (kvm_is_vcpu_creation_in_progress(kvm))
+               return -EBUSY;
+
        kvm_for_each_vcpu(i, vcpu, kvm) {
                ret = mutex_lock_killable(&vcpu->mutex);
                if (ret)
@@ -2052,8 +2055,8 @@ static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
        struct kvm_vcpu *src_vcpu;
        unsigned long i;
 
-       if (src->created_vcpus != atomic_read(&src->online_vcpus) ||
-           dst->created_vcpus != atomic_read(&dst->online_vcpus))
+       if (kvm_is_vcpu_creation_in_progress(src) ||
+           kvm_is_vcpu_creation_in_progress(dst))
                return -EBUSY;
 
        if (!sev_es_guest(src))
@@ -2452,6 +2455,9 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
        unsigned long i;
        int ret;
 
+       if (kvm_is_vcpu_creation_in_progress(kvm))
+               return -EBUSY;
+
        data.gctx_paddr = __psp_pa(sev->snp_context);
        data.page_type = SNP_PAGE_TYPE_VMSA;
 
index 34759a262b2892b54d2bef78029c06b55fff58e1..3c7f8557f7af5c5db172d52eceb18e779d215bc5 100644 (file)
@@ -1029,6 +1029,13 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
        return NULL;
 }
 
+static inline bool kvm_is_vcpu_creation_in_progress(struct kvm *kvm)
+{
+       lockdep_assert_held(&kvm->lock);
+
+       return kvm->created_vcpus != atomic_read(&kvm->online_vcpus);
+}
+
 void kvm_destroy_vcpus(struct kvm *kvm);
 
 int kvm_trylock_all_vcpus(struct kvm *kvm);