]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: SEV: Lock all vCPUs when synchronizing VMSAs for SNP launch finish
author: Sean Christopherson <seanjc@google.com>
Tue, 10 Mar 2026 23:48:13 +0000 (16:48 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Wed, 22 Apr 2026 11:32:20 +0000 (13:32 +0200)
commit cb923ee6a80f4e604e6242a4702b59251e61a380 upstream.

Lock all vCPUs when synchronizing and encrypting VMSAs for SNP guests, as
allowing userspace to manipulate and/or run a vCPU while its state is being
synchronized would at best corrupt vCPU state, and at worst crash the host
kernel.

Opportunistically assert that vcpu->mutex is held when synchronizing its
VMSA (the SEV-ES path already locks vCPUs).

Fixes: ad27ce155566 ("KVM: SEV: Add KVM_SEV_SNP_LAUNCH_FINISH command")
Cc: stable@vger.kernel.org
Link: https://patch.msgid.link/20260310234829.2608037-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
arch/x86/kvm/svm/sev.c

index 5de36bbc4c535904b41d4053d3ac208ab726bf8a..42f85646c20389897dfd76df7bab1d69dae843e0 100644 (file)
@@ -882,6 +882,8 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        u8 *d;
        int i;
 
+       lockdep_assert_held(&vcpu->mutex);
+
        if (vcpu->arch.guest_state_protected)
                return -EINVAL;
 
@@ -2456,6 +2458,10 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (kvm_is_vcpu_creation_in_progress(kvm))
                return -EBUSY;
 
+       ret = kvm_lock_all_vcpus(kvm);
+       if (ret)
+               return ret;
+
        data.gctx_paddr = __psp_pa(sev->snp_context);
        data.page_type = SNP_PAGE_TYPE_VMSA;
 
@@ -2465,12 +2471,12 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
                ret = sev_es_sync_vmsa(svm);
                if (ret)
-                       return ret;
+                       goto out;
 
                /* Transition the VMSA page to a firmware state. */
                ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true);
                if (ret)
-                       return ret;
+                       goto out;
 
                /* Issue the SNP command to encrypt the VMSA */
                data.address = __sme_pa(svm->sev_es.vmsa);
@@ -2479,7 +2485,7 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
                if (ret) {
                        snp_page_reclaim(kvm, pfn);
 
-                       return ret;
+                       goto out;
                }
 
                svm->vcpu.arch.guest_state_protected = true;
@@ -2493,7 +2499,9 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
                svm_enable_lbrv(vcpu);
        }
 
-       return 0;
+out:
+       kvm_unlock_all_vcpus(kvm);
+       return ret;
 }
 
 static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)