]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: SEV: Lock all vCPUs when synchronizing VMSAs for SNP launch finish
author: Sean Christopherson <seanjc@google.com>
Tue, 10 Mar 2026 23:48:13 +0000 (16:48 -0700)
committer: Sean Christopherson <seanjc@google.com>
Wed, 8 Apr 2026 23:04:19 +0000 (16:04 -0700)
Lock all vCPUs when synchronizing and encrypting VMSAs for SNP guests, as
allowing userspace to manipulate and/or run a vCPU while its state is being
synchronized would at best corrupt vCPU state, and at worst crash the host
kernel.

Opportunistically assert that vcpu->mutex is held when synchronizing its
VMSA (the SEV-ES path already locks vCPUs).

Fixes: ad27ce155566 ("KVM: SEV: Add KVM_SEV_SNP_LAUNCH_FINISH command")
Cc: stable@vger.kernel.org
Link: https://patch.msgid.link/20260310234829.2608037-6-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/sev.c

index 10b12db7f902ddc36a75d05280a5f08a86ae20e2..709e611188c1c7ceb6099604877729c0acbae0ca 100644 (file)
@@ -884,6 +884,8 @@ static int sev_es_sync_vmsa(struct vcpu_svm *svm)
        u8 *d;
        int i;
 
+       lockdep_assert_held(&vcpu->mutex);
+
        if (vcpu->arch.guest_state_protected)
                return -EINVAL;
 
@@ -2458,6 +2460,10 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
        if (kvm_is_vcpu_creation_in_progress(kvm))
                return -EBUSY;
 
+       ret = kvm_lock_all_vcpus(kvm);
+       if (ret)
+               return ret;
+
        data.gctx_paddr = __psp_pa(sev->snp_context);
        data.page_type = SNP_PAGE_TYPE_VMSA;
 
@@ -2467,12 +2473,12 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
 
                ret = sev_es_sync_vmsa(svm);
                if (ret)
-                       return ret;
+                       goto out;
 
                /* Transition the VMSA page to a firmware state. */
                ret = rmp_make_private(pfn, INITIAL_VMSA_GPA, PG_LEVEL_4K, sev->asid, true);
                if (ret)
-                       return ret;
+                       goto out;
 
                /* Issue the SNP command to encrypt the VMSA */
                data.address = __sme_pa(svm->sev_es.vmsa);
@@ -2481,7 +2487,7 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
                if (ret) {
                        snp_page_reclaim(kvm, pfn);
 
-                       return ret;
+                       goto out;
                }
 
                svm->vcpu.arch.guest_state_protected = true;
@@ -2495,7 +2501,9 @@ static int snp_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
                svm_enable_lbrv(vcpu);
        }
 
-       return 0;
+out:
+       kvm_unlock_all_vcpus(kvm);
+       return ret;
 }
 
 static int snp_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)