]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
KVM: SEV: Assert that kvm->lock is held when querying SEV+ support
authorSean Christopherson <seanjc@google.com>
Tue, 10 Mar 2026 23:48:23 +0000 (16:48 -0700)
committerSean Christopherson <seanjc@google.com>
Thu, 9 Apr 2026 19:00:20 +0000 (12:00 -0700)
Assert that kvm->lock is held when checking if a VM is an SEV+ VM, as KVM
sets *and* resets the relevant flags when initializing SEV state, i.e.
it's extremely easy to end up with TOCTOU bugs if kvm->lock isn't held.

Add waivers for a VM being torn down (refcount is '0') and for there being
a loaded vCPU, with comments for both explaining why they're safe.

Note, the "vCPU loaded" waiver is necessary to avoid splats on the SNP
checks in sev_gmem_prepare() and sev_gmem_max_mapping_level(), which are
currently called when handling nested page faults.  Alternatively, those
checks could key off KVM_X86_SNP_VM, as kvm_arch.vm_type is stable early
in VM creation.  Prioritize consistency, at least for now, and leave a
"reminder" that the max mapping level code in particular likely needs
special attention if/when KVM supports dirty logging for SNP guests.

Link: https://patch.msgid.link/20260310234829.2608037-16-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/svm/sev.c

index ed8bb60341ae3d0dc26edd69515f1e0a8a16fc0b..57f3ec36b62a0d69bc599cac1f2a97cd85da8702 100644 (file)
@@ -107,17 +107,42 @@ static unsigned int nr_asids;
 static unsigned long *sev_asid_bitmap;
 static unsigned long *sev_reclaim_asid_bitmap;
 
+static __always_inline void kvm_lockdep_assert_sev_lock_held(struct kvm *kvm)
+{
+#ifdef CONFIG_PROVE_LOCKING
+       /*
+        * Querying SEV+ support is safe if there are no other references, i.e.
+        * if concurrent initialization of SEV+ is impossible.
+        */
+       if (!refcount_read(&kvm->users_count))
+               return;
+
+       /*
+        * Querying SEV+ support from vCPU context is always safe, as vCPUs can
+        * only be created after SEV+ is initialized (and KVM disallows all SEV
+        * sub-ioctls while vCPU creation is in-progress).
+        */
+       if (kvm_get_running_vcpu())
+               return;
+
+       lockdep_assert_held(&kvm->lock);
+#endif
+}
+
/* Query whether @kvm is an SEV guest; see the locking rules in the assert. */
static bool sev_guest(struct kvm *kvm)
{
	kvm_lockdep_assert_sev_lock_held(kvm);

	return ____sev_guest(kvm);
}
/* Query whether @kvm is an SEV-ES guest; see the locking rules in the assert. */
static bool sev_es_guest(struct kvm *kvm)
{
	kvm_lockdep_assert_sev_lock_held(kvm);

	return ____sev_es_guest(kvm);
}
 
/* Query whether @kvm is an SEV-SNP guest; see the locking rules in the assert. */
static bool sev_snp_guest(struct kvm *kvm)
{
	kvm_lockdep_assert_sev_lock_held(kvm);

	return ____sev_snp_guest(kvm);
}