KVM: x86/mmu: Unnest TDP MMU helpers that allocate SPs for eager splitting
author    David Matlack <dmatlack@google.com>
          Tue, 11 Jun 2024 22:05:11 +0000 (15:05 -0700)
committer Sean Christopherson <seanjc@google.com>
          Fri, 14 Jun 2024 16:24:23 +0000 (09:24 -0700)
Move the implementation of tdp_mmu_alloc_sp_for_split() into its one and
only caller to reduce unnecessary nesting and make it clearer why the
eager split loop continues after allocating a new SP.

Opportunistically drop the double-underscores from
__tdp_mmu_alloc_sp_for_split() now that its parent is gone.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20240611220512.2426439-4-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
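
The allocation itself is elided by the hunk boundaries below; it boils down
to a sleepable GFP_KERNEL_ACCOUNT allocation, which is why the loop must
drop mmu_lock (an rwlock, which may not be held across a sleep) and leave
the RCU read-side critical section around the call. A minimal sketch of the
renamed helper's shape, assuming the usual KVM MMU pattern of a
kmem_cache-backed header plus a zeroed page-table page; treat the exact
names and flags as approximate, not verbatim upstream code:

/*
 * Sketch of the renamed helper (body elided by the hunks below).
 * The allocation may sleep, so mmu_lock must not be held here.
 */
static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
{
        struct kvm_mmu_page *sp;

        sp = kmem_cache_zalloc(mmu_page_header_cache, GFP_KERNEL_ACCOUNT);
        if (!sp)
                return NULL;

        sp->spt = (void *)get_zeroed_page(GFP_KERNEL_ACCOUNT);
        if (!sp->spt) {
                kmem_cache_free(mmu_page_header_cache, sp);
                return NULL;
        }

        return sp;
}
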
arch/x86/kvm/mmu/tdp_mmu.c

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 20ee1bc64f3c2deb7124e9a060e300542f6e6301..028f5a667482d213233edfb9891d4661fc11241a 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1339,7 +1339,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
        return spte_set;
 }
 
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
+static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
 {
        struct kvm_mmu_page *sp;
 
@@ -1356,34 +1356,6 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
        return sp;
 }
 
-static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
-                                                      struct tdp_iter *iter,
-                                                      bool shared)
-{
-       struct kvm_mmu_page *sp;
-
-       kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
-       rcu_read_unlock();
-
-       if (shared)
-               read_unlock(&kvm->mmu_lock);
-       else
-               write_unlock(&kvm->mmu_lock);
-
-       iter->yielded = true;
-       sp = __tdp_mmu_alloc_sp_for_split();
-
-       if (shared)
-               read_lock(&kvm->mmu_lock);
-       else
-               write_lock(&kvm->mmu_lock);
-
-       rcu_read_lock();
-
-       return sp;
-}
-
 /* Note, the caller is responsible for initializing @sp. */
 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
                                   struct kvm_mmu_page *sp, bool shared)
@@ -1454,7 +1426,22 @@ retry:
                        continue;
 
                if (!sp) {
-                       sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
+                       rcu_read_unlock();
+
+                       if (shared)
+                               read_unlock(&kvm->mmu_lock);
+                       else
+                               write_unlock(&kvm->mmu_lock);
+
+                       sp = tdp_mmu_alloc_sp_for_split();
+
+                       if (shared)
+                               read_lock(&kvm->mmu_lock);
+                       else
+                               write_lock(&kvm->mmu_lock);
+
+                       rcu_read_lock();
+
                        if (!sp) {
                                ret = -ENOMEM;
                                trace_kvm_mmu_split_huge_page(iter.gfn,
@@ -1463,6 +1450,7 @@ retry:
                                break;
                        }
 
+                       iter.yielded = true;
                        continue;
                }
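
Why "iter.yielded = true; continue;" retries the same huge page rather than
skipping it: the TDP iterator treats a yield as "restart at the current
target GFN" instead of "advance". An abridged sketch of the relevant check
at the top of tdp_iter_next() in tdp_iter.c (details approximate):

/*
 * When the walk yielded (here: mmu_lock was dropped to allocate), the
 * next step restarts the walk at the current target GFN instead of
 * advancing, so the loop re-visits the same huge SPTE.
 */
void tdp_iter_next(struct tdp_iter *iter)
{
        if (iter->yielded) {
                tdp_iter_restart(iter);
                return;
        }

        /* ... otherwise step down/sideways/up as usual ... */
}

So after mmu_lock is reacquired, the next loop iteration lands back on the
same huge SPTE, now with sp in hand, and falls through to
tdp_mmu_split_huge_page().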