From: David Matlack <dmatlack@google.com>
Date: Tue, 11 Jun 2024 22:05:11 +0000 (-0700)
Subject: KVM: x86/mmu: Unnest TDP MMU helpers that allocate SPs for eager splitting
X-Git-Tag: v6.11-rc1~89^2~13^2~1
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=3d4a5a45ca26f8de9e0a4f384a2fb0967b8566b5;p=thirdparty%2Fkernel%2Flinux.git

KVM: x86/mmu: Unnest TDP MMU helpers that allocate SPs for eager splitting

Move the implementation of tdp_mmu_alloc_sp_for_split() to its one and
only caller to reduce unnecessary nesting and make it more clear why
the eager split loop continues after allocating a new SP.

Opportunistically drop the double-underscores from
__tdp_mmu_alloc_sp_for_split() now that its parent is gone.

No functional change intended.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20240611220512.2426439-4-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
---

diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 20ee1bc64f3c2..028f5a667482d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1339,7 +1339,7 @@ bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
 	return spte_set;
 }
 
-static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
+static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(void)
 {
 	struct kvm_mmu_page *sp;
 
@@ -1356,34 +1356,6 @@ static struct kvm_mmu_page *__tdp_mmu_alloc_sp_for_split(void)
 	return sp;
 }
 
-static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
-						       struct tdp_iter *iter,
-						       bool shared)
-{
-	struct kvm_mmu_page *sp;
-
-	kvm_lockdep_assert_mmu_lock_held(kvm, shared);
-
-	rcu_read_unlock();
-
-	if (shared)
-		read_unlock(&kvm->mmu_lock);
-	else
-		write_unlock(&kvm->mmu_lock);
-
-	iter->yielded = true;
-	sp = __tdp_mmu_alloc_sp_for_split();
-
-	if (shared)
-		read_lock(&kvm->mmu_lock);
-	else
-		write_lock(&kvm->mmu_lock);
-
-	rcu_read_lock();
-
-	return sp;
-}
-
 /* Note, the caller is responsible for initializing @sp. */
 static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
 				   struct kvm_mmu_page *sp, bool shared)
@@ -1454,7 +1426,22 @@ retry:
 			continue;
 
 		if (!sp) {
-			sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, shared);
+			rcu_read_unlock();
+
+			if (shared)
+				read_unlock(&kvm->mmu_lock);
+			else
+				write_unlock(&kvm->mmu_lock);
+
+			sp = tdp_mmu_alloc_sp_for_split();
+
+			if (shared)
+				read_lock(&kvm->mmu_lock);
+			else
+				write_lock(&kvm->mmu_lock);
+
+			rcu_read_lock();
+
 			if (!sp) {
 				ret = -ENOMEM;
 				trace_kvm_mmu_split_huge_page(iter.gfn,
@@ -1463,6 +1450,7 @@ retry:
 				break;
 			}
 
+			iter.yielded = true;
 			continue;
 		}
 