git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86/mmu: Rename kvm_tdp_mmu_zap_sp() to better indicate its purpose
author Vipin Sharma <vipinsh@google.com>
Mon, 7 Jul 2025 22:47:15 +0000 (22:47 +0000)
committer Sean Christopherson <seanjc@google.com>
Tue, 19 Aug 2025 14:40:25 +0000 (07:40 -0700)
kvm_tdp_mmu_zap_sp() is only used for NX huge page recovery, so rename
it to kvm_tdp_mmu_zap_possible_nx_huge_page(). In a future commit, this
function will be changed to include logic specific to NX huge page
recovery.

Signed-off-by: Vipin Sharma <vipinsh@google.com>
Signed-off-by: James Houghton <jthoughton@google.com>
https://lore.kernel.org/r/20250707224720.4016504-3-jthoughton@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c
arch/x86/kvm/mmu/tdp_mmu.h

index e0d6579db531c30cfbbddd847dcc0570515fe6b1..a9aafa88de2b7b7440b74e8775768d2fe64715f4 100644 (file)
@@ -7673,7 +7673,7 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm,
                if (slot && kvm_slot_dirty_track_enabled(slot))
                        unaccount_nx_huge_page(kvm, sp);
                else if (is_tdp_mmu)
-                       flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
+                       flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
                else
                        kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
                WARN_ON_ONCE(sp->nx_huge_page_disallowed);
index 48b070f9f4e130e410f6f99846315c46f50b73f2..19907eb04a9c4dd089cce1ed9659393144dcf0ff 100644 (file)
@@ -925,7 +925,8 @@ static void tdp_mmu_zap_root(struct kvm *kvm, struct kvm_mmu_page *root,
        rcu_read_unlock();
 }
 
-bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
+                                          struct kvm_mmu_page *sp)
 {
        u64 old_spte;
 
index 52acf99d40a0052354bad2f4f245010f42be46dc..bd62977c9199e4fcdca2ad5d6020cb8c5cb51672 100644 (file)
@@ -64,7 +64,8 @@ static inline struct kvm_mmu_page *tdp_mmu_get_root(struct kvm_vcpu *vcpu,
 }
 
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
-bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
+bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
+                                          struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
                                  enum kvm_tdp_mmu_root_types root_types);