kvm_tdp_mmu_zap_sp() is only used for NX huge page recovery, so rename
it to kvm_tdp_mmu_zap_possible_nx_huge_page(). In a future commit, this
function will be changed to include logic specific to NX huge page
recovery.
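
Purely as an illustration of what "logic specific to NX huge page
recovery" could mean (this sketch is an assumption, not the contents of
the follow-up patch), the helper could start by sanity-checking the page
it is handed:

	/*
	 * Hypothetical sketch only: reject pages that are not (or are
	 * no longer) tracked as possible NX huge pages.  The actual
	 * follow-up commit may do something entirely different.
	 */
	if (WARN_ON_ONCE(!sp->nx_huge_page_disallowed))
		return false;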
Signed-off-by: Vipin Sharma <vipinsh@google.com>
Signed-off-by: James Houghton <jthoughton@google.com>
Link: https://lore.kernel.org/r/20250707224720.4016504-3-jthoughton@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
 		if (slot && kvm_slot_dirty_track_enabled(slot))
 			unaccount_nx_huge_page(kvm, sp);
 		else if (is_tdp_mmu)
-			flush |= kvm_tdp_mmu_zap_sp(kvm, sp);
+			flush |= kvm_tdp_mmu_zap_possible_nx_huge_page(kvm, sp);
 		else
 			kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
 		WARN_ON_ONCE(sp->nx_huge_page_disallowed);
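
For orientation, the hunk above is from kvm_recover_nx_huge_pages(), the
loop that walks the possible-NX list and zaps entries so the backing
memory can be mapped huge again. A rough, abridged sketch of that loop,
reconstructed from upstream (list and field names vary across kernel
versions, so treat the details as approximate):

	for ( ; to_zap; --to_zap) {
		if (list_empty(&kvm->arch.possible_nx_huge_pages))
			break;

		/*
		 * Pop the oldest page that the NX workaround forced to
		 * be mapped at 4KiB granularity.
		 */
		sp = list_first_entry(&kvm->arch.possible_nx_huge_pages,
				      struct kvm_mmu_page,
				      possible_nx_huge_page_link);
		WARN_ON_ONCE(!sp->nx_huge_page_disallowed);

		/* ... the hunk above unaccounts or zaps the page ... */
	}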
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
 	rcu_read_unlock();
 }
 
-bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
+bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
+					   struct kvm_mmu_page *sp)
 {
 	u64 old_spte;
 }
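
The hunk elides the helper's body. For reference, a reconstruction of
roughly what the renamed function contains in recent upstream trees
(from memory, not from this patch, so details may differ):

bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
					   struct kvm_mmu_page *sp)
{
	u64 old_spte;

	/*
	 * Roots can't be zapped here: a root has no parent page table
	 * and thus no parent SPTE (sp->ptep) to clear.
	 */
	if (WARN_ON_ONCE(!sp->ptep))
		return false;

	old_spte = kvm_tdp_mmu_read_spte(sp->ptep);
	if (WARN_ON_ONCE(!is_shadow_present_pte(old_spte)))
		return false;

	/* Clear the parent SPTE, zapping the entire subtree under @sp. */
	tdp_mmu_set_spte(kvm, kvm_mmu_page_as_id(sp), sp->ptep, old_spte,
			 SHADOW_NONPRESENT_VALUE, sp->gfn, sp->role.level + 1);

	return true;
}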
diff --git a/arch/x86/kvm/mmu/tdp_mmu.h b/arch/x86/kvm/mmu/tdp_mmu.h
 bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
-bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
+bool kvm_tdp_mmu_zap_possible_nx_huge_page(struct kvm *kvm,
+					   struct kvm_mmu_page *sp);
 void kvm_tdp_mmu_zap_all(struct kvm *kvm);
 void kvm_tdp_mmu_invalidate_roots(struct kvm *kvm,
 				  enum kvm_tdp_mmu_root_types root_types);