Revert "KVM: x86/tdp_mmu: Add a helper function to walk down the TDP MMU"
author     Sean Christopherson <seanjc@google.com>
           Thu, 30 Oct 2025 20:09:29 +0000 (13:09 -0700)
committer  Sean Christopherson <seanjc@google.com>
           Wed, 5 Nov 2025 19:03:14 +0000 (11:03 -0800)
Remove the helper and exports that were added to allow TDX code to reuse
kvm_tdp_map_page() for its gmem post-populate flow, now that a dedicated
TDP MMU API is provided to install a mapping given a gfn+pfn pair.

This reverts commit 2608f105760115e94a03efd9f12f8fbfd1f9af4b.
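
For context, the dedicated API referenced above is kvm_tdp_mmu_map_private_pfn(),
whose declaration is visible in the mmu.h hunk below. The following is a minimal
sketch of a post-populate style caller; the function name and the surrounding
flow are illustrative assumptions, not code from this series:

/*
 * Illustrative sketch only: the caller name and the arch-specific step are
 * assumptions; kvm_tdp_mmu_map_private_pfn() is the dedicated API itself.
 */
static int example_post_populate(struct kvm_vcpu *vcpu, gfn_t gfn,
				 kvm_pfn_t pfn)
{
	int ret;

	/*
	 * Install the leaf SPTE for the private gfn directly, rather than
	 * faking a page fault via kvm_tdp_map_page() and then probing the
	 * result with kvm_tdp_mmu_gpa_is_mapped(), as the reverted helpers
	 * required.
	 */
	ret = kvm_tdp_mmu_map_private_pfn(vcpu, gfn, pfn);
	if (ret)
		return ret;

	/* Arch-specific post-populate work would follow here. */
	return 0;
}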

Reviewed-by: Rick Edgecombe <rick.p.edgecombe@intel.com>
Reviewed-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Kai Huang <kai.huang@intel.com>
Reviewed-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Yan Zhao <yan.y.zhao@intel.com>
Tested-by: Kai Huang <kai.huang@intel.com>
Link: https://patch.msgid.link/20251030200951.3402865-7-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu.h
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/tdp_mmu.c

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 2f108e381959f37fa3121eb6c5990f37c1a8cfef..9e5045a60d8b292d3b528ba04bf2bb1ae1aa18c2 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -257,8 +257,6 @@ extern bool tdp_mmu_enabled;
 #define tdp_mmu_enabled false
 #endif
 
-bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa);
-int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level);
 int kvm_tdp_mmu_map_private_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, kvm_pfn_t pfn);
 
 static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d24fa59f872b9851d31c5e4fa8529daa0b0b1972..559c80c841b99d269755fe05a7f61a355a62a166 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4924,7 +4924,8 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
        return direct_page_fault(vcpu, fault);
 }
 
-int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level)
+static int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code,
+                           u8 *level)
 {
        int r;
 
@@ -4966,7 +4967,6 @@ int kvm_tdp_map_page(struct kvm_vcpu *vcpu, gpa_t gpa, u64 error_code, u8 *level
                return -EIO;
        }
 }
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_map_page);
 
 long kvm_arch_vcpu_pre_fault_memory(struct kvm_vcpu *vcpu,
                                    struct kvm_pre_fault_memory *range)
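
With its export and declaration gone, kvm_tdp_map_page() becomes static, and its
only remaining caller is the pre-fault path whose signature appears in the
trailing context above. Below is a simplified sketch of that relationship,
modeled on kvm_arch_vcpu_pre_fault_memory(); the wrapper name is invented for
illustration and the range iteration is trimmed:

/* Simplified sketch; range iteration and validity checks are omitted. */
static long example_pre_fault_one(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	u64 error_code = PFERR_GUEST_FINAL_MASK;
	u8 level = PG_LEVEL_4K;
	int r;

	/* Pre-fault private GPAs as private, per the memory attributes. */
	if (kvm_arch_has_private_mem(vcpu->kvm) &&
	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(gpa)))
		error_code |= PFERR_PRIVATE_ACCESS;

	r = kvm_tdp_map_page(vcpu, gpa, error_code, &level);
	if (r < 0)
		return r;

	/* Report the mapped size so the caller can advance its cursor. */
	return KVM_HPAGE_SIZE(level);
}
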
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 440fd8f803971f8df15e2734febf874d579999b6..e735d2f8367b4d517697a31346985fc3e951ec5d 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1941,13 +1941,16 @@ bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
  *
  * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
  */
-static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
-                                 struct kvm_mmu_page *root)
+int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
+                        int *root_level)
 {
+       struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
        struct tdp_iter iter;
        gfn_t gfn = addr >> PAGE_SHIFT;
        int leaf = -1;
 
+       *root_level = vcpu->arch.mmu->root_role.level;
+
        for_each_tdp_pte(iter, vcpu->kvm, root, gfn, gfn + 1) {
                leaf = iter.level;
                sptes[leaf] = iter.old_spte;
@@ -1956,36 +1959,6 @@ static int __kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
        return leaf;
 }
 
-int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
-                        int *root_level)
-{
-       struct kvm_mmu_page *root = root_to_sp(vcpu->arch.mmu->root.hpa);
-       *root_level = vcpu->arch.mmu->root_role.level;
-
-       return __kvm_tdp_mmu_get_walk(vcpu, addr, sptes, root);
-}
-
-bool kvm_tdp_mmu_gpa_is_mapped(struct kvm_vcpu *vcpu, u64 gpa)
-{
-       struct kvm *kvm = vcpu->kvm;
-       bool is_direct = kvm_is_addr_direct(kvm, gpa);
-       hpa_t root = is_direct ? vcpu->arch.mmu->root.hpa :
-                                vcpu->arch.mmu->mirror_root_hpa;
-       u64 sptes[PT64_ROOT_MAX_LEVEL + 1], spte;
-       int leaf;
-
-       lockdep_assert_held(&kvm->mmu_lock);
-       rcu_read_lock();
-       leaf = __kvm_tdp_mmu_get_walk(vcpu, gpa, sptes, root_to_sp(root));
-       rcu_read_unlock();
-       if (leaf < 0)
-               return false;
-
-       spte = sptes[leaf];
-       return is_shadow_present_pte(spte) && is_last_spte(spte, leaf);
-}
-EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_tdp_mmu_gpa_is_mapped);
-
 /*
  * Returns the last level spte pointer of the shadow page walk for the given
  * gpa, and sets *spte to the spte value. This spte may be non-present. If no
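
As the comment retained above kvm_tdp_mmu_get_walk() notes, the walk must be
performed between kvm_tdp_mmu_walk_lockless_{begin,end}. A minimal sketch of
that calling convention, with the wrapper function invented for illustration
and modeled on KVM's MMIO-SPTE walk:

/* Illustrative wrapper; the begin/end bracketing is the point here. */
static int example_get_leaf_spte(struct kvm_vcpu *vcpu, u64 addr)
{
	u64 sptes[PT64_ROOT_MAX_LEVEL + 1];
	int root_level, leaf;

	kvm_tdp_mmu_walk_lockless_begin();	/* rcu_read_lock() */
	leaf = kvm_tdp_mmu_get_walk(vcpu, addr, sptes, &root_level);
	kvm_tdp_mmu_walk_lockless_end();	/* rcu_read_unlock() */

	/* leaf is the lowest level reached, or -1 if no SPTEs were walked. */
	return leaf;
}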