git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: x86/mmu: Drop @max_level from kvm_mmu_max_mapping_level()
author: David Matlack <dmatlack@google.com>
Fri, 23 Aug 2024 23:56:43 +0000 (16:56 -0700)
committer: Sean Christopherson <seanjc@google.com>
Wed, 30 Oct 2024 22:25:42 +0000 (15:25 -0700)
Drop the @max_level parameter from kvm_mmu_max_mapping_level(). All
callers pass in PG_LEVEL_NUM, so @max_level can be replaced with
PG_LEVEL_NUM in the function body.

No functional change intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Link: https://lore.kernel.org/r/20240823235648.3236880-2-dmatlack@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/mmu_internal.h
arch/x86/kvm/mmu/tdp_mmu.c

index 266a25d96512dd1f37bdd9259db53b73a414ff9d..3c2322440d9c303cec16f9e4be4c88197564bc76 100644 (file)
@@ -3114,13 +3114,12 @@ static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
 }
 
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
-                             const struct kvm_memory_slot *slot, gfn_t gfn,
-                             int max_level)
+                             const struct kvm_memory_slot *slot, gfn_t gfn)
 {
        bool is_private = kvm_slot_can_be_private(slot) &&
                          kvm_mem_is_private(kvm, gfn);
 
-       return __kvm_mmu_max_mapping_level(kvm, slot, gfn, max_level, is_private);
+       return __kvm_mmu_max_mapping_level(kvm, slot, gfn, PG_LEVEL_NUM, is_private);
 }
 
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
@@ -6919,8 +6918,7 @@ restart:
                 * mapping if the indirect sp has level = 1.
                 */
                if (sp->role.direct &&
-                   sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn,
-                                                              PG_LEVEL_NUM)) {
+                   sp->role.level < kvm_mmu_max_mapping_level(kvm, slot, sp->gfn)) {
                        kvm_zap_one_rmap_spte(kvm, rmap_head, sptep);
 
                        if (kvm_available_flush_remote_tlbs_range())
index fabbea504a690c68b9ef43938744b87eeffba1f6..b00abbe3f6cfa88ff6ce0ef2525a80d21b0c490b 100644 (file)
@@ -346,8 +346,7 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 }
 
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
-                             const struct kvm_memory_slot *slot, gfn_t gfn,
-                             int max_level);
+                             const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
 void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
 
index 672babbfe2a5807cca977c9673577e2ebf290d66..07ffd34150ccbfecf05e3386b62cd6a0e690571e 100644 (file)
@@ -1626,8 +1626,7 @@ retry:
                if (iter.gfn < start || iter.gfn >= end)
                        continue;
 
-               max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot,
-                                                             iter.gfn, PG_LEVEL_NUM);
+               max_mapping_level = kvm_mmu_max_mapping_level(kvm, slot, iter.gfn);
                if (max_mapping_level < iter.level)
                        continue;