KVM: x86/mmu: Hoist guest_memfd max level/order helpers "up" in mmu.c
author    Sean Christopherson <seanjc@google.com>
          Tue, 29 Jul 2025 22:54:44 +0000 (15:54 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 27 Aug 2025 08:35:00 +0000 (04:35 -0400)
Move kvm_max_level_for_order() and kvm_max_private_mapping_level() up in
mmu.c so that they can be used by __kvm_mmu_max_mapping_level().

Opportunistically drop the "inline" from kvm_max_level_for_order().

No functional change intended.

Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Ackerley Tng <ackerleytng@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Fuad Tabba <tabba@google.com>
Message-ID: <20250729225455.670324-14-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index b735611e8fcd389c6b7b3120e53d465394b50990..20dd9f64156e77401726a556d849f4fc9bada501 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3285,6 +3285,42 @@ out:
        return level;
 }
 
+static u8 kvm_max_level_for_order(int order)
+{
+       BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
+
+       KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
+                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
+                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
+
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
+               return PG_LEVEL_1G;
+
+       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
+               return PG_LEVEL_2M;
+
+       return PG_LEVEL_4K;
+}
+
+static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
+                                       u8 max_level, int gmem_order)
+{
+       u8 req_max_level;
+
+       if (max_level == PG_LEVEL_4K)
+               return PG_LEVEL_4K;
+
+       max_level = min(kvm_max_level_for_order(gmem_order), max_level);
+       if (max_level == PG_LEVEL_4K)
+               return PG_LEVEL_4K;
+
+       req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn);
+       if (req_max_level)
+               max_level = min(max_level, req_max_level);
+
+       return max_level;
+}
+
 static int __kvm_mmu_max_mapping_level(struct kvm *kvm,
                                       const struct kvm_memory_slot *slot,
                                       gfn_t gfn, int max_level, bool is_private)
@@ -4503,42 +4539,6 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
                vcpu->stat.pf_fixed++;
 }
 
-static inline u8 kvm_max_level_for_order(int order)
-{
-       BUILD_BUG_ON(KVM_MAX_HUGEPAGE_LEVEL > PG_LEVEL_1G);
-
-       KVM_MMU_WARN_ON(order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) &&
-                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) &&
-                       order != KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K));
-
-       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G))
-               return PG_LEVEL_1G;
-
-       if (order >= KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M))
-               return PG_LEVEL_2M;
-
-       return PG_LEVEL_4K;
-}
-
-static u8 kvm_max_private_mapping_level(struct kvm *kvm, kvm_pfn_t pfn,
-                                       u8 max_level, int gmem_order)
-{
-       u8 req_max_level;
-
-       if (max_level == PG_LEVEL_4K)
-               return PG_LEVEL_4K;
-
-       max_level = min(kvm_max_level_for_order(gmem_order), max_level);
-       if (max_level == PG_LEVEL_4K)
-               return PG_LEVEL_4K;
-
-       req_max_level = kvm_x86_call(gmem_max_mapping_level)(kvm, pfn);
-       if (req_max_level)
-               max_level = min(max_level, req_max_level);
-
-       return max_level;
-}
-
 static void kvm_mmu_finish_page_fault(struct kvm_vcpu *vcpu,
                                      struct kvm_page_fault *fault, int r)
 {
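
For reference, a minimal standalone sketch (not part of the commit) of the logic the
hoisted helpers implement, assuming x86's 4 KiB base pages where
KVM_HPAGE_GFN_SHIFT(PG_LEVEL_2M) == 9 and KVM_HPAGE_GFN_SHIFT(PG_LEVEL_1G) == 18.
The names hpage_gfn_shift(), max_level_for_order(), max_private_level() and the
vendor_cap parameter are illustrative stand-ins; in particular, max_private_level()
approximates kvm_max_private_mapping_level() with the gmem_max_mapping_level vendor
callback reduced to a plain argument:

	#include <stdio.h>

	/* Mapping levels, mirroring the kernel's PG_LEVEL_* enum values. */
	enum { PG_LEVEL_4K = 1, PG_LEVEL_2M = 2, PG_LEVEL_1G = 3 };

	/* GFN bits covered at @level: (level - 1) * 9 with 4 KiB base pages. */
	static int hpage_gfn_shift(int level)
	{
		return (level - 1) * 9;
	}

	/* Like kvm_max_level_for_order(): a gmem folio of @order caps the level. */
	static int max_level_for_order(int order)
	{
		if (order >= hpage_gfn_shift(PG_LEVEL_1G))	/* order >= 18 */
			return PG_LEVEL_1G;
		if (order >= hpage_gfn_shift(PG_LEVEL_2M))	/* order >= 9 */
			return PG_LEVEL_2M;
		return PG_LEVEL_4K;
	}

	/*
	 * Hypothetical stand-in for kvm_max_private_mapping_level(): clamp the
	 * requested level by the gmem folio order, then by an optional vendor
	 * cap, where 0 means "no opinion" (as with a vendor hook that returns 0).
	 */
	static int max_private_level(int max_level, int gmem_order, int vendor_cap)
	{
		int level = max_level_for_order(gmem_order);

		if (max_level < level)
			level = max_level;
		if (vendor_cap && vendor_cap < level)
			level = vendor_cap;
		return level;
	}

	int main(void)
	{
		/* A 2 MiB (order-9) folio with no vendor cap allows a 2M mapping. */
		printf("%d\n", max_private_level(PG_LEVEL_1G, 9, 0));		/* 2 */
		/* A 1 GiB folio, but the vendor caps private mappings at 2M. */
		printf("%d\n", max_private_level(PG_LEVEL_1G, 18, PG_LEVEL_2M));/* 2 */
		return 0;
	}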