KVM: x86/mmu: Move walk_slot_rmaps() up near for_each_slot_rmap_range()
author		Sean Christopherson <seanjc@google.com>
		Fri, 9 Aug 2024 19:43:22 +0000 (12:43 -0700)
committer	Sean Christopherson <seanjc@google.com>
		Tue, 10 Sep 2024 03:22:01 +0000 (20:22 -0700)
Move walk_slot_rmaps() and friends up near for_each_slot_rmap_range() so
that the walkers can be used to handle mmu_notifier invalidations, and so
that similar functions have some amount of locality in the code.

No functional change intended.

Link: https://lore.kernel.org/r/20240809194335.1726916-11-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
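
For context, the point of hoisting the walkers is that an mmu_notifier-driven range handler can only call __walk_slot_rmaps() if the helper is defined earlier in mmu.c. Below is a minimal sketch of such a caller, assuming an exclusive [start, end) GFN range; the function name kvm_zap_gfn_range_rmaps and the handler __kvm_zap_rmap are illustrative placeholders, not part of this patch.

static bool kvm_zap_gfn_range_rmaps(struct kvm *kvm,
				    const struct kvm_memory_slot *slot,
				    gfn_t start, gfn_t end, bool flush)
{
	/* Clamp the requested range to the memslot before walking rmaps. */
	gfn_t first = max(start, slot->base_gfn);
	gfn_t last  = min(end, slot->base_gfn + slot->npages);

	if (first >= last)
		return flush;

	/*
	 * __walk_slot_rmaps() must be visible at this point in mmu.c, which
	 * is what this patch arranges.  __kvm_zap_rmap stands in for a
	 * slot_rmaps_handler callback and is an assumption of the sketch.
	 */
	return __walk_slot_rmaps(kvm, slot, __kvm_zap_rmap,
				 PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
				 first, last - 1, true, flush);
}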
arch/x86/kvm/mmu/mmu.c

index 330b87a1c80ad7a4d0ba27f49f7f5cc799e84c03..17edf1499be74b5159f655bfc753c8002cda58f2 100644
@@ -1516,6 +1516,59 @@ static void slot_rmap_walk_next(struct slot_rmap_walk_iterator *iterator)
             slot_rmap_walk_okay(_iter_);                               \
             slot_rmap_walk_next(_iter_))
 
+/* The return value indicates if tlb flush on all vcpus is needed. */
+typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
+                                   struct kvm_rmap_head *rmap_head,
+                                   const struct kvm_memory_slot *slot);
+
+static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
+                                             const struct kvm_memory_slot *slot,
+                                             slot_rmaps_handler fn,
+                                             int start_level, int end_level,
+                                             gfn_t start_gfn, gfn_t end_gfn,
+                                             bool flush_on_yield, bool flush)
+{
+       struct slot_rmap_walk_iterator iterator;
+
+       lockdep_assert_held_write(&kvm->mmu_lock);
+
+       for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
+                       end_gfn, &iterator) {
+               if (iterator.rmap)
+                       flush |= fn(kvm, iterator.rmap, slot);
+
+               if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
+                       if (flush && flush_on_yield) {
+                               kvm_flush_remote_tlbs_range(kvm, start_gfn,
+                                                           iterator.gfn - start_gfn + 1);
+                               flush = false;
+                       }
+                       cond_resched_rwlock_write(&kvm->mmu_lock);
+               }
+       }
+
+       return flush;
+}
+
+static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
+                                           const struct kvm_memory_slot *slot,
+                                           slot_rmaps_handler fn,
+                                           int start_level, int end_level,
+                                           bool flush_on_yield)
+{
+       return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
+                                slot->base_gfn, slot->base_gfn + slot->npages - 1,
+                                flush_on_yield, false);
+}
+
+static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
+                                              const struct kvm_memory_slot *slot,
+                                              slot_rmaps_handler fn,
+                                              bool flush_on_yield)
+{
+       return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
+}
+
 typedef bool (*rmap_handler_t)(struct kvm *kvm, struct kvm_rmap_head *rmap_head,
                               struct kvm_memory_slot *slot, gfn_t gfn,
                               int level);
@@ -6272,59 +6325,6 @@ void kvm_configure_mmu(bool enable_tdp, int tdp_forced_root_level,
 }
 EXPORT_SYMBOL_GPL(kvm_configure_mmu);
 
-/* The return value indicates if tlb flush on all vcpus is needed. */
-typedef bool (*slot_rmaps_handler) (struct kvm *kvm,
-                                   struct kvm_rmap_head *rmap_head,
-                                   const struct kvm_memory_slot *slot);
-
-static __always_inline bool __walk_slot_rmaps(struct kvm *kvm,
-                                             const struct kvm_memory_slot *slot,
-                                             slot_rmaps_handler fn,
-                                             int start_level, int end_level,
-                                             gfn_t start_gfn, gfn_t end_gfn,
-                                             bool flush_on_yield, bool flush)
-{
-       struct slot_rmap_walk_iterator iterator;
-
-       lockdep_assert_held_write(&kvm->mmu_lock);
-
-       for_each_slot_rmap_range(slot, start_level, end_level, start_gfn,
-                       end_gfn, &iterator) {
-               if (iterator.rmap)
-                       flush |= fn(kvm, iterator.rmap, slot);
-
-               if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
-                       if (flush && flush_on_yield) {
-                               kvm_flush_remote_tlbs_range(kvm, start_gfn,
-                                                           iterator.gfn - start_gfn + 1);
-                               flush = false;
-                       }
-                       cond_resched_rwlock_write(&kvm->mmu_lock);
-               }
-       }
-
-       return flush;
-}
-
-static __always_inline bool walk_slot_rmaps(struct kvm *kvm,
-                                           const struct kvm_memory_slot *slot,
-                                           slot_rmaps_handler fn,
-                                           int start_level, int end_level,
-                                           bool flush_on_yield)
-{
-       return __walk_slot_rmaps(kvm, slot, fn, start_level, end_level,
-                                slot->base_gfn, slot->base_gfn + slot->npages - 1,
-                                flush_on_yield, false);
-}
-
-static __always_inline bool walk_slot_rmaps_4k(struct kvm *kvm,
-                                              const struct kvm_memory_slot *slot,
-                                              slot_rmaps_handler fn,
-                                              bool flush_on_yield)
-{
-       return walk_slot_rmaps(kvm, slot, fn, PG_LEVEL_4K, PG_LEVEL_4K, flush_on_yield);
-}
-
 static void free_mmu_pages(struct kvm_mmu *mmu)
 {
        if (!tdp_enabled && mmu->pae_root)
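
For reference, a slot_rmaps_handler is consumed by passing it to walk_slot_rmaps() while mmu_lock is held for write, roughly as in the write-protection path below. This is a sketch of how existing callers in mmu.c look, with slot_rmap_write_protect standing in for the handler; treat the exact call site as an approximation rather than part of this patch.

	/*
	 * Sketch of a typical caller: write-protect the shadow-paging
	 * mappings covering the memslot.  The handler runs under mmu_lock
	 * held for write, which __walk_slot_rmaps() asserts via lockdep.
	 */
	if (kvm_memslots_have_rmaps(kvm)) {
		write_lock(&kvm->mmu_lock);
		flush = walk_slot_rmaps(kvm, memslot, slot_rmap_write_protect,
					PG_LEVEL_4K, KVM_MAX_HUGEPAGE_LEVEL,
					false);
		write_unlock(&kvm->mmu_lock);
	}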