git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
KVM: Rename kvm_handle_hva_range()
authorJames Houghton <jthoughton@google.com>
Tue, 4 Feb 2025 00:40:28 +0000 (00:40 +0000)
committerSean Christopherson <seanjc@google.com>
Wed, 12 Feb 2025 19:09:30 +0000 (11:09 -0800)
Rename kvm_handle_hva_range() to kvm_age_hva_range(),
kvm_handle_hva_range_no_flush() to kvm_age_hva_range_no_flush(), and
__kvm_handle_hva_range() to kvm_handle_hva_range(), as
kvm_age_hva_range() will get more aging-specific functionality.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: James Houghton <jthoughton@google.com>
Link: https://lore.kernel.org/r/20250204004038.1680123-2-jthoughton@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
virt/kvm/kvm_main.c

index ba0327e2d0d3336c680be6543289b88ff22540e8..0f94349f99e27dac1919f15430251b7bd26f559e 100644 (file)
@@ -551,8 +551,8 @@ static void kvm_null_fn(void)
             node;                                                           \
             node = interval_tree_iter_next(node, start, last))      \
 
-static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
-                                                          const struct kvm_mmu_notifier_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+                                                        const struct kvm_mmu_notifier_range *range)
 {
        struct kvm_mmu_notifier_return r = {
                .ret = false,
@@ -633,7 +633,7 @@ mmu_unlock:
        return r;
 }
 
-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
                                                unsigned long start,
                                                unsigned long end,
                                                gfn_handler_t handler,
@@ -649,15 +649,15 @@ static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
                .may_block      = false,
        };
 
-       return __kvm_handle_hva_range(kvm, &range).ret;
+       return kvm_handle_hva_range(kvm, &range).ret;
 }
 
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
-                                                        unsigned long start,
-                                                        unsigned long end,
-                                                        gfn_handler_t handler)
+static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+                                                     unsigned long start,
+                                                     unsigned long end,
+                                                     gfn_handler_t handler)
 {
-       return kvm_handle_hva_range(mn, start, end, handler, false);
+       return kvm_age_hva_range(mn, start, end, handler, false);
 }
 
 void kvm_mmu_invalidate_begin(struct kvm *kvm)
@@ -752,7 +752,7 @@ static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
         * that guest memory has been reclaimed.  This needs to be done *after*
         * dropping mmu_lock, as x86's reclaim path is slooooow.
         */
-       if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+       if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
                kvm_arch_guest_memory_reclaimed(kvm);
 
        return 0;
@@ -798,7 +798,7 @@ static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
        };
        bool wake;
 
-       __kvm_handle_hva_range(kvm, &hva_range);
+       kvm_handle_hva_range(kvm, &hva_range);
 
        /* Pairs with the increment in range_start(). */
        spin_lock(&kvm->mn_invalidate_lock);
@@ -822,8 +822,8 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
 {
        trace_kvm_age_hva(start, end);
 
-       return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
-                                   !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
+       return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
+                                !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
 }
 
 static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
@@ -846,7 +846,7 @@ static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
         * cadence. If we find this inaccurate, we might come up with a
         * more sophisticated heuristic later.
         */
-       return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+       return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
 }
 
 static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
@@ -855,8 +855,8 @@ static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
 {
        trace_kvm_test_age_hva(address);
 
-       return kvm_handle_hva_range_no_flush(mn, address, address + 1,
-                                            kvm_test_age_gfn);
+       return kvm_age_hva_range_no_flush(mn, address, address + 1,
+                                         kvm_test_age_gfn);
 }
 
 static void kvm_mmu_notifier_release(struct mmu_notifier *mn,