node; \
node = interval_tree_iter_next(node, start, last)) \
-static __always_inline kvm_mn_ret_t __kvm_handle_hva_range(struct kvm *kvm,
- const struct kvm_mmu_notifier_range *range)
+static __always_inline kvm_mn_ret_t kvm_handle_hva_range(struct kvm *kvm,
+ const struct kvm_mmu_notifier_range *range)
{
struct kvm_mmu_notifier_return r = {
.ret = false,
return r;
}
-static __always_inline int kvm_handle_hva_range(struct mmu_notifier *mn,
+static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
unsigned long start,
unsigned long end,
gfn_handler_t handler,
.may_block = false,
};
- return __kvm_handle_hva_range(kvm, &range).ret;
+ return kvm_handle_hva_range(kvm, &range).ret;
}
-static __always_inline int kvm_handle_hva_range_no_flush(struct mmu_notifier *mn,
- unsigned long start,
- unsigned long end,
- gfn_handler_t handler)
+static __always_inline int kvm_age_hva_range_no_flush(struct mmu_notifier *mn,
+ unsigned long start,
+ unsigned long end,
+ gfn_handler_t handler)
{
- return kvm_handle_hva_range(mn, start, end, handler, false);
+ return kvm_age_hva_range(mn, start, end, handler, false);
}
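
For reference, the rename leaves a two-level structure: kvm_age_hva_range() and kvm_age_hva_range_no_flush() are thin aging-specific wrappers that package their arguments into a struct kvm_mmu_notifier_range and hand it to the generic walker kvm_handle_hva_range() (formerly __kvm_handle_hva_range()). A minimal sketch of the wrapper, pieced together from the hunks above; mmu_notifier_to_kvm() and the .start/.end/.handler/.flush_on_ret fields are assumed from the surrounding kvm_main.c context rather than shown in this excerpt:

	static __always_inline int kvm_age_hva_range(struct mmu_notifier *mn,
						     unsigned long start,
						     unsigned long end,
						     gfn_handler_t handler,
						     bool flush_on_ret)
	{
		struct kvm *kvm = mmu_notifier_to_kvm(mn);	/* assumed helper */
		const struct kvm_mmu_notifier_range range = {
			.start		= start,		/* assumed field */
			.end		= end,			/* assumed field */
			.handler	= handler,		/* assumed field */
			.flush_on_ret	= flush_on_ret,		/* assumed field */
			.may_block	= false,		/* visible in the hunk above */
		};

		/* Generic memslot walker, renamed from __kvm_handle_hva_range(). */
		return kvm_handle_hva_range(kvm, &range).ret;
	}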
void kvm_mmu_invalidate_begin(struct kvm *kvm)
* that guest memory has been reclaimed. This needs to be done *after*
* dropping mmu_lock, as x86's reclaim path is slooooow.
*/
- if (__kvm_handle_hva_range(kvm, &hva_range).found_memslot)
+ if (kvm_handle_hva_range(kvm, &hva_range).found_memslot)
kvm_arch_guest_memory_reclaimed(kvm);
return 0;
};
bool wake;
- __kvm_handle_hva_range(kvm, &hva_range);
+ kvm_handle_hva_range(kvm, &hva_range);
/* Pairs with the increment in range_start(). */
spin_lock(&kvm->mn_invalidate_lock);
{
trace_kvm_age_hva(start, end);
- return kvm_handle_hva_range(mn, start, end, kvm_age_gfn,
- !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
+ return kvm_age_hva_range(mn, start, end, kvm_age_gfn,
+ !IS_ENABLED(CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG));
}
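
Note the flush_on_ret argument here: with CONFIG_KVM_ELIDE_TLB_FLUSH_IF_YOUNG=y, IS_ENABLED() evaluates to 1 at compile time, so the aging path passes false and the remote TLB flush is elided even when a page was found young. Presumably the generic walker keys off the flag roughly as follows (a sketch assuming kvm_main.c's usual flush logic, not code from this patch):

	/* Flush only if the caller asked for it and a handler returned true. */
	if (range->flush_on_ret && r.ret)
		kvm_flush_remote_tlbs(kvm);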
static int kvm_mmu_notifier_clear_young(struct mmu_notifier *mn,
* cadence. If we find this inaccurate, we might come up with a
* more sophisticated heuristic later.
*/
- return kvm_handle_hva_range_no_flush(mn, start, end, kvm_age_gfn);
+ return kvm_age_hva_range_no_flush(mn, start, end, kvm_age_gfn);
}
static int kvm_mmu_notifier_test_young(struct mmu_notifier *mn,
{
trace_kvm_test_age_hva(address);
- return kvm_handle_hva_range_no_flush(mn, address, address + 1,
- kvm_test_age_gfn);
+ return kvm_age_hva_range_no_flush(mn, address, address + 1,
+ kvm_test_age_gfn);
}
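
The one-byte range [address, address + 1) is sufficient here because the walker converts HVAs to GFNs at page granularity, so the page containing the address is always covered in full. A sketch of that rounding, assuming the conversion done inside kvm_handle_hva_range() elsewhere in kvm_main.c:

	/* Round the HVA range out to full pages when mapping to GFNs. */
	gfn_range.start = hva_to_gfn_memslot(hva_start, slot);
	gfn_range.end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, slot);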
static void kvm_mmu_notifier_release(struct mmu_notifier *mn,