KVM: Rename gfn_to_page_many_atomic() to kvm_prefetch_pages()
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 10 Oct 2024 18:23:13 +0000 (11:23 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 25 Oct 2024 16:55:12 +0000 (12:55 -0400)
Rename gfn_to_page_many_atomic() to kvm_prefetch_pages() to try and
communicate its true purpose, as the "atomic" aspect is essentially a
side effect of the fact that x86 uses the API while holding mmu_lock.
E.g. even if mmu_lock weren't held, KVM wouldn't want to fault in pages,
as the goal is to opportunistically grab surrounding pages that have
already been accessed and/or dirtied by the host, and to do so quickly.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-12-seanjc@google.com>
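
For illustration only (not part of this commit): a minimal sketch of a
hypothetical caller, mirroring the direct_pte_prefetch_many() pattern in the
mmu.c hunk below. The key property is that kvm_prefetch_pages() goes through
get_user_pages_fast_only(), so it never faults; pages that the host has not
already faulted in (and mapped writable) are simply not returned.

/*
 * Illustrative sketch only; example_prefetch() is a hypothetical helper,
 * not part of this change.
 */
static int example_prefetch(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages)
{
	int ret;

	/*
	 * Opportunistic and non-blocking: kvm_prefetch_pages() uses
	 * get_user_pages_fast_only(), so only pages that are already
	 * present and writable in the host are grabbed.
	 */
	ret = kvm_prefetch_pages(slot, gfn, pages, nr_pages);
	if (ret <= 0)
		return -1;

	return ret;
}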

arch/x86/kvm/mmu/mmu.c
arch/x86/kvm/mmu/paging_tmpl.h
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 993eeba32487e473b750da884ed1370a7c452d32..37c2f8d11e051f1d59555cf7789d542f94989dc6 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2965,7 +2965,7 @@ static int direct_pte_prefetch_many(struct kvm_vcpu *vcpu,
        if (!slot)
                return -1;
 
-       ret = gfn_to_page_many_atomic(slot, gfn, pages, end - start);
+       ret = kvm_prefetch_pages(slot, gfn, pages, end - start);
        if (ret <= 0)
                return -1;
 
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 36b2607280f0fdcc6fe1f0ba951e3a4c254a397d..143b7e9f26dc598feaedb00a4d85757e4ef0fe3e 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -549,7 +549,7 @@ FNAME(prefetch_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
        if (!slot)
                return false;
 
-       if (gfn_to_page_many_atomic(slot, gfn, &page, 1) != 1)
+       if (kvm_prefetch_pages(slot, gfn, &page, 1) != 1)
                return false;
 
        mmu_set_spte(vcpu, slot, spte, pte_access, gfn, page_to_pfn(page), NULL);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8de9acb0b35ec921a032597bd0401ad3128c7970..6a3976c1a2182ce13ae29e6a49079e647773ead7 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1207,8 +1207,8 @@ void kvm_arch_flush_shadow_all(struct kvm *kvm);
 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot);
 
-int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
-                           struct page **pages, int nr_pages);
+int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+                      struct page **pages, int nr_pages);
 
 struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
 unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index e7561ca96a095a64a99634c10708803dad0c94b8..aa7ae0f0f90eaaf9b5e50163969bfa1cc02a7559 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3041,8 +3041,8 @@ kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
-int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
-                           struct page **pages, int nr_pages)
+int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
+                      struct page **pages, int nr_pages)
 {
        unsigned long addr;
        gfn_t entry = 0;
@@ -3056,7 +3056,7 @@ int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
 
        return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages);
 }
-EXPORT_SYMBOL_GPL(gfn_to_page_many_atomic);
+EXPORT_SYMBOL_GPL(kvm_prefetch_pages);
 
 /*
  * Do not use this helper unless you are absolutely certain the gfn _must_ be