KVM: Add kvm_faultin_pfn() to specifically service guest page faults
Author:     Sean Christopherson <seanjc@google.com>
AuthorDate: Thu, 10 Oct 2024 18:23:45 +0000 (11:23 -0700)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 25 Oct 2024 17:00:47 +0000 (13:00 -0400)
Add a new dedicated API, kvm_faultin_pfn(), for servicing guest page
faults, i.e. for getting pages/pfns that will be mapped into the guest via
an mmu_notifier-protected KVM MMU.  Keep struct kvm_follow_pfn buried in
internal code, as having __kvm_faultin_pfn() take "out" params is actually
cleaner for several architectures, e.g. it allows the caller to have its
own "page fault" structure without having to marshal data to/from
kvm_follow_pfn.

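As a sketch of the "out" params point: a hypothetical architecture fault
handler (the names below are invented for illustration, not taken from this
patch) can have kvm_faultin_pfn() write straight into its own fault
structure:

  /*
   * Illustrative only: "arch_page_fault" and "arch_handle_fault" are
   * hypothetical names, not part of this patch or any architecture.
   */
  struct arch_page_fault {
          gfn_t gfn;
          bool write;

          /* Outputs filled in by kvm_faultin_pfn(). */
          kvm_pfn_t pfn;
          bool map_writable;
          struct page *refcounted_page;
  };

  static int arch_handle_fault(struct kvm_vcpu *vcpu,
                               struct arch_page_fault *fault)
  {
          fault->pfn = kvm_faultin_pfn(vcpu, fault->gfn, fault->write,
                                       &fault->map_writable,
                                       &fault->refcounted_page);
          if (is_error_noslot_pfn(fault->pfn))
                  return -EFAULT;

          /* ... install the translation under mmu_lock ... */
          return 0;
  }
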
Long term, common KVM would ideally provide a kvm_page_fault structure, a
la x86's struct of the same name.  But all architectures need to be
converted to a common API before that can happen.

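For reference, x86's struct kvm_page_fault bundles the fault's inputs with
the resolved outputs; an abridged, from-memory sketch (see
arch/x86/kvm/mmu/mmu_internal.h for the real definition):

  /* Abridged sketch of x86's struct, not a verbatim copy. */
  struct kvm_page_fault {
          /* Inputs, constant for the duration of the fault. */
          const gpa_t addr;
          const bool write;
          const bool exec;

          /* Derived state, e.g. from the memslot lookup. */
          gfn_t gfn;
          struct kvm_memory_slot *slot;

          /* Outputs of faulting in the pfn. */
          kvm_pfn_t pfn;
          struct page *refcounted_page;
          bool map_writable;
  };
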
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-44-seanjc@google.com>

include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 8d35a36e7707a8f22111106fd4de2487b6c2addd..a63b0325d3e280d22bf6349cc0729166ec698e3b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1231,6 +1231,18 @@ static inline void kvm_release_page_unused(struct page *page)
 void kvm_release_page_clean(struct page *page);
 void kvm_release_page_dirty(struct page *page);
 
+kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
+                           unsigned int foll, bool *writable,
+                           struct page **refcounted_page);
+
+static inline kvm_pfn_t kvm_faultin_pfn(struct kvm_vcpu *vcpu, gfn_t gfn,
+                                       bool write, bool *writable,
+                                       struct page **refcounted_page)
+{
+       return __kvm_faultin_pfn(kvm_vcpu_gfn_to_memslot(vcpu, gfn), gfn,
+                                write ? FOLL_WRITE : 0, writable, refcounted_page);
+}
+
 kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
 kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
                      bool *writable);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index becf640e369cb733bbfbf09eead2a686898c53e1..f79745d6500c2c6d0477ea5b1c643e3ad89de111 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3092,6 +3092,28 @@ kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_pfn);
 
+kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
+                           unsigned int foll, bool *writable,
+                           struct page **refcounted_page)
+{
+       struct kvm_follow_pfn kfp = {
+               .slot = slot,
+               .gfn = gfn,
+               .flags = foll,
+               .map_writable = writable,
+               .refcounted_page = refcounted_page,
+       };
+
+       if (WARN_ON_ONCE(!writable || !refcounted_page))
+               return KVM_PFN_ERR_FAULT;
+
+       *writable = false;
+       *refcounted_page = NULL;
+
+       return kvm_follow_pfn(&kfp);
+}
+EXPORT_SYMBOL_GPL(__kvm_faultin_pfn);
+
 int kvm_prefetch_pages(struct kvm_memory_slot *slot, gfn_t gfn,
                       struct page **pages, int nr_pages)
 {
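
To close the loop, a hedged end-to-end sketch of the new API: fault the page
in, install the mapping, then drop the reference via the kvm_release_page_*()
helpers declared in the kvm_host.h hunk above ("install_spte" is a stand-in
for arch-specific mapping code, not a real function):

  static int sketch_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool write)
  {
          struct page *refcounted_page;
          bool writable;
          kvm_pfn_t pfn;

          pfn = kvm_faultin_pfn(vcpu, gfn, write, &writable, &refcounted_page);
          if (is_error_noslot_pfn(pfn))
                  return -EFAULT;

          /* ... install_spte(vcpu, gfn, pfn, writable) under mmu_lock ... */

          if (refcounted_page) {
                  /* Mark the page dirty only if it may have been written. */
                  if (write)
                          kvm_release_page_dirty(refcounted_page);
                  else
                          kvm_release_page_clean(refcounted_page);
          }
          return 0;
  }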