KVM: Migrate kvm_vcpu_map() to kvm_follow_pfn()
author David Stevens <stevensd@chromium.org>
Thu, 10 Oct 2024 18:23:32 +0000 (11:23 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 25 Oct 2024 16:57:59 +0000 (12:57 -0400)
Migrate kvm_vcpu_map() to kvm_follow_pfn(), and have it track whether or
not the map holds a refcounted struct page.  Precisely tracking struct
page references will eventually allow removing kvm_pfn_to_refcounted_page()
and its various wrappers.
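
For context, a typical user of this API pairs kvm_vcpu_map() with
kvm_vcpu_unmap(); the refcounted_page bookkeeping added below stays
internal to the map. A minimal caller sketch (the helper name and the
memset payload are illustrative, not part of this commit):

    #include <linux/kvm_host.h>

    /* Hypothetical helper: map one guest page, dirty it, unmap it. */
    static int demo_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn)
    {
            struct kvm_host_map map;
            int r;

            r = kvm_vcpu_map(vcpu, gfn, &map);
            if (r)
                    return r;

            memset(map.hva, 0, PAGE_SIZE);  /* write via the host mapping */

            /* dirty=true marks the gfn dirty before the page is released. */
            kvm_vcpu_unmap(vcpu, &map, true);
            return 0;
    }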

Signed-off-by: David Stevens <stevensd@chromium.org>
[sean: use a pointer instead of a boolean]
Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-31-seanjc@google.com>

include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cd6f5cc1930f5f3ebb4a3c660adc2360f96a9c7d..35e1beb017dd5f4bbdde19cccd5c38f6abe35a1f 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -280,6 +280,7 @@ struct kvm_host_map {
         * can be used as guest memory but they are not managed by host
         * kernel).
         */
+       struct page *refcounted_page;
        struct page *page;
        void *hva;
        kvm_pfn_t pfn;
@@ -1238,7 +1239,6 @@ void kvm_release_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_dirty(kvm_pfn_t pfn);
 void kvm_set_pfn_accessed(kvm_pfn_t pfn);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty);
 int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
                        int len);
 int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 64888257e30107698b7bf37ecf436e76620e545c..842a5d5f3120c15055361dd28f9f58f3bbf3edec 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -3087,21 +3087,21 @@ struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(gfn_to_page);
 
-void kvm_release_pfn(kvm_pfn_t pfn, bool dirty)
-{
-       if (dirty)
-               kvm_release_pfn_dirty(pfn);
-       else
-               kvm_release_pfn_clean(pfn);
-}
-
 int kvm_vcpu_map(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map)
 {
+       struct kvm_follow_pfn kfp = {
+               .slot = gfn_to_memslot(vcpu->kvm, gfn),
+               .gfn = gfn,
+               .flags = FOLL_WRITE,
+               .refcounted_page = &map->refcounted_page,
+       };
+
+       map->refcounted_page = NULL;
        map->page = NULL;
        map->hva = NULL;
        map->gfn = gfn;
 
-       map->pfn = gfn_to_pfn(vcpu->kvm, gfn);
+       map->pfn = kvm_follow_pfn(&kfp);
        if (is_error_noslot_pfn(map->pfn))
                return -EINVAL;
 
@@ -3133,10 +3133,16 @@ void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty)
        if (dirty)
                kvm_vcpu_mark_page_dirty(vcpu, map->gfn);
 
-       kvm_release_pfn(map->pfn, dirty);
+       if (map->refcounted_page) {
+               if (dirty)
+                       kvm_release_page_dirty(map->refcounted_page);
+               else
+                       kvm_release_page_clean(map->refcounted_page);
+       }
 
        map->hva = NULL;
        map->page = NULL;
+       map->refcounted_page = NULL;
 }
 EXPORT_SYMBOL_GPL(kvm_vcpu_unmap);
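
The behavioral point worth noting in kvm_vcpu_unmap(): the release now
keys off map->refcounted_page rather than the pfn, so pfns that are not
backed by a refcounted struct page get no put at all. A sketch of the
resulting contract (demo_release_mapped_page() is illustrative, not part
of the commit):

    /*
     * kvm_follow_pfn() stores the backing struct page in
     * *kfp->refcounted_page only when it acquired a reference on it;
     * otherwise the field stays NULL and no release is owed.
     */
    static void demo_release_mapped_page(struct kvm_host_map *map, bool dirty)
    {
            if (!map->refcounted_page)
                    return; /* not a refcounted page, nothing to put */

            if (dirty)
                    kvm_release_page_dirty(map->refcounted_page);
            else
                    kvm_release_page_clean(map->refcounted_page);
            map->refcounted_page = NULL;
    }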