KVM: pfncache: Precisely track refcounted pages
author Sean Christopherson <seanjc@google.com>
Thu, 10 Oct 2024 18:23:31 +0000 (11:23 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 25 Oct 2024 16:57:59 +0000 (12:57 -0400)
Track refcounted struct page memory using kvm_follow_pfn.refcounted_page
instead of relying on kvm_release_pfn_clean() to correctly detect that the
pfn is associated with a struct page.

Tested-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-30-seanjc@google.com>

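The core of the change described above: the pfn cache asks the lookup to report the backing struct page explicitly, and drops its temporary reference through that page rather than re-deriving a page from the pfn. A minimal sketch of the pattern follows; the struct fields are taken from the hunks below, while the hva_to_pfn() call and the error check are assumptions based on the surrounding pfncache.c code, not part of this diff.

	struct page *page;
	struct kvm_follow_pfn kfp = {
		.slot  = gpc->memslot,
		.gfn   = gpa_to_gfn(gpc->gpa),
		.flags = FOLL_WRITE,
		.hva   = gpc->uhva,
		/* Set by the lookup to the refcounted page backing the pfn, if any. */
		.refcounted_page = &page,
	};
	kvm_pfn_t new_pfn;

	new_pfn = hva_to_pfn(&kfp);		/* assumed lookup helper */
	if (is_error_noslot_pfn(new_pfn))
		return -EFAULT;		/* assumed error handling */

	/*
	 * Release the temporary reference via the reported page.  If the pfn
	 * is not backed by a refcounted page, 'page' is NULL and the release
	 * is a nop.
	 */
	kvm_release_page_clean(page);

The design point is that kvm_release_pfn_clean() had to infer whether a pfn was backed by a refcounted struct page, whereas kvm_follow_pfn.refcounted_page lets the lookup state it explicitly; the cache can then use kvm_release_page_unused() on its retry and error paths and kvm_release_page_clean() once the mapping is installed, as the hunks below show.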
virt/kvm/pfncache.c

index 067daf9ad6efe3be71b37f520adaba83a1a0db20..728d2c1b488a8135be08610c4b02cc7448e64515 100644 (file)
@@ -159,11 +159,14 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
        kvm_pfn_t new_pfn = KVM_PFN_ERR_FAULT;
        void *new_khva = NULL;
        unsigned long mmu_seq;
+       struct page *page;
+
        struct kvm_follow_pfn kfp = {
                .slot = gpc->memslot,
                .gfn = gpa_to_gfn(gpc->gpa),
                .flags = FOLL_WRITE,
                .hva = gpc->uhva,
+               .refcounted_page = &page,
        };
 
        lockdep_assert_held(&gpc->refresh_lock);
@@ -198,7 +201,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
                        if (new_khva != old_khva)
                                gpc_unmap(new_pfn, new_khva);
 
-                       kvm_release_pfn_clean(new_pfn);
+                       kvm_release_page_unused(page);
 
                        cond_resched();
                }
@@ -218,7 +221,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
                        new_khva = gpc_map(new_pfn);
 
                if (!new_khva) {
-                       kvm_release_pfn_clean(new_pfn);
+                       kvm_release_page_unused(page);
                        goto out_error;
                }
 
@@ -236,11 +239,11 @@ static kvm_pfn_t hva_to_pfn_retry(struct gfn_to_pfn_cache *gpc)
        gpc->khva = new_khva + offset_in_page(gpc->uhva);
 
        /*
-        * Put the reference to the _new_ pfn.  The pfn is now tracked by the
+        * Put the reference to the _new_ page.  The page is now tracked by the
         * cache and can be safely migrated, swapped, etc... as the cache will
         * invalidate any mappings in response to relevant mmu_notifier events.
         */
-       kvm_release_pfn_clean(new_pfn);
+       kvm_release_page_clean(page);
 
        return 0;