KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults
author     Sean Christopherson <seanjc@google.com>
           Thu, 10 Oct 2024 18:23:56 +0000 (11:23 -0700)
committer  Paolo Bonzini <pbonzini@redhat.com>
           Fri, 25 Oct 2024 17:00:48 +0000 (13:00 -0400)
Convert PPC e500 to use __kvm_faultin_pfn()+kvm_release_faultin_page(),
and continue the inexorable march towards the demise of
kvm_pfn_to_refcounted_page().

Signed-off-by: Sean Christopherson <seanjc@google.com>
Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20241010182427.1434605-55-seanjc@google.com>
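
The conversion replaces the open-coded gfn_to_pfn_memslot() + kvm_set_pfn_dirty() +
kvm_release_pfn_clean() sequence with the paired __kvm_faultin_pfn() /
kvm_release_faultin_page() helpers. As a rough reference, their declarations
(paraphrased from include/linux/kvm_host.h; parameter names are approximate) are:

    /* Paraphrased declarations; see include/linux/kvm_host.h for the real ones. */
    kvm_pfn_t __kvm_faultin_pfn(const struct kvm_memory_slot *slot, gfn_t gfn,
                                unsigned int foll, bool *writable,
                                struct page **refcounted_page);

    void kvm_release_faultin_page(struct kvm *kvm, struct page *page,
                                  bool unused, bool dirty);

__kvm_faultin_pfn() returns the pfn and, when the pfn is backed by a refcounted
page, hands back a reference on that page via @refcounted_page;
kvm_release_faultin_page() drops that reference, with @dirty indicating the page
should be marked dirty and @unused indicating the fault was ultimately not
consumed (e.g. on error), matching the kvm_release_faultin_page(kvm, page, !!ret,
writable) call site in the diff below.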

diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 334dd96f8081470ed05858ee576ee90d4c78df99..e5a145b578a4768e158357114f80505da380a33f 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -322,6 +322,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 {
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
+       struct page *page = NULL;
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;
@@ -443,7 +444,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
        if (likely(!pfnmap)) {
                tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
-               pfn = gfn_to_pfn_memslot(slot, gfn);
+               pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
                if (is_error_noslot_pfn(pfn)) {
                        if (printk_ratelimit())
                                pr_err("%s: real page not found for gfn %lx\n",
@@ -488,8 +489,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                }
        }
        writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-       if (writable)
-               kvm_set_pfn_dirty(pfn);
 
        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);
@@ -498,8 +497,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        kvmppc_mmu_flush_icache(pfn);
 
 out:
-       /* Drop refcount on page, so that mmu notifiers can clear it */
-       kvm_release_pfn_clean(pfn);
+       kvm_release_faultin_page(kvm, page, !!ret, writable);
        spin_unlock(&kvm->mmu_lock);
        return ret;
 }
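
For orientation, below is a condensed, illustrative sketch of the fault path after
this change. The simplified signature is hypothetical, and the PFN-map
(tlbsel == 1) handling, the Linux PTE/WIMG lookup, the shadow-TLB setup and most
error handling in kvmppc_e500_shadow_map() are elided.

    /*
     * Condensed sketch only; elided steps are marked with comments and the
     * signature is simplified relative to kvmppc_e500_shadow_map().
     */
    static int e500_shadow_map_sketch(struct kvm *kvm,
                                      struct kvm_memory_slot *slot, gfn_t gfn)
    {
            struct page *page = NULL;       /* set by __kvm_faultin_pfn() */
            bool writable = false;
            kvm_pfn_t pfn;
            int ret = 0;

            /*
             * Fault the page in and take a reference on it; FOLL_WRITE asks
             * for a writable mapping.  The backing struct page, if the pfn
             * is refcounted, is returned via @page.
             */
            pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
            if (is_error_noslot_pfn(pfn))
                    return -EINVAL;

            spin_lock(&kvm->mmu_lock);

            /* ... mmu_invalidate_retry() check, WIMG lookup, ... */
            /* ... writable = kvmppc_e500_ref_setup(...), stlbe setup ... */

            /*
             * Drop the reference taken by __kvm_faultin_pfn().  @writable
             * doubles as the dirty flag, and !!ret tells the helper whether
             * the fault failed and the page went unused, replacing the old
             * kvm_set_pfn_dirty() + kvm_release_pfn_clean() pair.
             */
            kvm_release_faultin_page(kvm, page, !!ret, writable);
            spin_unlock(&kvm->mmu_lock);
            return ret;
    }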