From: Greg Kroah-Hartman
Date: Mon, 10 Mar 2025 15:51:47 +0000 (+0100)
Subject: Revert "KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults"
X-Git-Tag: v6.12.19~8
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=681b5823ed7955f351e7012f5c6f50ececa75ef1;p=thirdparty%2Fkernel%2Fstable.git

Revert "KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults"

This reverts commit 833f69be62ac366b5c23b4a6434389e470dd5c7f which is
commit 419cfb983ca93e75e905794521afefcfa07988bb upstream.

It should not have been applied.

Link: https://lore.kernel.org/r/CABgObfb5U9zwTQBPkPB=mKu-vMrRspPCm4wfxoQpB+SyAnb5WQ@mail.gmail.com
Reported-by: Paolo Bonzini
Signed-off-by: Greg Kroah-Hartman
---

diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index e5a145b578a47..334dd96f80814 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -322,7 +322,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 {
 	struct kvm_memory_slot *slot;
 	unsigned long pfn = 0; /* silence GCC warning */
-	struct page *page = NULL;
 	unsigned long hva;
 	int pfnmap = 0;
 	int tsize = BOOK3E_PAGESZ_4K;
@@ -444,7 +443,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 
 	if (likely(!pfnmap)) {
 		tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
-		pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
+		pfn = gfn_to_pfn_memslot(slot, gfn);
 		if (is_error_noslot_pfn(pfn)) {
 			if (printk_ratelimit())
 				pr_err("%s: real page not found for gfn %lx\n",
@@ -489,6 +488,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 		}
 	}
 	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+	if (writable)
+		kvm_set_pfn_dirty(pfn);
 
 	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
 				ref, gvaddr, stlbe);
@@ -497,7 +498,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 	kvmppc_mmu_flush_icache(pfn);
 
 out:
-	kvm_release_faultin_page(kvm, page, !!ret, writable);
+	/* Drop refcount on page, so that mmu notifiers can clear it */
+	kvm_release_pfn_clean(pfn);
 	spin_unlock(&kvm->mmu_lock);
 	return ret;
 }
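
For reference, a condensed, illustrative sketch of the fault path in
kvmppc_e500_shadow_map() as it reads after this revert. It only restates the
acquire/dirty/release calls visible in the hunks above, using the same
identifiers; error handling, the pfnmap path, and the mmu_lock handling are
omitted, so the layout is not the exact file contents:

	/* Legacy lookup: take a reference on the host pfn backing gfn. */
	pfn = gfn_to_pfn_memslot(slot, gfn);

	/* Set up the ref and mark the page dirty if it is mapped writable. */
	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
	if (writable)
		kvm_set_pfn_dirty(pfn);

	/* Install the shadow TLB entry for the guest mapping. */
	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize, ref, gvaddr, stlbe);

	/* Drop refcount on page, so that mmu notifiers can clear it. */
	kvm_release_pfn_clean(pfn);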