--- /dev/null
+From bf00e1de664f67408f628c0297b981b63637de4b Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 10 Mar 2025 16:47:34 +0100
+Subject: Revert "KVM: e500: always restore irqs"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit b9d93eda1214985d1b3d00a0f9d4306282a5b189 which is
+commit 87ecfdbc699cc95fac73291b52650283ddcf929d upstream.
+
+It should not have been applied.
+
+Link: https://lore.kernel.org/r/CABgObfb5U9zwTQBPkPB=mKu-vMrRspPCm4wfxoQpB+SyAnb5WQ@mail.gmail.com
+Reported-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/e500_mmu_host.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -479,6 +479,7 @@ static inline int kvmppc_e500_shadow_map
+ if (pte_present(pte)) {
+ wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
+ MAS2_WIMGE_MASK;
++ local_irq_restore(flags);
+ } else {
+ local_irq_restore(flags);
+ pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
+@@ -487,9 +488,8 @@ static inline int kvmppc_e500_shadow_map
+ goto out;
+ }
+ }
+- local_irq_restore(flags);
+-
+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
++
+ kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ ref, gvaddr, stlbe);
+
--- /dev/null
+From 0d7b7b251a8eb351984096ae15cc5312c6954e91 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 10 Mar 2025 16:52:25 +0100
+Subject: Revert "KVM: PPC: e500: Mark "struct page" dirty in kvmppc_e500_shadow_map()"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 15d60c13b704f770ba45c58477380d4577cebfa3 which is
+commit c9be85dabb376299504e0d391d15662c0edf8273 upstream.
+
+It should not have been applied.
+
+Link: https://lore.kernel.org/r/CABgObfb5U9zwTQBPkPB=mKu-vMrRspPCm4wfxoQpB+SyAnb5WQ@mail.gmail.com
+Reported-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/e500_mmu_host.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struc
+ return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
+ }
+
+-static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
++static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ kvm_pfn_t pfn, unsigned int wimg)
+ {
+@@ -252,7 +252,11 @@ static inline bool kvmppc_e500_ref_setup
+ /* Use guest supplied MAS2_G and MAS2_E */
+ ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
+
+- return tlbe_is_writable(gtlbe);
++ /* Mark the page accessed */
++ kvm_set_pfn_accessed(pfn);
++
++ if (tlbe_is_writable(gtlbe))
++ kvm_set_pfn_dirty(pfn);
+ }
+
+ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
+@@ -333,7 +337,6 @@ static inline int kvmppc_e500_shadow_map
+ unsigned int wimg = 0;
+ pgd_t *pgdir;
+ unsigned long flags;
+- bool writable = false;
+
+ /* used to check for invalidations in progress */
+ mmu_seq = kvm->mmu_invalidate_seq;
+@@ -487,9 +490,7 @@ static inline int kvmppc_e500_shadow_map
+ goto out;
+ }
+ }
+- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+- if (writable)
+- kvm_set_pfn_dirty(pfn);
++ kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
+
+ kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ ref, gvaddr, stlbe);
--- /dev/null
+From a9e4566b5b2edabd66d3f4398816e22bb0330406 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 10 Mar 2025 16:52:03 +0100
+Subject: Revert "KVM: PPC: e500: Mark "struct page" pfn accessed before dropping mmu_lock"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 59e21c4613b0a46f46eb124984928df46d88ad57 which is
+commit 84cf78dcd9d65c45ab73998d4ad50f433d53fb93 upstream.
+
+It should not have been applied.
+
+Link: https://lore.kernel.org/r/CABgObfb5U9zwTQBPkPB=mKu-vMrRspPCm4wfxoQpB+SyAnb5WQ@mail.gmail.com
+Reported-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/e500_mmu_host.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -498,9 +498,11 @@ static inline int kvmppc_e500_shadow_map
+ kvmppc_mmu_flush_icache(pfn);
+
+ out:
++ spin_unlock(&kvm->mmu_lock);
++
+ /* Drop refcount on page, so that mmu notifiers can clear it */
+ kvm_release_pfn_clean(pfn);
+- spin_unlock(&kvm->mmu_lock);
++
+ return ret;
+ }
+
--- /dev/null
+From db48a58c80e424f6211660062666693fb525e95e Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Mon, 10 Mar 2025 16:51:47 +0100
+Subject: Revert "KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit ba3cf83f4a5063edb6ee150633e641954ce30478 which is
+commit 419cfb983ca93e75e905794521afefcfa07988bb upstream.
+
+It should not have been applied.
+
+Link: https://lore.kernel.org/r/CABgObfb5U9zwTQBPkPB=mKu-vMrRspPCm4wfxoQpB+SyAnb5WQ@mail.gmail.com
+Reported-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/kvm/e500_mmu_host.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/arch/powerpc/kvm/e500_mmu_host.c
++++ b/arch/powerpc/kvm/e500_mmu_host.c
+@@ -322,7 +322,6 @@ static inline int kvmppc_e500_shadow_map
+ {
+ struct kvm_memory_slot *slot;
+ unsigned long pfn = 0; /* silence GCC warning */
+- struct page *page = NULL;
+ unsigned long hva;
+ int pfnmap = 0;
+ int tsize = BOOK3E_PAGESZ_4K;
+@@ -444,7 +443,7 @@ static inline int kvmppc_e500_shadow_map
+
+ if (likely(!pfnmap)) {
+ tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
+- pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
++ pfn = gfn_to_pfn_memslot(slot, gfn);
+ if (is_error_noslot_pfn(pfn)) {
+ if (printk_ratelimit())
+ pr_err("%s: real page not found for gfn %lx\n",
+@@ -489,6 +488,8 @@ static inline int kvmppc_e500_shadow_map
+ }
+ }
+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
++ if (writable)
++ kvm_set_pfn_dirty(pfn);
+
+ kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
+ ref, gvaddr, stlbe);
+@@ -497,7 +498,8 @@ static inline int kvmppc_e500_shadow_map
+ kvmppc_mmu_flush_icache(pfn);
+
+ out:
+- kvm_release_faultin_page(kvm, page, !!ret, writable);
++ /* Drop refcount on page, so that mmu notifiers can clear it */
++ kvm_release_pfn_clean(pfn);
+ spin_unlock(&kvm->mmu_lock);
+ return ret;
+ }
 alsa-hda-realtek-fix-incorrect-is_reachable-usage.patch
 riscv-fix-enabling-cbo.zero-when-running-in-m-mode.patch
 riscv-save-restore-envcfg-csr-during-cpu-suspend.patch
+revert-kvm-e500-always-restore-irqs.patch
+revert-kvm-ppc-e500-use-__kvm_faultin_pfn-to-handle-page-faults.patch
+revert-kvm-ppc-e500-mark-struct-page-pfn-accessed-before-dropping-mmu_lock.patch
+revert-kvm-ppc-e500-mark-struct-page-dirty-in-kvmppc_e500_shadow_map.patch