+++ /dev/null
-From be1a506fe00dacca127594abab5ab9fa8d8d958b Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sun, 12 Jan 2025 10:34:44 +0100
-Subject: KVM: e500: always restore irqs
-
-From: Paolo Bonzini <pbonzini@redhat.com>
-
-[ Upstream commit 87ecfdbc699cc95fac73291b52650283ddcf929d ]
-
-If find_linux_pte fails, IRQs will not be restored. This is unlikely
-to happen in practice since it would have been reported as hanging
-hosts, but it should of course be fixed anyway.
-
-Cc: stable@vger.kernel.org
-Reported-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index b717c13b8090f..eea40dde9901b 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -479,7 +479,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- if (pte_present(pte)) {
- wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
- MAS2_WIMGE_MASK;
-- local_irq_restore(flags);
- } else {
- local_irq_restore(flags);
- pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
-@@ -488,8 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- goto out;
- }
- }
-- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ local_irq_restore(flags);
-
-+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-
---
-2.39.5
-
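For context, the exit sequence of kvmppc_e500_shadow_map() after this fix looks roughly as follows. This is a simplified sketch reconstructed from the hunks above, not a verbatim excerpt: the enclosing if (ptep) block and the READ_ONCE() read are assumed from surrounding mainline context, and the error reporting in the "pte not present" branch is condensed to a comment. The point is that local_irq_restore() now sits on the single common path after the lookup, so it runs even when find_linux_pte() returns NULL.

	local_irq_save(flags);
	ptep = find_linux_pte(pgdir, hva, NULL, NULL);
	if (ptep) {
		pte_t pte = READ_ONCE(*ptep);

		if (pte_present(pte)) {
			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
				MAS2_WIMGE_MASK;
		} else {
			local_irq_restore(flags);
			/* ratelimited "pte not present" error, sets ret, then: */
			goto out;
		}
	}
	local_irq_restore(flags);	/* now reached even when find_linux_pte() fails */

	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);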
+++ /dev/null
-From 679d98d15780bff60008f94e27de136d360942aa Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:54 -0700
-Subject: KVM: PPC: e500: Mark "struct page" dirty in kvmppc_e500_shadow_map()
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit c9be85dabb376299504e0d391d15662c0edf8273 ]
-
-Mark the underlying page as dirty in kvmppc_e500_ref_setup()'s sole
-caller, kvmppc_e500_shadow_map(), which will allow converting e500 to
-__kvm_faultin_pfn() + kvm_release_faultin_page() without having to do
-a weird dance between ref_setup() and shadow_map().
-
-Opportunistically drop the redundant kvm_set_pfn_accessed(), as
-shadow_map() puts the page via kvm_release_pfn_clean().
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-53-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 13 ++++++-------
- 1 file changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index ed0c9c43d0cf1..b439a93ad4868 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
- return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
- }
-
--static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
-+static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- kvm_pfn_t pfn, unsigned int wimg)
- {
-@@ -252,11 +252,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- /* Use guest supplied MAS2_G and MAS2_E */
- ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
-
-- /* Mark the page accessed */
-- kvm_set_pfn_accessed(pfn);
--
-- if (tlbe_is_writable(gtlbe))
-- kvm_set_pfn_dirty(pfn);
-+ return tlbe_is_writable(gtlbe);
- }
-
- static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
-@@ -337,6 +333,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- unsigned int wimg = 0;
- pgd_t *pgdir;
- unsigned long flags;
-+ bool writable = false;
-
- /* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
-@@ -490,7 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- goto out;
- }
- }
-- kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ if (writable)
-+ kvm_set_pfn_dirty(pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
---
-2.39.5
-
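Pieced together from the hunks above, the helper and its sole caller end up shaped like this after the change (the untouched ref->pfn/ref->flags setup at the top of the helper is elided):

	static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
						 struct kvm_book3e_206_tlb_entry *gtlbe,
						 kvm_pfn_t pfn, unsigned int wimg)
	{
		/* ... ref->pfn and ref->flags initialisation unchanged ... */

		/* Use guest supplied MAS2_G and MAS2_E */
		ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

		return tlbe_is_writable(gtlbe);
	}

	/* ... and in kvmppc_e500_shadow_map(), its only caller: */
	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
	if (writable)
		kvm_set_pfn_dirty(pfn);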
+++ /dev/null
-From 5b7abd00d51786ca82fa7cccfbbb6bd41f757205 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:55 -0700
-Subject: KVM: PPC: e500: Mark "struct page" pfn accessed before dropping
- mmu_lock
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit 84cf78dcd9d65c45ab73998d4ad50f433d53fb93 ]
-
-Mark pages accessed before dropping mmu_lock when faulting in guest memory
-so that shadow_map() can convert to kvm_release_faultin_page() without
-tripping its lockdep assertion on mmu_lock being held. Marking pages
-accessed outside of mmu_lock is ok (not great, but safe), but marking
-pages _dirty_ outside of mmu_lock can make filesystems unhappy.
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-54-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index b439a93ad4868..ae0b6e317ef1c 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -498,11 +498,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- kvmppc_mmu_flush_icache(pfn);
-
- out:
-- spin_unlock(&kvm->mmu_lock);
--
- /* Drop refcount on page, so that mmu notifiers can clear it */
- kvm_release_pfn_clean(pfn);
--
-+ spin_unlock(&kvm->mmu_lock);
- return ret;
- }
-
---
-2.39.5
-
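The reordered exit path, taken directly from the hunk above, is simply:

	out:
		/* Drop refcount on page, so that mmu notifiers can clear it */
		kvm_release_pfn_clean(pfn);
		spin_unlock(&kvm->mmu_lock);
		return ret;

The release (and with it the accessed-bit update) now happens before mmu_lock is dropped, which is what the later kvm_release_faultin_page() conversion relies on.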
+++ /dev/null
-From a39361c0ba5718380cbb9382441675d82d5d85f5 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:56 -0700
-Subject: KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit 419cfb983ca93e75e905794521afefcfa07988bb ]
-
-Convert PPC e500 to use __kvm_faultin_pfn()+kvm_release_faultin_page(),
-and continue the inexorable march towards the demise of
-kvm_pfn_to_refcounted_page().
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-55-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index ae0b6e317ef1c..b717c13b8090f 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -322,6 +322,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- {
- struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
-+ struct page *page = NULL;
- unsigned long hva;
- int pfnmap = 0;
- int tsize = BOOK3E_PAGESZ_4K;
-@@ -443,7 +444,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-
- if (likely(!pfnmap)) {
- tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
-- pfn = gfn_to_pfn_memslot(slot, gfn);
-+ pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
- if (is_error_noslot_pfn(pfn)) {
- if (printk_ratelimit())
- pr_err("%s: real page not found for gfn %lx\n",
-@@ -488,8 +489,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- }
- }
- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-- if (writable)
-- kvm_set_pfn_dirty(pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-@@ -498,8 +497,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- kvmppc_mmu_flush_icache(pfn);
-
- out:
-- /* Drop refcount on page, so that mmu notifiers can clear it */
-- kvm_release_pfn_clean(pfn);
-+ kvm_release_faultin_page(kvm, page, !!ret, writable);
- spin_unlock(&kvm->mmu_lock);
- return ret;
- }
---
-2.39.5
-
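A condensed view of the conversion across the three hunks above (unchanged code elided to comments); the fault-in now tracks the backing struct page, and the release helper takes over both the dirty marking and the refcount drop:

	struct page *page = NULL;	/* new: backing page from the fault-in */

	/* ... */
	pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
	/* ... */
	writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
	/* explicit kvm_set_pfn_dirty() is gone; dirtying happens at release */
	/* ... */
out:
	kvm_release_faultin_page(kvm, page, !!ret, writable);
	spin_unlock(&kvm->mmu_lock);
	return ret;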
tipc-re-order-conditions-in-tipc_crypto_key_rcv.patch
selftests-net-ipsec-fix-null-pointer-dereference-in-.patch
input-allocate-keycode-for-phone-linking.patch
-kvm-ppc-e500-mark-struct-page-dirty-in-kvmppc_e500_s.patch
-kvm-ppc-e500-mark-struct-page-pfn-accessed-before-dr.patch
-kvm-ppc-e500-use-__kvm_faultin_pfn-to-handle-page-fa.patch
-kvm-e500-always-restore-irqs.patch
usb-chipidea-ci_hdrc_imx-use-dev_err_probe.patch
usb-chipidea-ci_hdrc_imx-convert-to-platform-remove-.patch
usb-chipidea-ci_hdrc_imx-decrement-device-s-refcount.patch
+++ /dev/null
-From 9d0855b0755840992d3ba4b1bdbd691654d76986 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sun, 12 Jan 2025 10:34:44 +0100
-Subject: KVM: e500: always restore irqs
-
-From: Paolo Bonzini <pbonzini@redhat.com>
-
-[ Upstream commit 87ecfdbc699cc95fac73291b52650283ddcf929d ]
-
-If find_linux_pte fails, IRQs will not be restored. This is unlikely
-to happen in practice since it would have been reported as hanging
-hosts, but it should of course be fixed anyway.
-
-Cc: stable@vger.kernel.org
-Reported-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index d3b0031c9586f..00037f270f4f0 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -479,7 +479,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- if (pte_present(pte)) {
- wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
- MAS2_WIMGE_MASK;
-- local_irq_restore(flags);
- } else {
- local_irq_restore(flags);
- pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
-@@ -488,8 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- goto out;
- }
- }
-- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ local_irq_restore(flags);
-
-+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-
---
-2.39.5
-
+++ /dev/null
-From 22d64c57795ad335cb6ab47494ffb1bf08c8fbcf Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:54 -0700
-Subject: KVM: PPC: e500: Mark "struct page" dirty in kvmppc_e500_shadow_map()
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit c9be85dabb376299504e0d391d15662c0edf8273 ]
-
-Mark the underlying page as dirty in kvmppc_e500_ref_setup()'s sole
-caller, kvmppc_e500_shadow_map(), which will allow converting e500 to
-__kvm_faultin_pfn() + kvm_release_faultin_page() without having to do
-a weird dance between ref_setup() and shadow_map().
-
-Opportunistically drop the redundant kvm_set_pfn_accessed(), as
-shadow_map() puts the page via kvm_release_pfn_clean().
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-53-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 13 ++++++-------
- 1 file changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index 7f16afc331efd..738556b7f175c 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
- return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
- }
-
--static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
-+static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- kvm_pfn_t pfn, unsigned int wimg)
- {
-@@ -252,11 +252,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- /* Use guest supplied MAS2_G and MAS2_E */
- ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
-
-- /* Mark the page accessed */
-- kvm_set_pfn_accessed(pfn);
--
-- if (tlbe_is_writable(gtlbe))
-- kvm_set_pfn_dirty(pfn);
-+ return tlbe_is_writable(gtlbe);
- }
-
- static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
-@@ -337,6 +333,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- unsigned int wimg = 0;
- pgd_t *pgdir;
- unsigned long flags;
-+ bool writable = false;
-
- /* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
-@@ -490,7 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- goto out;
- }
- }
-- kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ if (writable)
-+ kvm_set_pfn_dirty(pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
---
-2.39.5
-
+++ /dev/null
-From 61ba754bed089e86c5b4672d769b79d834e48c25 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:55 -0700
-Subject: KVM: PPC: e500: Mark "struct page" pfn accessed before dropping
- mmu_lock
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit 84cf78dcd9d65c45ab73998d4ad50f433d53fb93 ]
-
-Mark pages accessed before dropping mmu_lock when faulting in guest memory
-so that shadow_map() can convert to kvm_release_faultin_page() without
-tripping its lockdep assertion on mmu_lock being held. Marking pages
-accessed outside of mmu_lock is ok (not great, but safe), but marking
-pages _dirty_ outside of mmu_lock can make filesystems unhappy.
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-54-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index 738556b7f175c..411385ad583b8 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -498,11 +498,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- kvmppc_mmu_flush_icache(pfn);
-
- out:
-- spin_unlock(&kvm->mmu_lock);
--
- /* Drop refcount on page, so that mmu notifiers can clear it */
- kvm_release_pfn_clean(pfn);
--
-+ spin_unlock(&kvm->mmu_lock);
- return ret;
- }
-
---
-2.39.5
-
+++ /dev/null
-From e8ee34eaa9f807316d3d23c39f4eaa064e2c8708 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:56 -0700
-Subject: KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit 419cfb983ca93e75e905794521afefcfa07988bb ]
-
-Convert PPC e500 to use __kvm_faultin_pfn()+kvm_release_faultin_page(),
-and continue the inexorable march towards the demise of
-kvm_pfn_to_refcounted_page().
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-55-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index 411385ad583b8..d3b0031c9586f 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -322,6 +322,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- {
- struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
-+ struct page *page = NULL;
- unsigned long hva;
- int pfnmap = 0;
- int tsize = BOOK3E_PAGESZ_4K;
-@@ -443,7 +444,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-
- if (likely(!pfnmap)) {
- tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
-- pfn = gfn_to_pfn_memslot(slot, gfn);
-+ pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
- if (is_error_noslot_pfn(pfn)) {
- if (printk_ratelimit())
- pr_err("%s: real page not found for gfn %lx\n",
-@@ -488,8 +489,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- }
- }
- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-- if (writable)
-- kvm_set_pfn_dirty(pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-@@ -498,8 +497,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- kvmppc_mmu_flush_icache(pfn);
-
- out:
-- /* Drop refcount on page, so that mmu notifiers can clear it */
-- kvm_release_pfn_clean(pfn);
-+ kvm_release_faultin_page(kvm, page, !!ret, writable);
- spin_unlock(&kvm->mmu_lock);
- return ret;
- }
---
-2.39.5
-
selftests-net-ipsec-fix-null-pointer-dereference-in-.patch
input-allocate-keycode-for-phone-linking.patch
platform-x86-acer-wmi-ignore-ac-events.patch
-kvm-ppc-e500-mark-struct-page-dirty-in-kvmppc_e500_s.patch
-kvm-ppc-e500-mark-struct-page-pfn-accessed-before-dr.patch
-kvm-ppc-e500-use-__kvm_faultin_pfn-to-handle-page-fa.patch
-kvm-e500-always-restore-irqs.patch
usb-chipidea-ci_hdrc_imx-use-dev_err_probe.patch
usb-chipidea-ci_hdrc_imx-convert-to-platform-remove-.patch
usb-chipidea-ci_hdrc_imx-decrement-device-s-refcount.patch
+++ /dev/null
-From 2cb3930e9823f06d7b4a09c7fa4ad8ca577dda41 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sun, 12 Jan 2025 10:34:44 +0100
-Subject: KVM: e500: always restore irqs
-
-From: Paolo Bonzini <pbonzini@redhat.com>
-
-[ Upstream commit 87ecfdbc699cc95fac73291b52650283ddcf929d ]
-
-If find_linux_pte fails, IRQs will not be restored. This is unlikely
-to happen in practice since it would have been reported as hanging
-hosts, but it should of course be fixed anyway.
-
-Cc: stable@vger.kernel.org
-Reported-by: Sean Christopherson <seanjc@google.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 4 ++--
- 1 file changed, 2 insertions(+), 2 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index 75a9e6132db66..21648d2589a3a 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -479,7 +479,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- if (pte_present(pte)) {
- wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
- MAS2_WIMGE_MASK;
-- local_irq_restore(flags);
- } else {
- local_irq_restore(flags);
- pr_err_ratelimited("%s: pte not present: gfn %lx,pfn %lx\n",
-@@ -488,8 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- goto out;
- }
- }
-- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ local_irq_restore(flags);
-
-+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-
---
-2.39.5
-
+++ /dev/null
-From 60e35439605af017370d267d54e82a8e5685e90a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:54 -0700
-Subject: KVM: PPC: e500: Mark "struct page" dirty in kvmppc_e500_shadow_map()
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit c9be85dabb376299504e0d391d15662c0edf8273 ]
-
-Mark the underlying page as dirty in kvmppc_e500_ref_setup()'s sole
-caller, kvmppc_e500_shadow_map(), which will allow converting e500 to
-__kvm_faultin_pfn() + kvm_release_faultin_page() without having to do
-a weird dance between ref_setup() and shadow_map().
-
-Opportunistically drop the redundant kvm_set_pfn_accessed(), as
-shadow_map() puts the page via kvm_release_pfn_clean().
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-53-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 13 ++++++-------
- 1 file changed, 6 insertions(+), 7 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index 7154bd424d243..b40d31cd44f7f 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -242,7 +242,7 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
- return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
- }
-
--static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
-+static inline bool kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- kvm_pfn_t pfn, unsigned int wimg)
- {
-@@ -252,11 +252,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- /* Use guest supplied MAS2_G and MAS2_E */
- ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
-
-- /* Mark the page accessed */
-- kvm_set_pfn_accessed(pfn);
--
-- if (tlbe_is_writable(gtlbe))
-- kvm_set_pfn_dirty(pfn);
-+ return tlbe_is_writable(gtlbe);
- }
-
- static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
-@@ -337,6 +333,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- unsigned int wimg = 0;
- pgd_t *pgdir;
- unsigned long flags;
-+ bool writable = false;
-
- /* used to check for invalidations in progress */
- mmu_seq = kvm->mmu_notifier_seq;
-@@ -490,7 +487,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- goto out;
- }
- }
-- kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-+ if (writable)
-+ kvm_set_pfn_dirty(pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
---
-2.39.5
-
+++ /dev/null
-From 2816dd4eaca5820f3b1b03238cfb205b0778c159 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:55 -0700
-Subject: KVM: PPC: e500: Mark "struct page" pfn accessed before dropping
- mmu_lock
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit 84cf78dcd9d65c45ab73998d4ad50f433d53fb93 ]
-
-Mark pages accessed before dropping mmu_lock when faulting in guest memory
-so that shadow_map() can convert to kvm_release_faultin_page() without
-tripping its lockdep assertion on mmu_lock being held. Marking pages
-accessed outside of mmu_lock is ok (not great, but safe), but marking
-pages _dirty_ outside of mmu_lock can make filesystems unhappy.
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-54-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 4 +---
- 1 file changed, 1 insertion(+), 3 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index b40d31cd44f7f..80f0b30abc74c 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -498,11 +498,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- kvmppc_mmu_flush_icache(pfn);
-
- out:
-- spin_unlock(&kvm->mmu_lock);
--
- /* Drop refcount on page, so that mmu notifiers can clear it */
- kvm_release_pfn_clean(pfn);
--
-+ spin_unlock(&kvm->mmu_lock);
- return ret;
- }
-
---
-2.39.5
-
+++ /dev/null
-From 5ad6ec904ff0ce46f300f73476e9c49aa78cac99 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Thu, 10 Oct 2024 11:23:56 -0700
-Subject: KVM: PPC: e500: Use __kvm_faultin_pfn() to handle page faults
-
-From: Sean Christopherson <seanjc@google.com>
-
-[ Upstream commit 419cfb983ca93e75e905794521afefcfa07988bb ]
-
-Convert PPC e500 to use __kvm_faultin_pfn()+kvm_release_faultin_page(),
-and continue the inexorable march towards the demise of
-kvm_pfn_to_refcounted_page().
-
-Signed-off-by: Sean Christopherson <seanjc@google.com>
-Tested-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
-Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-Message-ID: <20241010182427.1434605-55-seanjc@google.com>
-Stable-dep-of: 87ecfdbc699c ("KVM: e500: always restore irqs")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- arch/powerpc/kvm/e500_mmu_host.c | 8 +++-----
- 1 file changed, 3 insertions(+), 5 deletions(-)
-
-diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
-index 80f0b30abc74c..75a9e6132db66 100644
---- a/arch/powerpc/kvm/e500_mmu_host.c
-+++ b/arch/powerpc/kvm/e500_mmu_host.c
-@@ -322,6 +322,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- {
- struct kvm_memory_slot *slot;
- unsigned long pfn = 0; /* silence GCC warning */
-+ struct page *page = NULL;
- unsigned long hva;
- int pfnmap = 0;
- int tsize = BOOK3E_PAGESZ_4K;
-@@ -443,7 +444,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
-
- if (likely(!pfnmap)) {
- tsize_pages = 1UL << (tsize + 10 - PAGE_SHIFT);
-- pfn = gfn_to_pfn_memslot(slot, gfn);
-+ pfn = __kvm_faultin_pfn(slot, gfn, FOLL_WRITE, NULL, &page);
- if (is_error_noslot_pfn(pfn)) {
- if (printk_ratelimit())
- pr_err("%s: real page not found for gfn %lx\n",
-@@ -488,8 +489,6 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- }
- }
- writable = kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
-- if (writable)
-- kvm_set_pfn_dirty(pfn);
-
- kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
-@@ -498,8 +497,7 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
- kvmppc_mmu_flush_icache(pfn);
-
- out:
-- /* Drop refcount on page, so that mmu notifiers can clear it */
-- kvm_release_pfn_clean(pfn);
-+ kvm_release_faultin_page(kvm, page, !!ret, writable);
- spin_unlock(&kvm->mmu_lock);
- return ret;
- }
---
-2.39.5
-
mfd-lpc_ich-add-another-gemini-lake-isa-bridge-pci-d.patch
hid-wacom-add-pci-wacom-device-support.patch
apei-ghes-have-ghes-honor-the-panic-setting.patch
-kvm-ppc-e500-mark-struct-page-dirty-in-kvmppc_e500_s.patch
-kvm-ppc-e500-mark-struct-page-pfn-accessed-before-dr.patch
-kvm-ppc-e500-use-__kvm_faultin_pfn-to-handle-page-fa.patch
-kvm-e500-always-restore-irqs.patch
tasklet-introduce-new-initialization-api.patch
net-usb-rtl8150-use-new-tasklet-api.patch
net-usb-rtl8150-enable-basic-endpoint-checking.patch