--- /dev/null
+From e3417ab75ab2e7dca6372a1bfa26b1be3ac5889e Mon Sep 17 00:00:00 2001
+From: Sean Christopherson <seanjc@google.com>
+Date: Mon, 5 May 2025 11:03:00 -0700
+Subject: KVM: SVM: Set/clear SRSO's BP_SPEC_REDUCE on 0 <=> 1 VM count transitions
+
+From: Sean Christopherson <seanjc@google.com>
+
+commit e3417ab75ab2e7dca6372a1bfa26b1be3ac5889e upstream.
+
+Set the magic BP_SPEC_REDUCE bit to mitigate SRSO when running VMs if and
+only if KVM has at least one active VM. Leaving the bit set at all times
+unfortunately degrades performance by a wee bit more than expected.
+
+Use a dedicated spinlock and counter instead of hooking virtualization
+enablement, as changing the behavior of kvm.enable_virt_at_load based on
+SRSO_BP_SPEC_REDUCE is painful, and has its own drawbacks, e.g. could
+result in performance issues for flows that are sensitive to VM creation
+latency.
+
+Defer setting BP_SPEC_REDUCE until VMRUN is imminent to avoid impacting
+performance on CPUs that aren't running VMs, e.g. if a setup is using
+housekeeping CPUs. Setting BP_SPEC_REDUCE in task context, i.e. without
+blasting IPIs to all CPUs, also helps avoid serializing 1<=>N transitions
+without incurring a gross amount of complexity (see the Link for details
+on how ugly coordinating via IPIs gets).
+
+Link: https://lore.kernel.org/all/aBOnzNCngyS_pQIW@google.com
+Fixes: 8442df2b49ed ("x86/bugs: KVM: Add support for SRSO_MSR_FIX")
+Reported-by: Michael Larabel <Michael@michaellarabel.com>
+Closes: https://www.phoronix.com/review/linux-615-amd-regression
+Cc: Borislav Petkov <bp@alien8.de>
+Tested-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20250505180300.973137-1-seanjc@google.com
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kvm/svm/svm.c | 71 ++++++++++++++++++++++++++++++++++++++++++++-----
+ arch/x86/kvm/svm/svm.h | 2 +
+ 2 files changed, 67 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -608,9 +608,6 @@ static void svm_disable_virtualization_c
+ kvm_cpu_svm_disable();
+
+ amd_pmu_disable_virt();
+-
+- if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+- msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+ }
+
+ static int svm_enable_virtualization_cpu(void)
+@@ -688,9 +685,6 @@ static int svm_enable_virtualization_cpu
+ rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
+ }
+
+- if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
+- msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+-
+ return 0;
+ }
+
+@@ -1513,6 +1507,63 @@ static void svm_vcpu_free(struct kvm_vcp
+ __free_pages(virt_to_page(svm->msrpm), get_order(MSRPM_SIZE));
+ }
+
++#ifdef CONFIG_CPU_MITIGATIONS
++static DEFINE_SPINLOCK(srso_lock);
++static atomic_t srso_nr_vms;
++
++static void svm_srso_clear_bp_spec_reduce(void *ign)
++{
++ struct svm_cpu_data *sd = this_cpu_ptr(&svm_data);
++
++ if (!sd->bp_spec_reduce_set)
++ return;
++
++ msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
++ sd->bp_spec_reduce_set = false;
++}
++
++static void svm_srso_vm_destroy(void)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
++ return;
++
++ if (atomic_dec_return(&srso_nr_vms))
++ return;
++
++ guard(spinlock)(&srso_lock);
++
++ /*
++ * Verify a new VM didn't come along, acquire the lock, and increment
++ * the count before this task acquired the lock.
++ */
++ if (atomic_read(&srso_nr_vms))
++ return;
++
++ on_each_cpu(svm_srso_clear_bp_spec_reduce, NULL, 1);
++}
++
++static void svm_srso_vm_init(void)
++{
++ if (!cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
++ return;
++
++ /*
++ * Acquire the lock on 0 => 1 transitions to ensure a potential 1 => 0
++ * transition, i.e. destroying the last VM, is fully complete, e.g. so
++ * that a delayed IPI doesn't clear BP_SPEC_REDUCE after a vCPU runs.
++ */
++ if (atomic_inc_not_zero(&srso_nr_vms))
++ return;
++
++ guard(spinlock)(&srso_lock);
++
++ atomic_inc(&srso_nr_vms);
++}
++#else
++static void svm_srso_vm_init(void) { }
++static void svm_srso_vm_destroy(void) { }
++#endif
++
+ static void svm_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+ {
+ struct vcpu_svm *svm = to_svm(vcpu);
+@@ -1545,6 +1596,11 @@ static void svm_prepare_switch_to_guest(
+ (!boot_cpu_has(X86_FEATURE_V_TSC_AUX) || !sev_es_guest(vcpu->kvm)))
+ kvm_set_user_return_msr(tsc_aux_uret_slot, svm->tsc_aux, -1ull);
+
++ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE) &&
++ !sd->bp_spec_reduce_set) {
++ sd->bp_spec_reduce_set = true;
++ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
++ }
+ svm->guest_state_loaded = true;
+ }
+
+@@ -5010,6 +5066,8 @@ static void svm_vm_destroy(struct kvm *k
+ {
+ avic_vm_destroy(kvm);
+ sev_vm_destroy(kvm);
++
++ svm_srso_vm_destroy();
+ }
+
+ static int svm_vm_init(struct kvm *kvm)
+@@ -5035,6 +5093,7 @@ static int svm_vm_init(struct kvm *kvm)
+ return ret;
+ }
+
++ svm_srso_vm_init();
+ return 0;
+ }
+
+--- a/arch/x86/kvm/svm/svm.h
++++ b/arch/x86/kvm/svm/svm.h
+@@ -335,6 +335,8 @@ struct svm_cpu_data {
+ u32 next_asid;
+ u32 min_asid;
+
++ bool bp_spec_reduce_set;
++
+ struct vmcb *save_area;
+ unsigned long save_area_pa;
+
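
The changelog above describes a counting scheme that is easy to get subtly wrong: an atomic VM count, with the spinlock taken only on the 0 <=> 1 edges and the count re-checked under the lock. Below is a minimal user-space sketch of that same pattern, for illustration only: a pthread mutex stands in for srso_lock, a boolean stands in for the per-CPU BP_SPEC_REDUCE MSR bit (which the real code only sets lazily just before VMRUN), and the function names are invented.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t srso_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int nr_vms;
static bool mitigation_armed;

static void vm_init(void)
{
	/* Fast path: a VM already exists, just bump the count ("inc_not_zero"). */
	int old = atomic_load(&nr_vms);

	while (old && !atomic_compare_exchange_weak(&nr_vms, &old, old + 1))
		;
	if (old)
		return;

	/* 0 => 1: take the lock so a racing 1 => 0 teardown finishes first. */
	pthread_mutex_lock(&srso_lock);
	atomic_fetch_add(&nr_vms, 1);
	pthread_mutex_unlock(&srso_lock);
}

static void vm_destroy(void)
{
	/* Only the task that drops the count to zero may disarm anything. */
	if (atomic_fetch_sub(&nr_vms, 1) != 1)
		return;

	pthread_mutex_lock(&srso_lock);
	/* Re-check: a new VM may have raced in before we took the lock. */
	if (atomic_load(&nr_vms) == 0)
		mitigation_armed = false;	/* kernel: clear the MSR bit on each CPU */
	pthread_mutex_unlock(&srso_lock);
}

int main(void)
{
	vm_init();
	mitigation_armed = true;		/* kernel: set lazily before the first VMRUN */
	vm_destroy();
	printf("armed after last VM: %d\n", mitigation_armed);
	return 0;
}
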
--- /dev/null
+From stable+bounces-180814-greg=kroah.com@vger.kernel.org Sun Sep 21 16:40:07 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Sep 2025 10:39:53 -0400
+Subject: mm: add folio_expected_ref_count() for reference count calculation
+To: stable@vger.kernel.org
+Cc: Shivank Garg <shivankg@amd.com>, David Hildenbrand <david@redhat.com>, Matthew Wilcox <willy@infradead.org>, Alistair Popple <apopple@nvidia.com>, Dave Kleikamp <shaggy@kernel.org>, Donet Tom <donettom@linux.ibm.com>, Jane Chu <jane.chu@oracle.com>, Kefeng Wang <wangkefeng.wang@huawei.com>, Zi Yan <ziy@nvidia.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250921143954.2925079-1-sashal@kernel.org>
+
+From: Shivank Garg <shivankg@amd.com>
+
+[ Upstream commit 86ebd50224c0734d965843260d0dc057a9431c61 ]
+
+Patch series "JFS: Implement migrate_folio for jfs_metapage_aops", v5.
+
+This patchset addresses a warning that occurs during memory compaction due
+to JFS's missing migrate_folio operation. The warning was introduced by
+commit 7ee3647243e5 ("migrate: Remove call to ->writepage") which added
+explicit warnings when a filesystem doesn't implement migrate_folio.
+
+syzbot reported the following [1]:
+ jfs_metapage_aops does not implement migrate_folio
+ WARNING: CPU: 1 PID: 5861 at mm/migrate.c:955 fallback_migrate_folio mm/migrate.c:953 [inline]
+ WARNING: CPU: 1 PID: 5861 at mm/migrate.c:955 move_to_new_folio+0x70e/0x840 mm/migrate.c:1007
+ Modules linked in:
+ CPU: 1 UID: 0 PID: 5861 Comm: syz-executor280 Not tainted 6.15.0-rc1-next-20250411-syzkaller #0 PREEMPT(full)
+ Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2025
+ RIP: 0010:fallback_migrate_folio mm/migrate.c:953 [inline]
+ RIP: 0010:move_to_new_folio+0x70e/0x840 mm/migrate.c:1007
+
+To fix this issue, this series implements metapage_migrate_folio() for JFS
+which handles both single and multiple metapages per page configurations.
+
+While most filesystems leverage existing migration implementations like
+filemap_migrate_folio(), buffer_migrate_folio_norefs() or
+buffer_migrate_folio() (which internally used folio_expected_refs()),
+JFS's metapage architecture requires special handling of its private data
+during migration. To support this, this series introduces the
+folio_expected_ref_count(), which calculates external references to a
+folio from page/swap cache, private data, and page table mappings.
+
+This standardized implementation replaces the previous ad-hoc
+folio_expected_refs() function and enables JFS to accurately determine
+whether a folio has unexpected references before attempting migration.
+
+Implement folio_expected_ref_count() to calculate expected folio reference
+counts from:
+- Page/swap cache (1 per page)
+- Private data (1)
+- Page table mappings (1 per map)
+
+While originally needed for page migration operations, this improved
+implementation standardizes reference counting by consolidating all
+refcount contributors into a single, reusable function that can benefit
+any subsystem needing to detect unexpected references to folios.
+
+folio_expected_ref_count() returns the sum of these external
+references without including any reference the caller itself might hold.
+Callers comparing against the actual folio_ref_count() must account for
+their own references separately.
+
+Link: https://syzkaller.appspot.com/bug?extid=8bb6fd945af4e0ad9299 [1]
+Link: https://lkml.kernel.org/r/20250430100150.279751-1-shivankg@amd.com
+Link: https://lkml.kernel.org/r/20250430100150.279751-2-shivankg@amd.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Shivank Garg <shivankg@amd.com>
+Suggested-by: Matthew Wilcox <willy@infradead.org>
+Co-developed-by: David Hildenbrand <david@redhat.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Dave Kleikamp <shaggy@kernel.org>
+Cc: Donet Tom <donettom@linux.ibm.com>
+Cc: Jane Chu <jane.chu@oracle.com>
+Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 98c6d259319e ("mm/gup: check ref_count instead of lru before migration")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/mm.h | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++
+ mm/migrate.c | 22 +++------------------
+ 2 files changed, 59 insertions(+), 18 deletions(-)
+
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -2200,6 +2200,61 @@ static inline bool folio_likely_mapped_s
+ return atomic_read(&folio->_mapcount) > 0;
+ }
+
++/**
++ * folio_expected_ref_count - calculate the expected folio refcount
++ * @folio: the folio
++ *
++ * Calculate the expected folio refcount, taking references from the pagecache,
++ * swapcache, PG_private and page table mappings into account. Useful in
++ * combination with folio_ref_count() to detect unexpected references (e.g.,
++ * GUP or other temporary references).
++ *
++ * Does currently not consider references from the LRU cache. If the folio
++ * was isolated from the LRU (which is the case during migration or split),
++ * the LRU cache does not apply.
++ *
++ * Calling this function on an unmapped folio -- !folio_mapped() -- that is
++ * locked will return a stable result.
++ *
++ * Calling this function on a mapped folio will not result in a stable result,
++ * because nothing stops additional page table mappings from coming (e.g.,
++ * fork()) or going (e.g., munmap()).
++ *
++ * Calling this function without the folio lock will also not result in a
++ * stable result: for example, the folio might get dropped from the swapcache
++ * concurrently.
++ *
++ * However, even when called without the folio lock or on a mapped folio,
++ * this function can be used to detect unexpected references early (for example,
++ * if it makes sense to even lock the folio and unmap it).
++ *
++ * The caller must add any reference (e.g., from folio_try_get()) it might be
++ * holding itself to the result.
++ *
++ * Returns the expected folio refcount.
++ */
++static inline int folio_expected_ref_count(const struct folio *folio)
++{
++ const int order = folio_order(folio);
++ int ref_count = 0;
++
++ if (WARN_ON_ONCE(folio_test_slab(folio)))
++ return 0;
++
++ if (folio_test_anon(folio)) {
++ /* One reference per page from the swapcache. */
++ ref_count += folio_test_swapcache(folio) << order;
++ } else if (!((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS)) {
++ /* One reference per page from the pagecache. */
++ ref_count += !!folio->mapping << order;
++ /* One reference from PG_private. */
++ ref_count += folio_test_private(folio);
++ }
++
++ /* One reference per page table mapping. */
++ return ref_count + folio_mapcount(folio);
++}
++
+ #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE
+ static inline int arch_make_folio_accessible(struct folio *folio)
+ {
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -453,20 +453,6 @@ unlock:
+ }
+ #endif
+
+-static int folio_expected_refs(struct address_space *mapping,
+- struct folio *folio)
+-{
+- int refs = 1;
+- if (!mapping)
+- return refs;
+-
+- refs += folio_nr_pages(folio);
+- if (folio_test_private(folio))
+- refs++;
+-
+- return refs;
+-}
+-
+ /*
+ * Replace the folio in the mapping.
+ *
+@@ -609,7 +595,7 @@ static int __folio_migrate_mapping(struc
+ int folio_migrate_mapping(struct address_space *mapping,
+ struct folio *newfolio, struct folio *folio, int extra_count)
+ {
+- int expected_count = folio_expected_refs(mapping, folio) + extra_count;
++ int expected_count = folio_expected_ref_count(folio) + extra_count + 1;
+
+ if (folio_ref_count(folio) != expected_count)
+ return -EAGAIN;
+@@ -626,7 +612,7 @@ int migrate_huge_page_move_mapping(struc
+ struct folio *dst, struct folio *src)
+ {
+ XA_STATE(xas, &mapping->i_pages, folio_index(src));
+- int rc, expected_count = folio_expected_refs(mapping, src);
++ int rc, expected_count = folio_expected_ref_count(src) + 1;
+
+ if (folio_ref_count(src) != expected_count)
+ return -EAGAIN;
+@@ -756,7 +742,7 @@ static int __migrate_folio(struct addres
+ struct folio *src, void *src_private,
+ enum migrate_mode mode)
+ {
+- int rc, expected_count = folio_expected_refs(mapping, src);
++ int rc, expected_count = folio_expected_ref_count(src) + 1;
+
+ /* Check whether src does not have extra refs before we do more work */
+ if (folio_ref_count(src) != expected_count)
+@@ -844,7 +830,7 @@ static int __buffer_migrate_folio(struct
+ return migrate_folio(mapping, dst, src, mode);
+
+ /* Check whether page does not have extra refs before we do more work */
+- expected_count = folio_expected_refs(mapping, src);
++ expected_count = folio_expected_ref_count(src) + 1;
+ if (folio_ref_count(src) != expected_count)
+ return -EAGAIN;
+
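
As a rough illustration of the arithmetic folio_expected_ref_count() performs (one reference per page from the page or swap cache, one for PG_private, one per page table mapping, never counting the caller's own reference), here is a small stand-alone model. The structure and its fields are invented for the example and are not the kernel's folio layout.

#include <stdbool.h>
#include <stdio.h>

struct folio_model {
	bool anon;		/* anonymous folio? */
	bool swapcache;		/* only meaningful if anon */
	bool has_mapping;	/* in the pagecache */
	bool has_private;	/* PG_private set */
	int  order;		/* folio spans 1 << order pages */
	int  mapcount;		/* page table mappings */
};

static int expected_ref_count(const struct folio_model *f)
{
	int refs = 0;

	if (f->anon)
		refs += (int)f->swapcache << f->order;	/* one per page from the swapcache */
	else {
		refs += (int)f->has_mapping << f->order;/* one per page from the pagecache */
		refs += f->has_private;			/* one from PG_private */
	}
	return refs + f->mapcount;			/* one per page table mapping */
}

int main(void)
{
	/* Order-2 pagecache folio with private data, mapped three times. */
	struct folio_model f = { .has_mapping = true, .has_private = true,
				 .order = 2, .mapcount = 3 };

	/* A caller holding one pin of its own expects 4 + 1 + 3 + 1 = 9. */
	printf("expected refs (plus caller's own): %d\n", expected_ref_count(&f) + 1);
	return 0;
}

The "+ 1" in the final comparison mirrors how the migration callers above account for the reference they themselves hold.
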
--- /dev/null
+From stable+bounces-180815-greg=kroah.com@vger.kernel.org Sun Sep 21 16:40:08 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 21 Sep 2025 10:39:54 -0400
+Subject: mm/gup: check ref_count instead of lru before migration
+To: stable@vger.kernel.org
+Cc: Hugh Dickins <hughd@google.com>, Will Deacon <will@kernel.org>, Kiryl Shutsemau <kas@kernel.org>, David Hildenbrand <david@redhat.com>, "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>, Axel Rasmussen <axelrasmussen@google.com>, Chris Li <chrisl@kernel.org>, Christoph Hellwig <hch@infradead.org>, Jason Gunthorpe <jgg@ziepe.ca>, Johannes Weiner <hannes@cmpxchg.org>, John Hubbard <jhubbard@nvidia.com>, Keir Fraser <keirf@google.com>, Konstantin Khlebnikov <koct9i@gmail.com>, Li Zhe <lizhe.67@bytedance.com>, "Matthew Wilcox (Oracle)" <willy@infradead.org>, Peter Xu <peterx@redhat.com>, Rik van Riel <riel@surriel.com>, Shivank Garg <shivankg@amd.com>, Vlastimil Babka <vbabka@suse.cz>, Wei Xu <weixugc@google.com>, yangge <yangge1116@126.com>, Yuanchu Xie <yuanchu@google.com>, Yu Zhao <yuzhao@google.com>, Andrew Morton <akpm@linux-foundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250921143954.2925079-2-sashal@kernel.org>
+
+From: Hugh Dickins <hughd@google.com>
+
+[ Upstream commit 98c6d259319ecf6e8d027abd3f14b81324b8c0ad ]
+
+Patch series "mm: better GUP pin lru_add_drain_all()", v2.
+
+Series of lru_add_drain_all()-related patches, arising from recent mm/gup
+migration report from Will Deacon.
+
+This patch (of 5):
+
+Will Deacon reports:-
+
+When taking a longterm GUP pin via pin_user_pages(),
+__gup_longterm_locked() tries to migrate target folios that should not be
+longterm pinned, for example because they reside in a CMA region or
+movable zone. This is done by first pinning all of the target folios
+anyway, collecting all of the longterm-unpinnable target folios into a
+list, dropping the pins that were just taken and finally handing the list
+off to migrate_pages() for the actual migration.
+
+It is critically important that no unexpected references are held on the
+folios being migrated, otherwise the migration will fail and
+pin_user_pages() will return -ENOMEM to its caller. Unfortunately, it is
+relatively easy to observe migration failures when running pKVM (which
+uses pin_user_pages() on crosvm's virtual address space to resolve stage-2
+page faults from the guest) on a 6.15-based Pixel 6 device and this
+results in the VM terminating prematurely.
+
+In the failure case, 'crosvm' has called mlock(MLOCK_ONFAULT) on its
+mapping of guest memory prior to the pinning. Subsequently, when
+pin_user_pages() walks the page-table, the relevant 'pte' is not present
+and so the faulting logic allocates a new folio, mlocks it with
+mlock_folio() and maps it in the page-table.
+
+Since commit 2fbb0c10d1e8 ("mm/munlock: mlock_page() munlock_page() batch
+by pagevec"), mlock/munlock operations on a folio (formerly page), are
+deferred. For example, mlock_folio() takes an additional reference on the
+target folio before placing it into a per-cpu 'folio_batch' for later
+processing by mlock_folio_batch(), which drops the refcount once the
+operation is complete. Processing of the batches is coupled with the LRU
+batch logic and can be forcefully drained with lru_add_drain_all() but as
+long as a folio remains unprocessed on the batch, its refcount will be
+elevated.
+
+This deferred batching therefore interacts poorly with the pKVM pinning
+scenario as we can find ourselves in a situation where the migration code
+fails to migrate a folio due to the elevated refcount from the pending
+mlock operation.
+
+Hugh Dickins adds:-
+
+!folio_test_lru() has never been a very reliable way to tell if an
+lru_add_drain_all() is worth calling, to remove LRU cache references to
+make the folio migratable: the LRU flag may be set even while the folio is
+held with an extra reference in a per-CPU LRU cache.
+
+5.18 commit 2fbb0c10d1e8 may have made it more unreliable. Then 6.11
+commit 33dfe9204f29 ("mm/gup: clear the LRU flag of a page before adding
+to LRU batch") tried to make it reliable, by moving LRU flag clearing; but
+missed the mlock/munlock batches, so still unreliable as reported.
+
+And it turns out to be difficult to extend 33dfe9204f29's LRU flag
+clearing to the mlock/munlock batches: if they do benefit from batching,
+mlock/munlock cannot be so effective when easily suppressed while !LRU.
+
+Instead, switch to an expected ref_count check, which was more reliable
+all along: some more false positives (unhelpful drains) than before, and
+never a guarantee that the folio will prove migratable, but better.
+
+Note on PG_private_2: ceph and nfs are still using the deprecated
+PG_private_2 flag, with the aid of netfs and filemap support functions.
+Although it is consistently matched by an increment of folio ref_count,
+folio_expected_ref_count() intentionally does not recognize it, and ceph
+folio migration currently depends on that for PG_private_2 folios to be
+rejected. New references to the deprecated flag are discouraged, so do
+not add it into the collect_longterm_unpinnable_folios() calculation: but
+longterm pinning of transiently PG_private_2 ceph and nfs folios (an
+uncommon case) may invoke a redundant lru_add_drain_all(). And this makes
+easy the backport to earlier releases: up to and including 6.12, btrfs
+also used PG_private_2, but without a ref_count increment.
+
+Note for stable backports: requires 6.16 commit 86ebd50224c0 ("mm:
+add folio_expected_ref_count() for reference count calculation").
+
+Link: https://lkml.kernel.org/r/41395944-b0e3-c3ac-d648-8ddd70451d28@google.com
+Link: https://lkml.kernel.org/r/bd1f314a-fca1-8f19-cac0-b936c9614557@google.com
+Fixes: 9a4e9f3b2d73 ("mm: update get_user_pages_longterm to migrate pages allocated from CMA region")
+Signed-off-by: Hugh Dickins <hughd@google.com>
+Reported-by: Will Deacon <will@kernel.org>
+Closes: https://lore.kernel.org/linux-mm/20250815101858.24352-1-will@kernel.org/
+Acked-by: Kiryl Shutsemau <kas@kernel.org>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: "Aneesh Kumar K.V" <aneesh.kumar@kernel.org>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: Chris Li <chrisl@kernel.org>
+Cc: Christoph Hellwig <hch@infradead.org>
+Cc: Jason Gunthorpe <jgg@ziepe.ca>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: John Hubbard <jhubbard@nvidia.com>
+Cc: Keir Fraser <keirf@google.com>
+Cc: Konstantin Khlebnikov <koct9i@gmail.com>
+Cc: Li Zhe <lizhe.67@bytedance.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Shivank Garg <shivankg@amd.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Wei Xu <weixugc@google.com>
+Cc: yangge <yangge1116@126.com>
+Cc: Yuanchu Xie <yuanchu@google.com>
+Cc: Yu Zhao <yuzhao@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/gup.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/gup.c
++++ b/mm/gup.c
+@@ -2376,7 +2376,8 @@ static unsigned long collect_longterm_un
+ continue;
+ }
+
+- if (!folio_test_lru(folio) && drain_allow) {
++ if (drain_allow && folio_ref_count(folio) !=
++ folio_expected_ref_count(folio) + 1) {
+ lru_add_drain_all();
+ drain_allow = false;
+ }
--- /dev/null
+From stable+bounces-180710-greg=kroah.com@vger.kernel.org Fri Sep 19 23:52:47 2025
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Fri, 19 Sep 2025 23:52:23 +0200
+Subject: mptcp: pm: nl: announce deny-join-id0 flag
+To: stable@vger.kernel.org, gregkh@linuxfoundation.org, sashal@kernel.org
+Cc: MPTCP Upstream <mptcp@lists.linux.dev>, "Matthieu Baerts (NGI0)" <matttbe@kernel.org>, Marek Majkowski <marek@cloudflare.com>, Mat Martineau <martineau@kernel.org>, Jakub Kicinski <kuba@kernel.org>
+Message-ID: <20250919215222.3519719-2-matttbe@kernel.org>
+
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+
+commit 2293c57484ae64c9a3c847c8807db8c26a3a4d41 upstream.
+
+During the connection establishment, a peer can tell the other one that
+it cannot establish new subflows to the initial IP address and port by
+setting the 'C' flag [1]. Doing so makes sense when the sender is behind
+a strict NAT, operating behind a legacy Layer 4 load balancer, or using
+an anycast IP address, for example.
+
+When this 'C' flag is set, the path-managers must then not try to
+establish new subflows to the other peer's initial IP address and port.
+The in-kernel PM has access to this info, but the userspace PM didn't.
+
+The RFC8684 [1] is strict about that:
+
+ (...) therefore the receiver MUST NOT try to open any additional
+ subflows toward this address and port.
+
+So it is important to tell userspace about this, as it is responsible
+for respecting this flag.
+
+When a new connection is created and established, the Netlink events
+now contain the existing but not currently used 'flags' attribute. When
+MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 is set, it means no other subflows
+to the initial IP address and port -- info that is also part of the
+event -- can be established.
+
+Link: https://datatracker.ietf.org/doc/html/rfc8684#section-3.1-20.6 [1]
+Fixes: 702c2f646d42 ("mptcp: netlink: allow userspace-driven subflow establishment")
+Reported-by: Marek Majkowski <marek@cloudflare.com>
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/532
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-2-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflicts in mptcp_pm.yaml, because the indentation has been modified
+ in commit ec362192aa9e ("netlink: specs: fix up indentation errors"),
+ which is not in this version. Applying the same modifications, but at
+ a different level. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/netlink/specs/mptcp_pm.yaml | 4 ++--
+ include/uapi/linux/mptcp.h | 2 ++
+ include/uapi/linux/mptcp_pm.h | 4 ++--
+ net/mptcp/pm_netlink.c | 7 +++++++
+ 4 files changed, 13 insertions(+), 4 deletions(-)
+
+--- a/Documentation/netlink/specs/mptcp_pm.yaml
++++ b/Documentation/netlink/specs/mptcp_pm.yaml
+@@ -28,13 +28,13 @@ definitions:
+ traffic-patterns it can take a long time until the
+ MPTCP_EVENT_ESTABLISHED is sent.
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+- dport, server-side.
++ dport, server-side, [flags].
+ -
+ name: established
+ doc: >-
+ A MPTCP connection is established (can start new subflows).
+ Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6, sport,
+- dport, server-side.
++ dport, server-side, [flags].
+ -
+ name: closed
+ doc: >-
+--- a/include/uapi/linux/mptcp.h
++++ b/include/uapi/linux/mptcp.h
+@@ -31,6 +31,8 @@
+ #define MPTCP_INFO_FLAG_FALLBACK _BITUL(0)
+ #define MPTCP_INFO_FLAG_REMOTE_KEY_RECEIVED _BITUL(1)
+
++#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 _BITUL(0)
++
+ #define MPTCP_PM_ADDR_FLAG_SIGNAL (1 << 0)
+ #define MPTCP_PM_ADDR_FLAG_SUBFLOW (1 << 1)
+ #define MPTCP_PM_ADDR_FLAG_BACKUP (1 << 2)
+--- a/include/uapi/linux/mptcp_pm.h
++++ b/include/uapi/linux/mptcp_pm.h
+@@ -16,10 +16,10 @@
+ * good time to allocate memory and send ADD_ADDR if needed. Depending on the
+ * traffic-patterns it can take a long time until the MPTCP_EVENT_ESTABLISHED
+ * is sent. Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- * sport, dport, server-side.
++ * sport, dport, server-side, [flags].
+ * @MPTCP_EVENT_ESTABLISHED: A MPTCP connection is established (can start new
+ * subflows). Attributes: token, family, saddr4 | saddr6, daddr4 | daddr6,
+- * sport, dport, server-side.
++ * sport, dport, server-side, [flags].
+ * @MPTCP_EVENT_CLOSED: A MPTCP connection has stopped. Attribute: token.
+ * @MPTCP_EVENT_ANNOUNCED: A new address has been announced by the peer.
+ * Attributes: token, rem_id, family, daddr4 | daddr6 [, dport].
+--- a/net/mptcp/pm_netlink.c
++++ b/net/mptcp/pm_netlink.c
+@@ -2211,6 +2211,7 @@ static int mptcp_event_created(struct sk
+ const struct sock *ssk)
+ {
+ int err = nla_put_u32(skb, MPTCP_ATTR_TOKEN, READ_ONCE(msk->token));
++ u16 flags = 0;
+
+ if (err)
+ return err;
+@@ -2218,6 +2219,12 @@ static int mptcp_event_created(struct sk
+ if (nla_put_u8(skb, MPTCP_ATTR_SERVER_SIDE, READ_ONCE(msk->pm.server_side)))
+ return -EMSGSIZE;
+
++ if (READ_ONCE(msk->pm.remote_deny_join_id0))
++ flags |= MPTCP_PM_EV_FLAG_DENY_JOIN_ID0;
++
++ if (flags && nla_put_u16(skb, MPTCP_ATTR_FLAGS, flags))
++ return -EMSGSIZE;
++
+ return mptcp_event_add_subflow(skb, ssk);
+ }
+
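
For completeness, a hedged sketch of how a user-space path manager might honour the new flag once it has extracted the u16 MPTCP_ATTR_FLAGS payload from a created/established event; the netlink parsing is elided and the helper name is made up for the example.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#ifndef MPTCP_PM_EV_FLAG_DENY_JOIN_ID0
#define MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 (1 << 0)	/* mirrors the uapi hunk above */
#endif

/* 'flags' is assumed to be the u16 payload of MPTCP_ATTR_FLAGS taken from an
 * MPTCP_EVENT_CREATED or MPTCP_EVENT_ESTABLISHED message (absent attribute == 0).
 */
static bool may_join_initial_addr_port(uint16_t flags)
{
	/* RFC 8684 'C' flag: MUST NOT open new subflows to the initial address and port. */
	return !(flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0);
}

int main(void)
{
	uint16_t flags = MPTCP_PM_EV_FLAG_DENY_JOIN_ID0;	/* as if the peer set the 'C' flag */

	printf("may join initial address/port: %s\n",
	       may_join_initial_addr_port(flags) ? "yes" : "no");
	return 0;
}
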
io_uring-fix-incorrect-io_kiocb-reference-in-io_link.patch
platform-x86-asus-wmi-fix-rog-button-mapping-tablet-mode-on-asus-rog-z13.patch
platform-x86-asus-wmi-re-add-extra-keys-to-ignore_key_wlan-quirk.patch
+x86-bugs-add-srso_user_kernel_no-support.patch
+x86-bugs-kvm-add-support-for-srso_msr_fix.patch
+kvm-svm-set-clear-srso-s-bp_spec_reduce-on-0-1-vm-count-transitions.patch
+vmxnet3-unregister-xdp-rxq-info-in-the-reset-path.patch
+mm-add-folio_expected_ref_count-for-reference-count-calculation.patch
+mm-gup-check-ref_count-instead-of-lru-before-migration.patch
+mptcp-pm-nl-announce-deny-join-id0-flag.patch
+usb-xhci-introduce-macro-for-ring-segment-list-iteration.patch
+usb-xhci-remove-option-to-change-a-default-ring-s-trb-cycle-bit.patch
+xhci-dbc-decouple-endpoint-allocation-from-initialization.patch
+xhci-dbc-fix-full-dbc-transfer-ring-after-several-reconnects.patch
--- /dev/null
+From sashal@kernel.org Wed Sep 17 14:39:12 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 08:39:06 -0400
+Subject: usb: xhci: introduce macro for ring segment list iteration
+To: stable@vger.kernel.org
+Cc: Niklas Neronin <niklas.neronin@linux.intel.com>, Andy Shevchenko <andriy.shevchenko@linux.intel.com>, Mathias Nyman <mathias.nyman@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250917123909.514131-1-sashal@kernel.org>
+
+From: Niklas Neronin <niklas.neronin@linux.intel.com>
+
+[ Upstream commit 3f970bd06c5295e742ef4f9cf7808a3cb74a6816 ]
+
+Add a macro to streamline and standardize iteration over the ring
+segment list.
+
+xhci_for_each_ring_seg(): Iterates over the entire ring segment list.
+
+The xhci_free_segments_for_ring() function's while loop has not been
+updated to use the new macro. This function has some underlying issues,
+and as a result, it will be handled separately in a future patch.
+
+Suggested-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Niklas Neronin <niklas.neronin@linux.intel.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20241106101459.775897-11-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: a5c98e8b1398 ("xhci: dbc: Fix full DbC transfer ring after several reconnects")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-debugfs.c | 5 +----
+ drivers/usb/host/xhci-mem.c | 24 +++++++-----------------
+ drivers/usb/host/xhci.c | 20 ++++++++------------
+ drivers/usb/host/xhci.h | 3 +++
+ 4 files changed, 19 insertions(+), 33 deletions(-)
+
+--- a/drivers/usb/host/xhci-debugfs.c
++++ b/drivers/usb/host/xhci-debugfs.c
+@@ -214,14 +214,11 @@ static void xhci_ring_dump_segment(struc
+
+ static int xhci_ring_trb_show(struct seq_file *s, void *unused)
+ {
+- int i;
+ struct xhci_ring *ring = *(struct xhci_ring **)s->private;
+ struct xhci_segment *seg = ring->first_seg;
+
+- for (i = 0; i < ring->num_segs; i++) {
++ xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_ring_dump_segment(s, seg);
+- seg = seg->next;
+- }
+
+ return 0;
+ }
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -224,7 +224,6 @@ static int xhci_update_stream_segment_ma
+ struct radix_tree_root *trb_address_map,
+ struct xhci_ring *ring,
+ struct xhci_segment *first_seg,
+- struct xhci_segment *last_seg,
+ gfp_t mem_flags)
+ {
+ struct xhci_segment *seg;
+@@ -234,28 +233,22 @@ static int xhci_update_stream_segment_ma
+ if (WARN_ON_ONCE(trb_address_map == NULL))
+ return 0;
+
+- seg = first_seg;
+- do {
++ xhci_for_each_ring_seg(first_seg, seg) {
+ ret = xhci_insert_segment_mapping(trb_address_map,
+ ring, seg, mem_flags);
+ if (ret)
+ goto remove_streams;
+- if (seg == last_seg)
+- return 0;
+- seg = seg->next;
+- } while (seg != first_seg);
++ }
+
+ return 0;
+
+ remove_streams:
+ failed_seg = seg;
+- seg = first_seg;
+- do {
++ xhci_for_each_ring_seg(first_seg, seg) {
+ xhci_remove_segment_mapping(trb_address_map, seg);
+ if (seg == failed_seg)
+ return ret;
+- seg = seg->next;
+- } while (seg != first_seg);
++ }
+
+ return ret;
+ }
+@@ -267,17 +260,14 @@ static void xhci_remove_stream_mapping(s
+ if (WARN_ON_ONCE(ring->trb_address_map == NULL))
+ return;
+
+- seg = ring->first_seg;
+- do {
++ xhci_for_each_ring_seg(ring->first_seg, seg)
+ xhci_remove_segment_mapping(ring->trb_address_map, seg);
+- seg = seg->next;
+- } while (seg != ring->first_seg);
+ }
+
+ static int xhci_update_stream_mapping(struct xhci_ring *ring, gfp_t mem_flags)
+ {
+ return xhci_update_stream_segment_mapping(ring->trb_address_map, ring,
+- ring->first_seg, ring->last_seg, mem_flags);
++ ring->first_seg, mem_flags);
+ }
+
+ /* XXX: Do we need the hcd structure in all these functions? */
+@@ -438,7 +428,7 @@ int xhci_ring_expansion(struct xhci_hcd
+
+ if (ring->type == TYPE_STREAM) {
+ ret = xhci_update_stream_segment_mapping(ring->trb_address_map,
+- ring, first, last, flags);
++ ring, first, flags);
+ if (ret)
+ goto free_segments;
+ }
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -41,15 +41,15 @@ MODULE_PARM_DESC(quirks, "Bit flags for
+
+ static bool td_on_ring(struct xhci_td *td, struct xhci_ring *ring)
+ {
+- struct xhci_segment *seg = ring->first_seg;
++ struct xhci_segment *seg;
+
+ if (!td || !td->start_seg)
+ return false;
+- do {
++
++ xhci_for_each_ring_seg(ring->first_seg, seg) {
+ if (seg == td->start_seg)
+ return true;
+- seg = seg->next;
+- } while (seg && seg != ring->first_seg);
++ }
+
+ return false;
+ }
+@@ -764,14 +764,10 @@ static void xhci_clear_command_ring(stru
+ struct xhci_segment *seg;
+
+ ring = xhci->cmd_ring;
+- seg = ring->deq_seg;
+- do {
+- memset(seg->trbs, 0,
+- sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
+- seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+- cpu_to_le32(~TRB_CYCLE);
+- seg = seg->next;
+- } while (seg != ring->deq_seg);
++ xhci_for_each_ring_seg(ring->deq_seg, seg) {
++ memset(seg->trbs, 0, sizeof(union xhci_trb) * (TRBS_PER_SEGMENT - 1));
++ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
++ }
+
+ xhci_initialize_ring_info(ring, 1);
+ /*
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1263,6 +1263,9 @@ static inline const char *xhci_trb_type_
+ #define AVOID_BEI_INTERVAL_MIN 8
+ #define AVOID_BEI_INTERVAL_MAX 32
+
++#define xhci_for_each_ring_seg(head, seg) \
++ for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))
++
+ struct xhci_segment {
+ union xhci_trb *trbs;
+ /* private to HCD */
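
A small user-space model of what the new iterator does: xHCI ring segments form a circular, singly linked list, and the macro (same definition as the xhci.h hunk above) visits each segment exactly once by mapping the wrap back to the head onto NULL. The struct here is stripped down to the two fields the walk needs.

#include <stdio.h>

struct xhci_segment {
	int num;
	struct xhci_segment *next;
};

/* Same definition as the one added to xhci.h above. */
#define xhci_for_each_ring_seg(head, seg) \
	for (seg = head; seg != NULL; seg = (seg->next != head ? seg->next : NULL))

int main(void)
{
	/* Three segments linked into a circle, as a ring would be. */
	struct xhci_segment s2 = { .num = 2 };
	struct xhci_segment s1 = { .num = 1, .next = &s2 };
	struct xhci_segment s0 = { .num = 0, .next = &s1 };
	struct xhci_segment *seg;

	s2.next = &s0;			/* close the circle */

	xhci_for_each_ring_seg(&s0, seg)
		printf("segment %d\n", seg->num);	/* visits 0, 1, 2 exactly once */

	return 0;
}
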
--- /dev/null
+From sashal@kernel.org Wed Sep 17 14:39:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 08:39:07 -0400
+Subject: usb: xhci: remove option to change a default ring's TRB cycle bit
+To: stable@vger.kernel.org
+Cc: Niklas Neronin <niklas.neronin@linux.intel.com>, Mathias Nyman <mathias.nyman@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250917123909.514131-2-sashal@kernel.org>
+
+From: Niklas Neronin <niklas.neronin@linux.intel.com>
+
+[ Upstream commit e1b0fa863907a61e86acc19ce2d0633941907c8e ]
+
+The TRB cycle bit indicates TRB ownership by the Host Controller (HC) or
+Host Controller Driver (HCD). New rings are initialized with 'cycle_state'
+equal to one, and all their TRBs' cycle bits are set to zero. When handling
+ring expansion, set the source ring cycle bits to the same value as the
+destination ring.
+
+Move the cycle bit setting from xhci_segment_alloc() to xhci_link_rings(),
+and remove the 'cycle_state' argument from xhci_initialize_ring_info().
+The xhci_segment_alloc() function uses kzalloc_node() to allocate segments,
+ensuring that all TRB cycle bits are initialized to zero.
+
+Signed-off-by: Niklas Neronin <niklas.neronin@linux.intel.com>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20241106101459.775897-12-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: a5c98e8b1398 ("xhci: dbc: Fix full DbC transfer ring after several reconnects")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 2 -
+ drivers/usb/host/xhci-mem.c | 50 ++++++++++++++++++-----------------------
+ drivers/usb/host/xhci.c | 2 -
+ drivers/usb/host/xhci.h | 6 +---
+ 4 files changed, 27 insertions(+), 33 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -471,7 +471,7 @@ xhci_dbc_ring_alloc(struct device *dev,
+ trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+ }
+ INIT_LIST_HEAD(&ring->td_list);
+- xhci_initialize_ring_info(ring, 1);
++ xhci_initialize_ring_info(ring);
+ return ring;
+ dma_fail:
+ kfree(seg);
+--- a/drivers/usb/host/xhci-mem.c
++++ b/drivers/usb/host/xhci-mem.c
+@@ -27,14 +27,12 @@
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+ static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci,
+- unsigned int cycle_state,
+ unsigned int max_packet,
+ unsigned int num,
+ gfp_t flags)
+ {
+ struct xhci_segment *seg;
+ dma_addr_t dma;
+- int i;
+ struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+
+ seg = kzalloc_node(sizeof(*seg), flags, dev_to_node(dev));
+@@ -56,11 +54,6 @@ static struct xhci_segment *xhci_segment
+ return NULL;
+ }
+ }
+- /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
+- if (cycle_state == 0) {
+- for (i = 0; i < TRBS_PER_SEGMENT; i++)
+- seg->trbs[i].link.control = cpu_to_le32(TRB_CYCLE);
+- }
+ seg->num = num;
+ seg->dma = dma;
+ seg->next = NULL;
+@@ -138,6 +131,14 @@ static void xhci_link_rings(struct xhci_
+
+ chain_links = xhci_link_chain_quirk(xhci, ring->type);
+
++ /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs */
++ if (ring->cycle_state == 0) {
++ xhci_for_each_ring_seg(ring->first_seg, seg) {
++ for (int i = 0; i < TRBS_PER_SEGMENT; i++)
++ seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
++ }
++ }
++
+ next = ring->enq_seg->next;
+ xhci_link_segments(ring->enq_seg, first, ring->type, chain_links);
+ xhci_link_segments(last, next, ring->type, chain_links);
+@@ -287,8 +288,7 @@ void xhci_ring_free(struct xhci_hcd *xhc
+ kfree(ring);
+ }
+
+-void xhci_initialize_ring_info(struct xhci_ring *ring,
+- unsigned int cycle_state)
++void xhci_initialize_ring_info(struct xhci_ring *ring)
+ {
+ /* The ring is empty, so the enqueue pointer == dequeue pointer */
+ ring->enqueue = ring->first_seg->trbs;
+@@ -302,7 +302,7 @@ void xhci_initialize_ring_info(struct xh
+ * New rings are initialized with cycle state equal to 1; if we are
+ * handling ring expansion, set the cycle state equal to the old ring.
+ */
+- ring->cycle_state = cycle_state;
++ ring->cycle_state = 1;
+
+ /*
+ * Each segment has a link TRB, and leave an extra TRB for SW
+@@ -317,7 +317,6 @@ static int xhci_alloc_segments_for_ring(
+ struct xhci_segment **first,
+ struct xhci_segment **last,
+ unsigned int num_segs,
+- unsigned int cycle_state,
+ enum xhci_ring_type type,
+ unsigned int max_packet,
+ gfp_t flags)
+@@ -328,7 +327,7 @@ static int xhci_alloc_segments_for_ring(
+
+ chain_links = xhci_link_chain_quirk(xhci, type);
+
+- prev = xhci_segment_alloc(xhci, cycle_state, max_packet, num, flags);
++ prev = xhci_segment_alloc(xhci, max_packet, num, flags);
+ if (!prev)
+ return -ENOMEM;
+ num++;
+@@ -337,8 +336,7 @@ static int xhci_alloc_segments_for_ring(
+ while (num < num_segs) {
+ struct xhci_segment *next;
+
+- next = xhci_segment_alloc(xhci, cycle_state, max_packet, num,
+- flags);
++ next = xhci_segment_alloc(xhci, max_packet, num, flags);
+ if (!next)
+ goto free_segments;
+
+@@ -363,9 +361,8 @@ free_segments:
+ * Set the end flag and the cycle toggle bit on the last segment.
+ * See section 4.9.1 and figures 15 and 16.
+ */
+-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+- unsigned int num_segs, unsigned int cycle_state,
+- enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
++struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
++ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags)
+ {
+ struct xhci_ring *ring;
+ int ret;
+@@ -383,7 +380,7 @@ struct xhci_ring *xhci_ring_alloc(struct
+ return ring;
+
+ ret = xhci_alloc_segments_for_ring(xhci, &ring->first_seg, &ring->last_seg, num_segs,
+- cycle_state, type, max_packet, flags);
++ type, max_packet, flags);
+ if (ret)
+ goto fail;
+
+@@ -393,7 +390,7 @@ struct xhci_ring *xhci_ring_alloc(struct
+ ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+ cpu_to_le32(LINK_TOGGLE);
+ }
+- xhci_initialize_ring_info(ring, cycle_state);
++ xhci_initialize_ring_info(ring);
+ trace_xhci_ring_alloc(ring);
+ return ring;
+
+@@ -421,8 +418,8 @@ int xhci_ring_expansion(struct xhci_hcd
+ struct xhci_segment *last;
+ int ret;
+
+- ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->cycle_state,
+- ring->type, ring->bounce_buf_len, flags);
++ ret = xhci_alloc_segments_for_ring(xhci, &first, &last, num_new_segs, ring->type,
++ ring->bounce_buf_len, flags);
+ if (ret)
+ return -ENOMEM;
+
+@@ -632,8 +629,7 @@ struct xhci_stream_info *xhci_alloc_stre
+
+ for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+ stream_info->stream_rings[cur_stream] =
+- xhci_ring_alloc(xhci, 2, 1, TYPE_STREAM, max_packet,
+- mem_flags);
++ xhci_ring_alloc(xhci, 2, TYPE_STREAM, max_packet, mem_flags);
+ cur_ring = stream_info->stream_rings[cur_stream];
+ if (!cur_ring)
+ goto cleanup_rings;
+@@ -974,7 +970,7 @@ int xhci_alloc_virt_device(struct xhci_h
+ }
+
+ /* Allocate endpoint 0 ring */
+- dev->eps[0].ring = xhci_ring_alloc(xhci, 2, 1, TYPE_CTRL, 0, flags);
++ dev->eps[0].ring = xhci_ring_alloc(xhci, 2, TYPE_CTRL, 0, flags);
+ if (!dev->eps[0].ring)
+ goto fail;
+
+@@ -1457,7 +1453,7 @@ int xhci_endpoint_init(struct xhci_hcd *
+
+ /* Set up the endpoint ring */
+ virt_dev->eps[ep_index].new_ring =
+- xhci_ring_alloc(xhci, 2, 1, ring_type, max_packet, mem_flags);
++ xhci_ring_alloc(xhci, 2, ring_type, max_packet, mem_flags);
+ if (!virt_dev->eps[ep_index].new_ring)
+ return -ENOMEM;
+
+@@ -2266,7 +2262,7 @@ xhci_alloc_interrupter(struct xhci_hcd *
+ if (!ir)
+ return NULL;
+
+- ir->event_ring = xhci_ring_alloc(xhci, segs, 1, TYPE_EVENT, 0, flags);
++ ir->event_ring = xhci_ring_alloc(xhci, segs, TYPE_EVENT, 0, flags);
+ if (!ir->event_ring) {
+ xhci_warn(xhci, "Failed to allocate interrupter event ring\n");
+ kfree(ir);
+@@ -2472,7 +2468,7 @@ int xhci_mem_init(struct xhci_hcd *xhci,
+ goto fail;
+
+ /* Set up the command ring to have one segments for now. */
+- xhci->cmd_ring = xhci_ring_alloc(xhci, 1, 1, TYPE_COMMAND, 0, flags);
++ xhci->cmd_ring = xhci_ring_alloc(xhci, 1, TYPE_COMMAND, 0, flags);
+ if (!xhci->cmd_ring)
+ goto fail;
+ xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -769,7 +769,7 @@ static void xhci_clear_command_ring(stru
+ seg->trbs[TRBS_PER_SEGMENT - 1].link.control &= cpu_to_le32(~TRB_CYCLE);
+ }
+
+- xhci_initialize_ring_info(ring, 1);
++ xhci_initialize_ring_info(ring);
+ /*
+ * Reset the hardware dequeue pointer.
+ * Yes, this will need to be re-written after resume, but we're paranoid
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1803,14 +1803,12 @@ void xhci_slot_copy(struct xhci_hcd *xhc
+ int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
+ struct usb_device *udev, struct usb_host_endpoint *ep,
+ gfp_t mem_flags);
+-struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
+- unsigned int num_segs, unsigned int cycle_state,
++struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, unsigned int num_segs,
+ enum xhci_ring_type type, unsigned int max_packet, gfp_t flags);
+ void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
+ int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
+ unsigned int num_trbs, gfp_t flags);
+-void xhci_initialize_ring_info(struct xhci_ring *ring,
+- unsigned int cycle_state);
++void xhci_initialize_ring_info(struct xhci_ring *ring);
+ void xhci_free_endpoint_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_device *virt_dev,
+ unsigned int ep_index);
--- /dev/null
+From 0dd765fae295832934bf28e45dd5a355e0891ed4 Mon Sep 17 00:00:00 2001
+From: Sankararaman Jayaraman <sankararaman.jayaraman@broadcom.com>
+Date: Thu, 20 Mar 2025 10:25:22 +0530
+Subject: vmxnet3: unregister xdp rxq info in the reset path
+
+From: Sankararaman Jayaraman <sankararaman.jayaraman@broadcom.com>
+
+commit 0dd765fae295832934bf28e45dd5a355e0891ed4 upstream.
+
+vmxnet3 does not unregister xdp rxq info in the
+vmxnet3_reset_work() code path as vmxnet3_rq_destroy()
+is not invoked in this code path. So, we get the following message with a
+backtrace.
+
+Missing unregister, handled but fix driver
+WARNING: CPU:48 PID: 500 at net/core/xdp.c:182
+__xdp_rxq_info_reg+0x93/0xf0
+
+This patch fixes the problem by moving the unregister
+code of XDP from vmxnet3_rq_destroy() to vmxnet3_rq_cleanup().
+
+Fixes: 54f00cce1178 ("vmxnet3: Add XDP support.")
+Signed-off-by: Sankararaman Jayaraman <sankararaman.jayaraman@broadcom.com>
+Signed-off-by: Ronak Doshi <ronak.doshi@broadcom.com>
+Link: https://patch.msgid.link/20250320045522.57892-1-sankararaman.jayaraman@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Ajay: Modified to apply on v6.6, v6.12 ]
+Signed-off-by: Ajay Kaher <ajay.kaher@broadcom.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/vmxnet3/vmxnet3_drv.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/vmxnet3/vmxnet3_drv.c
++++ b/drivers/net/vmxnet3/vmxnet3_drv.c
+@@ -2051,6 +2051,11 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_que
+
+ rq->comp_ring.gen = VMXNET3_INIT_GEN;
+ rq->comp_ring.next2proc = 0;
++
++ if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
++ xdp_rxq_info_unreg(&rq->xdp_rxq);
++ page_pool_destroy(rq->page_pool);
++ rq->page_pool = NULL;
+ }
+
+
+@@ -2091,11 +2096,6 @@ static void vmxnet3_rq_destroy(struct vm
+ }
+ }
+
+- if (xdp_rxq_info_is_reg(&rq->xdp_rxq))
+- xdp_rxq_info_unreg(&rq->xdp_rxq);
+- page_pool_destroy(rq->page_pool);
+- rq->page_pool = NULL;
+-
+ if (rq->data_ring.base) {
+ dma_free_coherent(&adapter->pdev->dev,
+ rq->rx_ring[0].size * rq->data_ring.desc_size,
--- /dev/null
+From 877818802c3e970f67ccb53012facc78bef5f97a Mon Sep 17 00:00:00 2001
+From: "Borislav Petkov (AMD)" <bp@alien8.de>
+Date: Mon, 11 Nov 2024 17:22:08 +0100
+Subject: x86/bugs: Add SRSO_USER_KERNEL_NO support
+
+From: Borislav Petkov (AMD) <bp@alien8.de>
+
+commit 877818802c3e970f67ccb53012facc78bef5f97a upstream.
+
+If the machine has:
+
+ CPUID Fn8000_0021_EAX[30] (SRSO_USER_KERNEL_NO) -- If this bit is 1,
+ it indicates the CPU is not subject to the SRSO vulnerability across
+ user/kernel boundaries.
+
+have it fall back to IBPB on VMEXIT only, in the case it is going to run
+VMs:
+
+ Speculative Return Stack Overflow: Mitigation: IBPB on VMEXIT only
+
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Reviewed-by: Nikolay Borisov <nik.borisov@suse.com>
+Link: https://lore.kernel.org/r/20241202120416.6054-2-bp@kernel.org
+[ Harshit: Conflicts resolved as this commit: 7c62c442b6eb ("x86/vmscape:
+ Enumerate VMSCAPE bug") has been applied already to 6.12.y ]
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/include/asm/cpufeatures.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 4 ++++
+ 2 files changed, 5 insertions(+)
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -464,6 +464,7 @@
+ #define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
+ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
++#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
+
+ /*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2810,6 +2810,9 @@ static void __init srso_select_mitigatio
+ break;
+
+ case SRSO_CMD_SAFE_RET:
++ if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
++ goto ibpb_on_vmexit;
++
+ if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
+ /*
+ * Enable the return thunk for generated code
+@@ -2861,6 +2864,7 @@ static void __init srso_select_mitigatio
+ }
+ break;
+
++ibpb_on_vmexit:
+ case SRSO_CMD_IBPB_ON_VMEXIT:
+ if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
+ if (has_microcode) {
--- /dev/null
+From 8442df2b49ed9bcd67833ad4f091d15ac91efd00 Mon Sep 17 00:00:00 2001
+From: Borislav Petkov <bp@alien8.de>
+Date: Tue, 18 Feb 2025 12:13:33 +0100
+Subject: x86/bugs: KVM: Add support for SRSO_MSR_FIX
+
+From: Borislav Petkov <bp@alien8.de>
+
+commit 8442df2b49ed9bcd67833ad4f091d15ac91efd00 upstream.
+
+Add support for
+
+ CPUID Fn8000_0021_EAX[31] (SRSO_MSR_FIX). If this bit is 1, it
+ indicates that software may use MSR BP_CFG[BpSpecReduce] to mitigate
+ SRSO.
+
+Enable BpSpecReduce to mitigate SRSO across guest/host boundaries.
+
+Switch back to enabling the bit when virtualization is enabled and to
+clearing the bit when virtualization is disabled, because using an MSR slot
+would clear the bit when the guest is exited, and any training the guest
+has done would potentially influence the host kernel when execution
+enters the kernel and hasn't VMRUN the guest yet.
+
+More detail on the public thread in Link below.
+
+Co-developed-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Sean Christopherson <seanjc@google.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/r/20241202120416.6054-1-bp@kernel.org
+Signed-off-by: Harshit Mogalapalli <harshit.m.mogalapalli@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Documentation/admin-guide/hw-vuln/srso.rst | 13 +++++++++++++
+ arch/x86/include/asm/cpufeatures.h | 4 ++++
+ arch/x86/include/asm/msr-index.h | 1 +
+ arch/x86/kernel/cpu/bugs.c | 24 ++++++++++++++++++++----
+ arch/x86/kvm/svm/svm.c | 6 ++++++
+ arch/x86/lib/msr.c | 2 ++
+ 6 files changed, 46 insertions(+), 4 deletions(-)
+
+--- a/Documentation/admin-guide/hw-vuln/srso.rst
++++ b/Documentation/admin-guide/hw-vuln/srso.rst
+@@ -104,7 +104,20 @@ The possible values in this file are:
+
+ (spec_rstack_overflow=ibpb-vmexit)
+
++ * 'Mitigation: Reduced Speculation':
+
++ This mitigation gets automatically enabled when the above one "IBPB on
++ VMEXIT" has been selected and the CPU supports the BpSpecReduce bit.
++
++ It gets automatically enabled on machines which have the
++ SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch
++ to the above =ibpb-vmexit mitigation because the user/kernel boundary is
++ not affected anymore and thus "safe RET" is not needed.
++
++ After enabling the IBPB on VMEXIT mitigation option, the BpSpecReduce bit
++ is detected (functionality present on all such machines) and that
++ practically overrides IBPB on VMEXIT as it has a lot less performance
++ impact and takes care of the guest->host attack vector too.
+
+ In order to exploit vulnerability, an attacker needs to:
+
+--- a/arch/x86/include/asm/cpufeatures.h
++++ b/arch/x86/include/asm/cpufeatures.h
+@@ -465,6 +465,10 @@
+ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+ #define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
+ #define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
++#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
++ * BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
++ * (SRSO_MSR_FIX in the official doc).
++ */
+
+ /*
+ * Extended auxiliary flags: Linux defined - for features scattered in various
+--- a/arch/x86/include/asm/msr-index.h
++++ b/arch/x86/include/asm/msr-index.h
+@@ -728,6 +728,7 @@
+
+ /* Zen4 */
+ #define MSR_ZEN4_BP_CFG 0xc001102e
++#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
+ #define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5
+
+ /* Fam 19h MSRs */
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -2718,6 +2718,7 @@ enum srso_mitigation {
+ SRSO_MITIGATION_SAFE_RET,
+ SRSO_MITIGATION_IBPB,
+ SRSO_MITIGATION_IBPB_ON_VMEXIT,
++ SRSO_MITIGATION_BP_SPEC_REDUCE,
+ };
+
+ enum srso_mitigation_cmd {
+@@ -2735,7 +2736,8 @@ static const char * const srso_strings[]
+ [SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
+ [SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
+ [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
+- [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
++ [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
++ [SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
+ };
+
+ static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
+@@ -2774,7 +2776,7 @@ static void __init srso_select_mitigatio
+ srso_cmd == SRSO_CMD_OFF) {
+ if (boot_cpu_has(X86_FEATURE_SBPB))
+ x86_pred_cmd = PRED_CMD_SBPB;
+- return;
++ goto out;
+ }
+
+ if (has_microcode) {
+@@ -2786,7 +2788,7 @@ static void __init srso_select_mitigatio
+ */
+ if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+- return;
++ goto out;
+ }
+
+ if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
+@@ -2866,6 +2868,12 @@ static void __init srso_select_mitigatio
+
+ ibpb_on_vmexit:
+ case SRSO_CMD_IBPB_ON_VMEXIT:
++ if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
++ pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
++ srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
++ break;
++ }
++
+ if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
+ if (has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+@@ -2887,7 +2895,15 @@ ibpb_on_vmexit:
+ }
+
+ out:
+- pr_info("%s\n", srso_strings[srso_mitigation]);
++ /*
++ * Clear the feature flag if this mitigation is not selected as that
++ * feature flag controls the BpSpecReduce MSR bit toggling in KVM.
++ */
++ if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
++ setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);
++
++ if (srso_mitigation != SRSO_MITIGATION_NONE)
++ pr_info("%s\n", srso_strings[srso_mitigation]);
+ }
+
+ #undef pr_fmt
+--- a/arch/x86/kvm/svm/svm.c
++++ b/arch/x86/kvm/svm/svm.c
+@@ -608,6 +608,9 @@ static void svm_disable_virtualization_c
+ kvm_cpu_svm_disable();
+
+ amd_pmu_disable_virt();
++
++ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
++ msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
+ }
+
+ static int svm_enable_virtualization_cpu(void)
+@@ -685,6 +688,9 @@ static int svm_enable_virtualization_cpu
+ rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
+ }
+
++ if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
++ msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
++
+ return 0;
+ }
+
+--- a/arch/x86/lib/msr.c
++++ b/arch/x86/lib/msr.c
+@@ -103,6 +103,7 @@ int msr_set_bit(u32 msr, u8 bit)
+ {
+ return __flip_bit(msr, bit, true);
+ }
++EXPORT_SYMBOL_GPL(msr_set_bit);
+
+ /**
+ * msr_clear_bit - Clear @bit in a MSR @msr.
+@@ -118,6 +119,7 @@ int msr_clear_bit(u32 msr, u8 bit)
+ {
+ return __flip_bit(msr, bit, false);
+ }
++EXPORT_SYMBOL_GPL(msr_clear_bit);
+
+ #ifdef CONFIG_TRACEPOINTS
+ void do_trace_write_msr(unsigned int msr, u64 val, int failed)
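
The SRSO bits referenced by this and the previous patch live in CPUID Fn8000_0021_EAX: bit 30 (SRSO_USER_KERNEL_NO) and bit 31 (SRSO_MSR_FIX). A small, purely illustrative probe using the compiler-provided <cpuid.h> is sketched below; it only inspects the CPUID bits and says nothing about which mitigation the running kernel actually selected.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	/* __get_cpuid_count() checks the maximum supported extended leaf first. */
	if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000021 not available");
		return 1;
	}

	printf("SRSO_USER_KERNEL_NO (EAX[30]): %u\n", (eax >> 30) & 1);
	printf("SRSO_MSR_FIX        (EAX[31]): %u\n", (eax >> 31) & 1);
	return 0;
}
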
--- /dev/null
+From sashal@kernel.org Wed Sep 17 14:39:14 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 08:39:08 -0400
+Subject: xhci: dbc: decouple endpoint allocation from initialization
+To: stable@vger.kernel.org
+Cc: Mathias Nyman <mathias.nyman@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250917123909.514131-3-sashal@kernel.org>
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit 220a0ffde02f962c13bc752b01aa570b8c65a37b ]
+
+Decouple allocation of the endpoint ring buffer from initialization
+of the buffer, and initialization of the endpoint context parts from
+the rest of the contexts.
+
+It allows the driver to clear up and reinitialize endpoint rings
+after disconnect without reallocating everything.
+
+This is a prerequisite for the next patch that prevents the transfer
+ring from filling up with cancelled (no-op) TRBs if a debug cable is
+reconnected several times without transferring anything.
+
+Cc: stable@vger.kernel.org
+Fixes: dfba2174dc42 ("usb: xhci: Add DbC support in xHCI driver")
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20250902105306.877476-2-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: a5c98e8b1398 ("xhci: dbc: Fix full DbC transfer ring after several reconnects")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 71 ++++++++++++++++++++++++++---------------
+ 1 file changed, 46 insertions(+), 25 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -101,13 +101,34 @@ static u32 xhci_dbc_populate_strings(str
+ return string_length;
+ }
+
++static void xhci_dbc_init_ep_contexts(struct xhci_dbc *dbc)
++{
++ struct xhci_ep_ctx *ep_ctx;
++ unsigned int max_burst;
++ dma_addr_t deq;
++
++ max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
++
++ /* Populate bulk out endpoint context: */
++ ep_ctx = dbc_bulkout_ctx(dbc);
++ deq = dbc_bulkout_enq(dbc);
++ ep_ctx->ep_info = 0;
++ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
++ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
++
++ /* Populate bulk in endpoint context: */
++ ep_ctx = dbc_bulkin_ctx(dbc);
++ deq = dbc_bulkin_enq(dbc);
++ ep_ctx->ep_info = 0;
++ ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
++ ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
++}
++
+ static void xhci_dbc_init_contexts(struct xhci_dbc *dbc, u32 string_length)
+ {
+ struct dbc_info_context *info;
+- struct xhci_ep_ctx *ep_ctx;
+ u32 dev_info;
+- dma_addr_t deq, dma;
+- unsigned int max_burst;
++ dma_addr_t dma;
+
+ if (!dbc)
+ return;
+@@ -121,20 +142,8 @@ static void xhci_dbc_init_contexts(struc
+ info->serial = cpu_to_le64(dma + DBC_MAX_STRING_LENGTH * 3);
+ info->length = cpu_to_le32(string_length);
+
+- /* Populate bulk out endpoint context: */
+- ep_ctx = dbc_bulkout_ctx(dbc);
+- max_burst = DBC_CTRL_MAXBURST(readl(&dbc->regs->control));
+- deq = dbc_bulkout_enq(dbc);
+- ep_ctx->ep_info = 0;
+- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_OUT_EP, 1024, max_burst);
+- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_out->cycle_state);
+-
+- /* Populate bulk in endpoint context: */
+- ep_ctx = dbc_bulkin_ctx(dbc);
+- deq = dbc_bulkin_enq(dbc);
+- ep_ctx->ep_info = 0;
+- ep_ctx->ep_info2 = dbc_epctx_info2(BULK_IN_EP, 1024, max_burst);
+- ep_ctx->deq = cpu_to_le64(deq | dbc->ring_in->cycle_state);
++ /* Populate bulk in and out endpoint contexts: */
++ xhci_dbc_init_ep_contexts(dbc);
+
+ /* Set DbC context and info registers: */
+ lo_hi_writeq(dbc->ctx->dma, &dbc->regs->dccp);
+@@ -435,6 +444,23 @@ dbc_alloc_ctx(struct device *dev, gfp_t
+ return ctx;
+ }
+
++static void xhci_dbc_ring_init(struct xhci_ring *ring)
++{
++ struct xhci_segment *seg = ring->first_seg;
++
++ /* clear all trbs on ring in case of old ring */
++ memset(seg->trbs, 0, TRB_SEGMENT_SIZE);
++
++ /* Only event ring does not use link TRB */
++ if (ring->type != TYPE_EVENT) {
++ union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
++
++ trb->link.segment_ptr = cpu_to_le64(ring->first_seg->dma);
++ trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
++ }
++ xhci_initialize_ring_info(ring);
++}
++
+ static struct xhci_ring *
+ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+ {
+@@ -463,15 +489,10 @@ xhci_dbc_ring_alloc(struct device *dev,
+
+ seg->dma = dma;
+
+- /* Only event ring does not use link TRB */
+- if (type != TYPE_EVENT) {
+- union xhci_trb *trb = &seg->trbs[TRBS_PER_SEGMENT - 1];
+-
+- trb->link.segment_ptr = cpu_to_le64(dma);
+- trb->link.control = cpu_to_le32(LINK_TOGGLE | TRB_TYPE(TRB_LINK));
+- }
+ INIT_LIST_HEAD(&ring->td_list);
+- xhci_initialize_ring_info(ring);
++
++ xhci_dbc_ring_init(ring);
++
+ return ring;
+ dma_fail:
+ kfree(seg);
--- /dev/null
+From sashal@kernel.org Wed Sep 17 14:39:15 2025
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 08:39:09 -0400
+Subject: xhci: dbc: Fix full DbC transfer ring after several reconnects
+To: stable@vger.kernel.org
+Cc: Mathias Nyman <mathias.nyman@linux.intel.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20250917123909.514131-4-sashal@kernel.org>
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+[ Upstream commit a5c98e8b1398534ae1feb6e95e2d3ee5215538ed ]
+
+Pending requests will be flushed on disconnect, and the corresponding
+TRBs will be turned into No-op TRBs, which are ignored by the xHC
+controller once it starts processing the ring.
+
+If the USB debug cable repeatedly disconnects before the ring is started,
+then the ring will eventually be filled with No-op TRBs.
+No new transfers can be queued when the ring is full, and the driver will
+print the following error message:
+
+ "xhci_hcd 0000:00:14.0: failed to queue trbs"
+
+This is a normal case for 'in' transfers where TRBs are always enqueued
+in advance, ready to take on incoming data. If no data arrives and the
+device is disconnected, then the ring dequeue pointer will remain at the
+beginning of the ring while enqueue points to the first free TRB after the
+last cancelled No-op TRB.
+
+Solve this by reinitializing the rings when the debug cable disconnects
+and DbC is leaving the configured state.
+Clear the whole ring buffer and set enqueue and dequeue to the beginning
+of the ring, and set the cycle bit to its initial state.
+
+Cc: stable@vger.kernel.org
+Fixes: dfba2174dc42 ("usb: xhci: Add DbC support in xHCI driver")
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Link: https://lore.kernel.org/r/20250902105306.877476-3-mathias.nyman@linux.intel.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/host/xhci-dbgcap.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/host/xhci-dbgcap.c
++++ b/drivers/usb/host/xhci-dbgcap.c
+@@ -461,6 +461,25 @@ static void xhci_dbc_ring_init(struct xh
+ xhci_initialize_ring_info(ring);
+ }
+
++static int xhci_dbc_reinit_ep_rings(struct xhci_dbc *dbc)
++{
++ struct xhci_ring *in_ring = dbc->eps[BULK_IN].ring;
++ struct xhci_ring *out_ring = dbc->eps[BULK_OUT].ring;
++
++ if (!in_ring || !out_ring || !dbc->ctx) {
++ dev_warn(dbc->dev, "Can't re-init unallocated endpoints\n");
++ return -ENODEV;
++ }
++
++ xhci_dbc_ring_init(in_ring);
++ xhci_dbc_ring_init(out_ring);
++
++ /* set ep context enqueue, dequeue, and cycle to initial values */
++ xhci_dbc_init_ep_contexts(dbc);
++
++ return 0;
++}
++
+ static struct xhci_ring *
+ xhci_dbc_ring_alloc(struct device *dev, enum xhci_ring_type type, gfp_t flags)
+ {
+@@ -884,7 +903,7 @@ static enum evtreturn xhci_dbc_do_handle
+ dev_info(dbc->dev, "DbC cable unplugged\n");
+ dbc->state = DS_ENABLED;
+ xhci_dbc_flush_requests(dbc);
+-
++ xhci_dbc_reinit_ep_rings(dbc);
+ return EVT_DISC;
+ }
+
+@@ -894,7 +913,7 @@ static enum evtreturn xhci_dbc_do_handle
+ writel(portsc, &dbc->regs->portsc);
+ dbc->state = DS_ENABLED;
+ xhci_dbc_flush_requests(dbc);
+-
++ xhci_dbc_reinit_ep_rings(dbc);
+ return EVT_DISC;
+ }
+
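
To make the failure mode concrete, here is a toy arithmetic model of the ring occupancy described in the changelog: each aborted connection leaves its pre-queued transfer TRBs behind as no-ops while dequeue never moves, so free space shrinks until queuing fails. The segment size and per-connect queue depth are illustrative numbers, not values taken from the driver.

#include <stdio.h>

#define RING_TRBS		255	/* usable TRBs in one segment (one slot is the link TRB) */
#define TRBS_QUEUED_PER_CONNECT	 16	/* illustrative: bulk-in requests queued in advance */

int main(void)
{
	int enqueue = 0;		/* advances as requests are queued */
	int dequeue = 0;		/* never advances: nothing is transferred */
	int reconnects = 0;

	/* Without the fix: cancelled requests become no-ops and enqueue stays put. */
	while (enqueue - dequeue + TRBS_QUEUED_PER_CONNECT <= RING_TRBS) {
		enqueue += TRBS_QUEUED_PER_CONNECT;	/* queue for the new connection */
		reconnects++;				/* ...then the cable is unplugged */
	}
	printf("ring full after %d reconnects (\"failed to queue trbs\")\n", reconnects);

	/* With the fix: the ring is reinitialized on disconnect. */
	enqueue = dequeue = 0;
	printf("after reinit, free TRBs: %d\n", RING_TRBS - (enqueue - dequeue));
	return 0;
}
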