--- /dev/null
+From e18190b7e97e9db6546390e6e0ceddae606892b2 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Mon, 15 Sep 2025 20:15:49 -0700
+Subject: mm/damon/lru_sort: use param_ctx for damon_attrs staging
+
+From: SeongJae Park <sj@kernel.org>
+
+commit e18190b7e97e9db6546390e6e0ceddae606892b2 upstream.
+
+damon_lru_sort_apply_parameters() allocates a new DAMON context, stages
+user-specified DAMON parameters on it, and commits to running DAMON
+context at once, using damon_commit_ctx(). The code is, however, directly
+updating the monitoring attributes of the running context. And the
+attributes are over-written by later damon_commit_ctx() call. This means
+that the monitoring attributes parameters are not really working. Fix the
+wrong use of the parameter context.
+
+Link: https://lkml.kernel.org/r/20250916031549.115326-1-sj@kernel.org
+Fixes: a30969436428 ("mm/damon/lru_sort: use damon_commit_ctx()")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reviewed-by: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: <stable@vger.kernel.org> [6.11+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/lru_sort.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -203,7 +203,7 @@ static int damon_lru_sort_apply_paramete
+ goto out;
+ }
+
+- err = damon_set_attrs(ctx, &damon_lru_sort_mon_attrs);
++ err = damon_set_attrs(param_ctx, &damon_lru_sort_mon_attrs);
+ if (err)
+ goto out;
+
--- /dev/null
+From b93af2cc8e036754c0d9970d9ddc47f43cc94b9f Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Mon, 29 Sep 2025 17:44:09 -0700
+Subject: mm/damon/vaddr: do not repeat pte_offset_map_lock() until success
+
+From: SeongJae Park <sj@kernel.org>
+
+commit b93af2cc8e036754c0d9970d9ddc47f43cc94b9f upstream.
+
+DAMON's virtual address space operation set implementation (vaddr) calls
+pte_offset_map_lock() inside the page table walk callback function. This
+is for reading and writing page table accessed bits. If
+pte_offset_map_lock() fails, it retries by returning the page table walk
+callback function with ACTION_AGAIN.
+
+pte_offset_map_lock() can continuously fail if the target is a pmd
+migration entry, though. Hence it could cause an infinite page table walk
+if the migration cannot be done until the page table walk is finished.
+This indeed caused a soft lockup when CPU hotplugging and DAMON were
+running in parallel.
+
+Avoid the infinite loop by simply not retrying the page table walk. DAMON
+is promising only a best-effort accuracy, so missing access to such pages
+is no problem.
+
+Link: https://lkml.kernel.org/r/20250930004410.55228-1-sj@kernel.org
+Fixes: 7780d04046a2 ("mm/pagewalkers: ACTION_AGAIN if pte_offset_map_lock() fails")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Reported-by: Xinyu Zheng <zhengxinyu6@huawei.com>
+Closes: https://lore.kernel.org/20250918030029.2652607-1-zhengxinyu6@huawei.com
+Acked-by: Hugh Dickins <hughd@google.com>
+Cc: <stable@vger.kernel.org> [6.5+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/vaddr.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/mm/damon/vaddr.c
++++ b/mm/damon/vaddr.c
+@@ -328,10 +328,8 @@ static int damon_mkold_pmd_entry(pmd_t *
+ }
+
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+- if (!pte) {
+- walk->action = ACTION_AGAIN;
++ if (!pte)
+ return 0;
+- }
+ if (!pte_present(ptep_get(pte)))
+ goto out;
+ damon_ptep_mkold(pte, walk->vma, addr);
+@@ -481,10 +479,8 @@ regular_page:
+ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+ pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+- if (!pte) {
+- walk->action = ACTION_AGAIN;
++ if (!pte)
+ return 0;
+- }
+ ptent = ptep_get(pte);
+ if (!pte_present(ptent))
+ goto out;
--- /dev/null
+From b322e88b3d553e85b4e15779491c70022783faa4 Mon Sep 17 00:00:00 2001
+From: Li RongQing <lirongqing@baidu.com>
+Date: Thu, 14 Aug 2025 18:23:33 +0800
+Subject: mm/hugetlb: early exit from hugetlb_pages_alloc_boot() when max_huge_pages=0
+
+From: Li RongQing <lirongqing@baidu.com>
+
+commit b322e88b3d553e85b4e15779491c70022783faa4 upstream.
+
+Optimize hugetlb_pages_alloc_boot() to return immediately when
+max_huge_pages is 0, avoiding unnecessary CPU cycles and the below log
+message when hugepages aren't configured in the kernel command line.
+[ 3.702280] HugeTLB: allocation took 0ms with hugepage_allocation_threads=32
+
+Link: https://lkml.kernel.org/r/20250814102333.4428-1-lirongqing@baidu.com
+Signed-off-by: Li RongQing <lirongqing@baidu.com>
+Reviewed-by: Dev Jain <dev.jain@arm.com>
+Tested-by: Dev Jain <dev.jain@arm.com>
+Reviewed-by: Jane Chu <jane.chu@oracle.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Muchun Song <muchun.song@linux.dev>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/hugetlb.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -3654,6 +3654,9 @@ static void __init hugetlb_hstate_alloc_
+ return;
+ }
+
++ if (!h->max_huge_pages)
++ return;
++
+ /* do node specific alloc */
+ if (hugetlb_hstate_alloc_pages_specific_nodes(h))
+ return;
--- /dev/null
+From 6a204d4b14c99232e05d35305c27ebce1c009840 Mon Sep 17 00:00:00 2001
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Date: Thu, 14 Aug 2025 14:22:45 -0300
+Subject: mm/page_alloc: only set ALLOC_HIGHATOMIC for __GFP_HIGH allocations
+
+From: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+
+commit 6a204d4b14c99232e05d35305c27ebce1c009840 upstream.
+
+Commit 524c48072e56 ("mm/page_alloc: rename ALLOC_HIGH to
+ALLOC_MIN_RESERVE") is the start of a series that explains how __GFP_HIGH,
+which implies ALLOC_MIN_RESERVE, is going to be used instead of
+__GFP_ATOMIC for high atomic reserves.
+
+Commit eb2e2b425c69 ("mm/page_alloc: explicitly record high-order atomic
+allocations in alloc_flags") introduced ALLOC_HIGHATOMIC for such
+allocations of order higher than 0. It still used __GFP_ATOMIC, though.
+
+Then, commit 1ebbb21811b7 ("mm/page_alloc: explicitly define how
+__GFP_HIGH non-blocking allocations accesses reserves") just turned that
+check for !__GFP_DIRECT_RECLAIM, ignoring that high atomic reserves were
+expected to test for __GFP_HIGH.
+
+This leads to high atomic reserves being added for high-order GFP_NOWAIT
+allocations and others that clear __GFP_DIRECT_RECLAIM, which is
+unexpected. Later, those reserves lead to 0-order allocations going to
+the slow path and starting reclaim.
+
+From /proc/pagetypeinfo, without the patch:
+
+Node 0, zone DMA, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
+Node 0, zone DMA32, type HighAtomic 1 8 10 9 7 3 0 0 0 0 0
+Node 0, zone Normal, type HighAtomic 64 20 12 5 0 0 0 0 0 0 0
+
+With the patch:
+
+Node 0, zone DMA, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
+Node 0, zone DMA32, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
+Node 0, zone Normal, type HighAtomic 0 0 0 0 0 0 0 0 0 0 0
+
+Link: https://lkml.kernel.org/r/20250814172245.1259625-1-cascardo@igalia.com
+Fixes: 1ebbb21811b7 ("mm/page_alloc: explicitly define how __GFP_HIGH non-blocking allocations accesses reserves")
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@igalia.com>
+Tested-by: Helen Koike <koike@igalia.com>
+Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
+Tested-by: Sergey Senozhatsky <senozhatsky@chromium.org>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: Mel Gorman <mgorman@techsingularity.net>
+Cc: Matthew Wilcox <willy@infradead.org>
+Cc: NeilBrown <neilb@suse.de>
+Cc: Thierry Reding <thierry.reding@gmail.com>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -4408,7 +4408,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsig
+ if (!(gfp_mask & __GFP_NOMEMALLOC)) {
+ alloc_flags |= ALLOC_NON_BLOCK;
+
+- if (order > 0)
++ if (order > 0 && (alloc_flags & ALLOC_MIN_RESERVE))
+ alloc_flags |= ALLOC_HIGHATOMIC;
+ }
+
--- /dev/null
+From 9658d698a8a83540bf6a6c80d13c9a61590ee985 Mon Sep 17 00:00:00 2001
+From: Lance Yang <lance.yang@linux.dev>
+Date: Tue, 30 Sep 2025 16:10:40 +0800
+Subject: mm/rmap: fix soft-dirty and uffd-wp bit loss when remapping zero-filled mTHP subpage to shared zeropage
+
+From: Lance Yang <lance.yang@linux.dev>
+
+commit 9658d698a8a83540bf6a6c80d13c9a61590ee985 upstream.
+
+When splitting an mTHP and replacing a zero-filled subpage with the shared
+zeropage, try_to_map_unused_to_zeropage() currently drops several
+important PTE bits.
+
+For userspace tools like CRIU, which rely on the soft-dirty mechanism for
+incremental snapshots, losing the soft-dirty bit means modified pages are
+missed, leading to inconsistent memory state after restore.
+
+As pointed out by David, the more critical uffd-wp bit is also dropped.
+This breaks the userfaultfd write-protection mechanism, causing writes to
+be silently missed by monitoring applications, which can lead to data
+corruption.
+
+Preserve both the soft-dirty and uffd-wp bits from the old PTE when
+creating the new zeropage mapping to ensure they are correctly tracked.
+
+Link: https://lkml.kernel.org/r/20250930081040.80926-1-lance.yang@linux.dev
+Fixes: b1f202060afe ("mm: remap unused subpages to shared zeropage when splitting isolated thp")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Suggested-by: David Hildenbrand <david@redhat.com>
+Suggested-by: Dev Jain <dev.jain@arm.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Dev Jain <dev.jain@arm.com>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Reviewed-by: Harry Yoo <harry.yoo@oracle.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Mariano Pache <npache@redhat.com>
+Cc: Mathew Brost <matthew.brost@intel.com>
+Cc: Peter Xu <peterx@redhat.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Usama Arif <usamaarif642@gmail.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: Yu Zhao <yuzhao@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/migrate.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -297,8 +297,7 @@ bool isolate_folio_to_list(struct folio
+ }
+
+ static bool try_to_map_unused_to_zeropage(struct page_vma_mapped_walk *pvmw,
+- struct folio *folio,
+- unsigned long idx)
++ struct folio *folio, pte_t old_pte, unsigned long idx)
+ {
+ struct page *page = folio_page(folio, idx);
+ pte_t newpte;
+@@ -307,7 +306,7 @@ static bool try_to_map_unused_to_zeropag
+ return false;
+ VM_BUG_ON_PAGE(!PageAnon(page), page);
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+- VM_BUG_ON_PAGE(pte_present(ptep_get(pvmw->pte)), page);
++ VM_BUG_ON_PAGE(pte_present(old_pte), page);
+
+ if (folio_test_mlocked(folio) || (pvmw->vma->vm_flags & VM_LOCKED) ||
+ mm_forbids_zeropage(pvmw->vma->vm_mm))
+@@ -323,6 +322,12 @@ static bool try_to_map_unused_to_zeropag
+
+ newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
+ pvmw->vma->vm_page_prot));
++
++ if (pte_swp_soft_dirty(old_pte))
++ newpte = pte_mksoft_dirty(newpte);
++ if (pte_swp_uffd_wp(old_pte))
++ newpte = pte_mkuffd_wp(newpte);
++
+ set_pte_at(pvmw->vma->vm_mm, pvmw->address, pvmw->pte, newpte);
+
+ dec_mm_counter(pvmw->vma->vm_mm, mm_counter(folio));
+@@ -365,13 +370,13 @@ static bool remove_migration_pte(struct
+ continue;
+ }
+ #endif
++ old_pte = ptep_get(pvmw.pte);
+ if (rmap_walk_arg->map_unused_to_zeropage &&
+- try_to_map_unused_to_zeropage(&pvmw, folio, idx))
++ try_to_map_unused_to_zeropage(&pvmw, folio, old_pte, idx))
+ continue;
+
+ folio_get(folio);
+ pte = mk_pte(new, READ_ONCE(vma->vm_page_prot));
+- old_pte = ptep_get(pvmw.pte);
+
+ entry = pte_to_swp_entry(old_pte);
+ if (!is_migration_entry_young(entry))
--- /dev/null
+From 1ce6473d17e78e3cb9a40147658231731a551828 Mon Sep 17 00:00:00 2001
+From: Lance Yang <lance.yang@linux.dev>
+Date: Mon, 22 Sep 2025 10:14:58 +0800
+Subject: mm/thp: fix MTE tag mismatch when replacing zero-filled subpages
+
+From: Lance Yang <lance.yang@linux.dev>
+
+commit 1ce6473d17e78e3cb9a40147658231731a551828 upstream.
+
+When both THP and MTE are enabled, splitting a THP and replacing its
+zero-filled subpages with the shared zeropage can cause MTE tag mismatch
+faults in userspace.
+
+Remapping zero-filled subpages to the shared zeropage is unsafe, as the
+zeropage has a fixed tag of zero, which may not match the tag expected by
+the userspace pointer.
+
+KSM already avoids this problem by using memcmp_pages(), which on arm64
+intentionally reports MTE-tagged pages as non-identical to prevent unsafe
+merging.
+
+As suggested by David[1], this patch adopts the same pattern, replacing the
+memchr_inv() byte-level check with a call to pages_identical(). This
+leverages existing architecture-specific logic to determine if a page is
+truly identical to the shared zeropage.
+
+Having both the THP shrinker and KSM rely on pages_identical() makes the
+design more future-proof, IMO. Instead of handling quirks in generic code,
+we just let the architecture decide what makes two pages identical.
+
+[1] https://lore.kernel.org/all/ca2106a3-4bb2-4457-81af-301fd99fbef4@redhat.com
+
+Link: https://lkml.kernel.org/r/20250922021458.68123-1-lance.yang@linux.dev
+Fixes: b1f202060afe ("mm: remap unused subpages to shared zeropage when splitting isolated thp")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Reported-by: Qun-wei Lin <Qun-wei.Lin@mediatek.com>
+Closes: https://lore.kernel.org/all/a7944523fcc3634607691c35311a5d59d1a3f8d4.camel@mediatek.com
+Suggested-by: David Hildenbrand <david@redhat.com>
+Acked-by: Zi Yan <ziy@nvidia.com>
+Acked-by: David Hildenbrand <david@redhat.com>
+Acked-by: Usama Arif <usamaarif642@gmail.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Cc: Alistair Popple <apopple@nvidia.com>
+Cc: andrew.yang <andrew.yang@mediatek.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Byungchul Park <byungchul@sk.com>
+Cc: Charlie Jenkins <charlie@rivosinc.com>
+Cc: Chinwen Chang <chinwen.chang@mediatek.com>
+Cc: Dev Jain <dev.jain@arm.com>
+Cc: Domenico Cerasuolo <cerasuolodomenico@gmail.com>
+Cc: Gregory Price <gourry@gourry.net>
+Cc: "Huang, Ying" <ying.huang@linux.alibaba.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Joshua Hahn <joshua.hahnjy@gmail.com>
+Cc: Kairui Song <ryncsn@gmail.com>
+Cc: Kalesh Singh <kaleshsingh@google.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Mariano Pache <npache@redhat.com>
+Cc: Mathew Brost <matthew.brost@intel.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Mike Rapoport <rppt@kernel.org>
+Cc: Palmer Dabbelt <palmer@rivosinc.com>
+Cc: Rakie Kim <rakie.kim@sk.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Roman Gushchin <roman.gushchin@linux.dev>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Samuel Holland <samuel.holland@sifive.com>
+Cc: Shakeel Butt <shakeel.butt@linux.dev>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Yu Zhao <yuzhao@google.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c | 15 +++------------
+ mm/migrate.c | 8 +-------
+ 2 files changed, 4 insertions(+), 19 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -4115,32 +4115,23 @@ static unsigned long deferred_split_coun
+ static bool thp_underused(struct folio *folio)
+ {
+ int num_zero_pages = 0, num_filled_pages = 0;
+- void *kaddr;
+ int i;
+
+ if (khugepaged_max_ptes_none == HPAGE_PMD_NR - 1)
+ return false;
+
+ for (i = 0; i < folio_nr_pages(folio); i++) {
+- kaddr = kmap_local_folio(folio, i * PAGE_SIZE);
+- if (!memchr_inv(kaddr, 0, PAGE_SIZE)) {
+- num_zero_pages++;
+- if (num_zero_pages > khugepaged_max_ptes_none) {
+- kunmap_local(kaddr);
++ if (pages_identical(folio_page(folio, i), ZERO_PAGE(0))) {
++ if (++num_zero_pages > khugepaged_max_ptes_none)
+ return true;
+- }
+ } else {
+ /*
+ * Another path for early exit once the number
+ * of non-zero filled pages exceeds threshold.
+ */
+- num_filled_pages++;
+- if (num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none) {
+- kunmap_local(kaddr);
++ if (++num_filled_pages >= HPAGE_PMD_NR - khugepaged_max_ptes_none)
+ return false;
+- }
+ }
+- kunmap_local(kaddr);
+ }
+ return false;
+ }
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -301,9 +301,7 @@ static bool try_to_map_unused_to_zeropag
+ unsigned long idx)
+ {
+ struct page *page = folio_page(folio, idx);
+- bool contains_data;
+ pte_t newpte;
+- void *addr;
+
+ if (PageCompound(page))
+ return false;
+@@ -320,11 +318,7 @@ static bool try_to_map_unused_to_zeropag
+ * this subpage has been non present. If the subpage is only zero-filled
+ * then map it to the shared zeropage.
+ */
+- addr = kmap_local_page(page);
+- contains_data = memchr_inv(addr, 0, PAGE_SIZE);
+- kunmap_local(addr);
+-
+- if (contains_data)
++ if (!pages_identical(page, ZERO_PAGE(0)))
+ return false;
+
+ newpte = pte_mkspecial(pfn_pte(my_zero_pfn(pvmw->address),
--- /dev/null
+From 4b1ff850e0c1aacc23e923ed22989b827b9808f9 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Thu, 25 Sep 2025 12:32:36 +0200
+Subject: mptcp: pm: in-kernel: usable client side with C-flag
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 4b1ff850e0c1aacc23e923ed22989b827b9808f9 upstream.
+
+When servers set the C-flag in their MP_CAPABLE to tell clients not to
+create subflows to the initial address and port, clients will likely not
+use their other endpoints. That's because the in-kernel path-manager
+uses the 'subflow' endpoints to create subflows only to the initial
+address and port.
+
+If the limits have not been modified to accept ADD_ADDR, the client
+doesn't try to establish new subflows. If the limits accept ADD_ADDR,
+the routing routes will be used to select the source IP.
+
+The C-flag is typically set when the server is operating behind a legacy
+Layer 4 load balancer, or using anycast IP address. Clients having their
+different 'subflow' endpoints setup, don't end up creating multiple
+subflows as expected, and causing some deployment issues.
+
+A special case is then added here: when servers set the C-flag in the
+MPC and directly sends an ADD_ADDR, this single ADD_ADDR is accepted.
+The 'subflows' endpoints will then be used with this new remote IP and
+port. This exception is only allowed when the ADD_ADDR is sent
+immediately after the 3WHS, and makes the client switching to the 'fully
+established' mode. After that, 'select_local_address()' will not be able
+to find any subflows, because 'id_avail_bitmap' will be filled in
+mptcp_pm_create_subflow_or_signal_addr(), when switching to 'fully
+established' mode.
+
+Fixes: df377be38725 ("mptcp: add deny_join_id0 in mptcp_options_received")
+Cc: stable@vger.kernel.org
+Closes: https://github.com/multipath-tcp/mptcp_net-next/issues/536
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250925-net-next-mptcp-c-flag-laminar-v1-1-ad126cc47c6b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/pm.c | 7 +++++--
+ net/mptcp/pm_kernel.c | 50 +++++++++++++++++++++++++++++++++++++++++++++++++-
+ net/mptcp/protocol.h | 8 ++++++++
+ 3 files changed, 62 insertions(+), 3 deletions(-)
+
+--- a/net/mptcp/pm.c
++++ b/net/mptcp/pm.c
+@@ -617,9 +617,12 @@ void mptcp_pm_add_addr_received(const st
+ } else {
+ __MPTCP_INC_STATS(sock_net((struct sock *)msk), MPTCP_MIB_ADDADDRDROP);
+ }
+- /* id0 should not have a different address */
++ /* - id0 should not have a different address
++ * - special case for C-flag: linked to fill_local_addresses_vec()
++ */
+ } else if ((addr->id == 0 && !mptcp_pm_is_init_remote_addr(msk, addr)) ||
+- (addr->id > 0 && !READ_ONCE(pm->accept_addr))) {
++ (addr->id > 0 && !READ_ONCE(pm->accept_addr) &&
++ !mptcp_pm_add_addr_c_flag_case(msk))) {
+ mptcp_pm_announce_addr(msk, addr, true);
+ mptcp_pm_add_addr_send_ack(msk);
+ } else if (mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED)) {
+--- a/net/mptcp/pm_kernel.c
++++ b/net/mptcp/pm_kernel.c
+@@ -389,10 +389,12 @@ static unsigned int fill_local_addresses
+ struct mptcp_addr_info mpc_addr;
+ struct pm_nl_pernet *pernet;
+ unsigned int subflows_max;
++ bool c_flag_case;
+ int i = 0;
+
+ pernet = pm_nl_get_pernet_from_msk(msk);
+ subflows_max = mptcp_pm_get_subflows_max(msk);
++ c_flag_case = remote->id && mptcp_pm_add_addr_c_flag_case(msk);
+
+ mptcp_local_address((struct sock_common *)msk, &mpc_addr);
+
+@@ -405,12 +407,27 @@ static unsigned int fill_local_addresses
+ continue;
+
+ if (msk->pm.subflows < subflows_max) {
++ bool is_id0;
++
+ locals[i].addr = entry->addr;
+ locals[i].flags = entry->flags;
+ locals[i].ifindex = entry->ifindex;
+
++ is_id0 = mptcp_addresses_equal(&locals[i].addr,
++ &mpc_addr,
++ locals[i].addr.port);
++
++ if (c_flag_case &&
++ (entry->flags & MPTCP_PM_ADDR_FLAG_SUBFLOW)) {
++ __clear_bit(locals[i].addr.id,
++ msk->pm.id_avail_bitmap);
++
++ if (!is_id0)
++ msk->pm.local_addr_used++;
++ }
++
+ /* Special case for ID0: set the correct ID */
+- if (mptcp_addresses_equal(&locals[i].addr, &mpc_addr, locals[i].addr.port))
++ if (is_id0)
+ locals[i].addr.id = 0;
+
+ msk->pm.subflows++;
+@@ -419,6 +436,37 @@ static unsigned int fill_local_addresses
+ }
+ rcu_read_unlock();
+
++ /* Special case: peer sets the C flag, accept one ADD_ADDR if default
++ * limits are used -- accepting no ADD_ADDR -- and use subflow endpoints
++ */
++ if (!i && c_flag_case) {
++ unsigned int local_addr_max = mptcp_pm_get_local_addr_max(msk);
++
++ while (msk->pm.local_addr_used < local_addr_max &&
++ msk->pm.subflows < subflows_max) {
++ struct mptcp_pm_local *local = &locals[i];
++
++ if (!select_local_address(pernet, msk, local))
++ break;
++
++ __clear_bit(local->addr.id, msk->pm.id_avail_bitmap);
++
++ if (!mptcp_pm_addr_families_match(sk, &local->addr,
++ remote))
++ continue;
++
++ if (mptcp_addresses_equal(&local->addr, &mpc_addr,
++ local->addr.port))
++ continue;
++
++ msk->pm.local_addr_used++;
++ msk->pm.subflows++;
++ i++;
++ }
++
++ return i;
++ }
++
+ /* If the array is empty, fill in the single
+ * 'IPADDRANY' local address
+ */
+--- a/net/mptcp/protocol.h
++++ b/net/mptcp/protocol.h
+@@ -1201,6 +1201,14 @@ static inline void mptcp_pm_close_subflo
+ spin_unlock_bh(&msk->pm.lock);
+ }
+
++static inline bool mptcp_pm_add_addr_c_flag_case(struct mptcp_sock *msk)
++{
++ return READ_ONCE(msk->pm.remote_deny_join_id0) &&
++ msk->pm.local_addr_used == 0 &&
++ mptcp_pm_get_add_addr_accept_max(msk) == 0 &&
++ msk->pm.subflows < mptcp_pm_get_subflows_max(msk);
++}
++
+ void mptcp_sockopt_sync_locked(struct mptcp_sock *msk, struct sock *ssk);
+
+ static inline struct mptcp_ext *mptcp_get_ext(const struct sk_buff *skb)
--- /dev/null
+From 833d4313bc1e9e194814917d23e8874d6b651649 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Thu, 18 Sep 2025 10:50:18 +0200
+Subject: mptcp: reset blackhole on success with non-loopback ifaces
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 833d4313bc1e9e194814917d23e8874d6b651649 upstream.
+
+When a first MPTCP connection gets successfully established after a
+blackhole period, 'active_disable_times' was supposed to be reset when
+this connection was done via any non-loopback interfaces.
+
+Unfortunately, the opposite condition was checked: only reset when the
+connection was established via a loopback interface. Fixing this by
+simply looking at the opposite.
+
+This is similar to what is done with TCP FastOpen, see
+tcp_fastopen_active_disable_ofo_check().
+
+This patch is a follow-up of a previous discussion linked to commit
+893c49a78d9f ("mptcp: Use __sk_dst_get() and dst_dev_rcu() in
+mptcp_active_enable()."), see [1].
+
+Fixes: 27069e7cb3d1 ("mptcp: disable active MPTCP in case of blackhole")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/4209a283-8822-47bd-95b7-87e96d9b7ea3@kernel.org [1]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250918-net-next-mptcp-blackhole-reset-loopback-v1-1-bf5818326639@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mptcp/ctrl.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/mptcp/ctrl.c
++++ b/net/mptcp/ctrl.c
+@@ -507,7 +507,7 @@ void mptcp_active_enable(struct sock *sk
+ rcu_read_lock();
+ dst = __sk_dst_get(sk);
+ dev = dst ? dst_dev_rcu(dst) : NULL;
+- if (dev && (dev->flags & IFF_LOOPBACK))
++ if (!(dev && (dev->flags & IFF_LOOPBACK)))
+ atomic_set(&pernet->active_disable_times, 0);
+ rcu_read_unlock();
+ }
--- /dev/null
+From e4f574ca9c6dfa66695bb054ff5df43ecea873ec Mon Sep 17 00:00:00 2001
+From: Scott Mayhew <smayhew@redhat.com>
+Date: Wed, 6 Aug 2025 15:15:43 -0400
+Subject: nfsd: decouple the xprtsec policy check from check_nfsd_access()
+
+From: Scott Mayhew <smayhew@redhat.com>
+
+commit e4f574ca9c6dfa66695bb054ff5df43ecea873ec upstream.
+
+A while back I had reported that an NFSv3 client could successfully
+mount using '-o xprtsec=none' an export that had been exported with
+'xprtsec=tls:mtls'. By "successfully" I mean that the mount command
+would succeed and the mount would show up in /proc/mount. Attempting
+to do anything futher with the mount would be met with NFS3ERR_ACCES.
+
+This was fixed (albeit accidentally) by commit bb4f07f2409c ("nfsd:
+Fix NFSD_MAY_BYPASS_GSS and NFSD_MAY_BYPASS_GSS_ON_ROOT") and was
+subsequently re-broken by commit 0813c5f01249 ("nfsd: fix access
+checking for NLM under XPRTSEC policies").
+
+Transport Layer Security isn't an RPC security flavor or pseudo-flavor,
+so we shouldn't be conflating them when determining whether the access
+checks can be bypassed. Split check_nfsd_access() into two helpers, and
+have __fh_verify() call the helpers directly since __fh_verify() has
+logic that allows one or both of the checks to be skipped. All other
+sites will continue to call check_nfsd_access().
+
+Link: https://lore.kernel.org/linux-nfs/ZjO3Qwf_G87yNXb2@aion/
+Fixes: 9280c5774314 ("NFSD: Handle new xprtsec= export option")
+Cc: stable@vger.kernel.org
+Signed-off-by: Scott Mayhew <smayhew@redhat.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/export.c | 82 ++++++++++++++++++++++++++++++++++++++-----------------
+ fs/nfsd/export.h | 3 ++
+ fs/nfsd/nfsfh.c | 24 +++++++++++++++-
+ 3 files changed, 83 insertions(+), 26 deletions(-)
+
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -1082,50 +1082,62 @@ static struct svc_export *exp_find(struc
+ }
+
+ /**
+- * check_nfsd_access - check if access to export is allowed.
++ * check_xprtsec_policy - check if access to export is allowed by the
++ * xprtsec policy
+ * @exp: svc_export that is being accessed.
+- * @rqstp: svc_rqst attempting to access @exp (will be NULL for LOCALIO).
+- * @may_bypass_gss: reduce strictness of authorization check
++ * @rqstp: svc_rqst attempting to access @exp.
++ *
++ * Helper function for check_nfsd_access(). Note that callers should be
++ * using check_nfsd_access() instead of calling this function directly. The
++ * one exception is __fh_verify() since it has logic that may result in one
++ * or both of the helpers being skipped.
+ *
+ * Return values:
+ * %nfs_ok if access is granted, or
+ * %nfserr_wrongsec if access is denied
+ */
+-__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
+- bool may_bypass_gss)
++__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp)
+ {
+- struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
+- struct svc_xprt *xprt;
+-
+- /*
+- * If rqstp is NULL, this is a LOCALIO request which will only
+- * ever use a filehandle/credential pair for which access has
+- * been affirmed (by ACCESS or OPEN NFS requests) over the
+- * wire. So there is no need for further checks here.
+- */
+- if (!rqstp)
+- return nfs_ok;
+-
+- xprt = rqstp->rq_xprt;
++ struct svc_xprt *xprt = rqstp->rq_xprt;
+
+ if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_NONE) {
+ if (!test_bit(XPT_TLS_SESSION, &xprt->xpt_flags))
+- goto ok;
++ return nfs_ok;
+ }
+ if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_TLS) {
+ if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
+ !test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
+- goto ok;
++ return nfs_ok;
+ }
+ if (exp->ex_xprtsec_modes & NFSEXP_XPRTSEC_MTLS) {
+ if (test_bit(XPT_TLS_SESSION, &xprt->xpt_flags) &&
+ test_bit(XPT_PEER_AUTH, &xprt->xpt_flags))
+- goto ok;
++ return nfs_ok;
+ }
+- if (!may_bypass_gss)
+- goto denied;
++ return nfserr_wrongsec;
++}
++
++/**
++ * check_security_flavor - check if access to export is allowed by the
++ * security flavor
++ * @exp: svc_export that is being accessed.
++ * @rqstp: svc_rqst attempting to access @exp.
++ * @may_bypass_gss: reduce strictness of authorization check
++ *
++ * Helper function for check_nfsd_access(). Note that callers should be
++ * using check_nfsd_access() instead of calling this function directly. The
++ * one exception is __fh_verify() since it has logic that may result in one
++ * or both of the helpers being skipped.
++ *
++ * Return values:
++ * %nfs_ok if access is granted, or
++ * %nfserr_wrongsec if access is denied
++ */
++__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
++ bool may_bypass_gss)
++{
++ struct exp_flavor_info *f, *end = exp->ex_flavors + exp->ex_nflavors;
+
+-ok:
+ /* legacy gss-only clients are always OK: */
+ if (exp->ex_client == rqstp->rq_gssclient)
+ return nfs_ok;
+@@ -1167,10 +1179,30 @@ ok:
+ }
+ }
+
+-denied:
+ return nfserr_wrongsec;
+ }
+
++/**
++ * check_nfsd_access - check if access to export is allowed.
++ * @exp: svc_export that is being accessed.
++ * @rqstp: svc_rqst attempting to access @exp.
++ * @may_bypass_gss: reduce strictness of authorization check
++ *
++ * Return values:
++ * %nfs_ok if access is granted, or
++ * %nfserr_wrongsec if access is denied
++ */
++__be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
++ bool may_bypass_gss)
++{
++ __be32 status;
++
++ status = check_xprtsec_policy(exp, rqstp);
++ if (status != nfs_ok)
++ return status;
++ return check_security_flavor(exp, rqstp, may_bypass_gss);
++}
++
+ /*
+ * Uses rq_client and rq_gssclient to find an export; uses rq_client (an
+ * auth_unix client) if it's available and has secinfo information;
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -101,6 +101,9 @@ struct svc_expkey {
+
+ struct svc_cred;
+ int nfsexp_flags(struct svc_cred *cred, struct svc_export *exp);
++__be32 check_xprtsec_policy(struct svc_export *exp, struct svc_rqst *rqstp);
++__be32 check_security_flavor(struct svc_export *exp, struct svc_rqst *rqstp,
++ bool may_bypass_gss);
+ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
+ bool may_bypass_gss);
+
+--- a/fs/nfsd/nfsfh.c
++++ b/fs/nfsd/nfsfh.c
+@@ -364,10 +364,30 @@ __fh_verify(struct svc_rqst *rqstp,
+ if (error)
+ goto out;
+
++ /*
++ * If rqstp is NULL, this is a LOCALIO request which will only
++ * ever use a filehandle/credential pair for which access has
++ * been affirmed (by ACCESS or OPEN NFS requests) over the
++ * wire. Skip both the xprtsec policy and the security flavor
++ * checks.
++ */
++ if (!rqstp)
++ goto check_permissions;
++
+ if ((access & NFSD_MAY_NLM) && (exp->ex_flags & NFSEXP_NOAUTHNLM))
+ /* NLM is allowed to fully bypass authentication */
+ goto out;
+
++ /*
++ * NLM is allowed to bypass the xprtsec policy check because lockd
++ * doesn't support xprtsec.
++ */
++ if (!(access & NFSD_MAY_NLM)) {
++ error = check_xprtsec_policy(exp, rqstp);
++ if (error)
++ goto out;
++ }
++
+ if (access & NFSD_MAY_BYPASS_GSS)
+ may_bypass_gss = true;
+ /*
+@@ -379,13 +399,15 @@ __fh_verify(struct svc_rqst *rqstp,
+ && exp->ex_path.dentry == dentry)
+ may_bypass_gss = true;
+
+- error = check_nfsd_access(exp, rqstp, may_bypass_gss);
++ error = check_security_flavor(exp, rqstp, may_bypass_gss);
+ if (error)
+ goto out;
++
+ /* During LOCALIO call to fh_verify will be called with a NULL rqstp */
+ if (rqstp)
+ svc_xprt_set_valid(rqstp->rq_xprt);
+
++check_permissions:
+ /* Finally, check access permissions. */
+ error = nfsd_permission(cred, exp, dentry, access);
+ out:
--- /dev/null
+From ab1c282c010c4f327bd7addc3c0035fd8e3c1721 Mon Sep 17 00:00:00 2001
+From: Thorsten Blum <thorsten.blum@linux.dev>
+Date: Wed, 6 Aug 2025 03:10:01 +0200
+Subject: NFSD: Fix destination buffer size in nfsd4_ssc_setup_dul()
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+commit ab1c282c010c4f327bd7addc3c0035fd8e3c1721 upstream.
+
+Commit 5304877936c0 ("NFSD: Fix strncpy() fortify warning") replaced
+strncpy(,, sizeof(..)) with strlcpy(,, sizeof(..) - 1), but strlcpy()
+already guaranteed NUL-termination of the destination buffer and
+subtracting one byte potentially truncated the source string.
+
+The incorrect size was then carried over in commit 72f78ae00a8e ("NFSD:
+move from strlcpy with unused retval to strscpy") when switching from
+strlcpy() to strscpy().
+
+Fix this off-by-one error by using the full size of the destination
+buffer again.
+
+Cc: stable@vger.kernel.org
+Fixes: 5304877936c0 ("NFSD: Fix strncpy() fortify warning")
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4proc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/nfsd/nfs4proc.c
++++ b/fs/nfsd/nfs4proc.c
+@@ -1498,7 +1498,7 @@ try_again:
+ return 0;
+ }
+ if (work) {
+- strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr) - 1);
++ strscpy(work->nsui_ipaddr, ipaddr, sizeof(work->nsui_ipaddr));
+ refcount_set(&work->nsui_refcnt, 2);
+ work->nsui_busy = true;
+ list_add_tail(&work->nsui_list, &nn->nfsd_ssc_mount_list);
--- /dev/null
+From a082e4b4d08a4a0e656d90c2c05da85f23e6d0c9 Mon Sep 17 00:00:00 2001
+From: Olga Kornievskaia <okorniev@redhat.com>
+Date: Thu, 21 Aug 2025 16:31:46 -0400
+Subject: nfsd: nfserr_jukebox in nlm_fopen should lead to a retry
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+commit a082e4b4d08a4a0e656d90c2c05da85f23e6d0c9 upstream.
+
+When v3 NLM request finds a conflicting delegation, it triggers
+a delegation recall and nfsd_open fails with EAGAIN. nfsd_open
+then translates EAGAIN into nfserr_jukebox. In nlm_fopen, instead
+of returning nlm_failed for when there is a conflicting delegation,
+drop this NLM request so that the client retries. Once delegation
+is recalled and if a local lock is claimed, a retry would lead to
+nfsd returning a nlm_lck_blocked error or a successful nlm lock.
+
+Fixes: d343fce148a4 ("[PATCH] knfsd: Allow lockd to drop replies as appropriate")
+Cc: stable@vger.kernel.org # v6.6
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/lockd.c | 15 +++++++++++++++
+ 1 file changed, 15 insertions(+)
+
+--- a/fs/nfsd/lockd.c
++++ b/fs/nfsd/lockd.c
+@@ -57,6 +57,21 @@ nlm_fopen(struct svc_rqst *rqstp, struct
+ switch (nfserr) {
+ case nfs_ok:
+ return 0;
++ case nfserr_jukebox:
++ /* this error can indicate a presence of a conflicting
++ * delegation to an NLM lock request. Options are:
++ * (1) For now, drop this request and make the client
++ * retry. When delegation is returned, client's lock retry
++ * will complete.
++ * (2) NLM4_DENIED as per "spec" signals to the client
++ * that the lock is unavailable now but client can retry.
++ * Linux client implementation does not. It treats
++ * NLM4_DENIED same as NLM4_FAILED and errors the request.
++ * (3) For the future, treat this as blocked lock and try
++ * to callback when the delegation is returned but might
++ * not have a proper lock request to block on.
++ */
++ fallthrough;
+ case nfserr_dropit:
+ return nlm_drop_reply;
+ case nfserr_stale:
--- /dev/null
+From fa7a0a53eeb7e16402f82c3d5a9ef4bf5efe9357 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Fri, 26 Sep 2025 15:39:10 +0200
+Subject: s390: Add -Wno-pointer-sign to KBUILD_CFLAGS_DECOMPRESSOR
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit fa7a0a53eeb7e16402f82c3d5a9ef4bf5efe9357 upstream.
+
+If the decompressor is compiled with clang this can lead to the following
+warning:
+
+In file included from arch/s390/boot/startup.c:4:
+...
+In file included from ./include/linux/pgtable.h:6:
+./arch/s390/include/asm/pgtable.h:2065:48: warning: passing 'unsigned long *' to parameter of type
+ 'long *' converts between pointers to integer types with different sign [-Wpointer-sign]
+ 2065 | value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
+
+Add -Wno-pointer-sign to the decompressor compile flags, like it is also
+done for the kernel. This is similar to what was done for x86 to address
+the same problem [1].
+
+[1] commit dca5203e3fe2 ("x86/boot: Add -Wno-pointer-sign to KBUILD_CFLAGS")
+
+Cc: stable@vger.kernel.org
+Reported-by: Gerd Bayer <gbayer@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/s390/Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/Makefile
++++ b/arch/s390/Makefile
+@@ -25,6 +25,7 @@ endif
+ KBUILD_CFLAGS_DECOMPRESSOR := $(CLANG_FLAGS) -m64 -O2 -mpacked-stack -std=gnu11
+ KBUILD_CFLAGS_DECOMPRESSOR += -DDISABLE_BRANCH_PROFILING -D__NO_FORTIFY
+ KBUILD_CFLAGS_DECOMPRESSOR += -D__DECOMPRESSOR
++KBUILD_CFLAGS_DECOMPRESSOR += -Wno-pointer-sign
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-delete-null-pointer-checks -msoft-float -mbackchain
+ KBUILD_CFLAGS_DECOMPRESSOR += -fno-asynchronous-unwind-tables
+ KBUILD_CFLAGS_DECOMPRESSOR += -ffreestanding
--- /dev/null
+From f0edc8f113a39d1c9f8cf83e865c32b0668d80e0 Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <hca@linux.ibm.com>
+Date: Thu, 25 Sep 2025 10:45:17 +0200
+Subject: s390/cio/ioasm: Fix __xsch() condition code handling
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+commit f0edc8f113a39d1c9f8cf83e865c32b0668d80e0 upstream.
+
+For the __xsch() inline assembly the conversion to flag output macros is
+incomplete. Only the conditional shift of the return value was added, while
+the required changes to the inline assembly itself are missing.
+
+If compiled with GCC versions before 14.2 this leads to a double shift of
+the cc output operand and therefore the returned value of __xsch() is
+incorrectly always zero, instead of the expected condition code.
+
+Fixes: e200565d434b ("s390/cio/ioasm: Convert to use flag output macros")
+Cc: stable@vger.kernel.org
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Acked-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Reviewed-by: Juergen Christ <jchrist@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/cio/ioasm.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/s390/cio/ioasm.c b/drivers/s390/cio/ioasm.c
+index a540045b64a6..8b06b234e110 100644
+--- a/drivers/s390/cio/ioasm.c
++++ b/drivers/s390/cio/ioasm.c
+@@ -253,11 +253,10 @@ static inline int __xsch(struct subchannel_id schid)
+ asm volatile(
+ " lgr 1,%[r1]\n"
+ " xsch\n"
+- " ipm %[cc]\n"
+- " srl %[cc],28\n"
+- : [cc] "=&d" (ccode)
++ CC_IPM(cc)
++ : CC_OUT(cc, ccode)
+ : [r1] "d" (r1)
+- : "cc", "1");
++ : CC_CLOBBER_LIST("1"));
+ return CC_TRANSFORM(ccode);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 130e6de62107116eba124647116276266be0f84c Mon Sep 17 00:00:00 2001
+From: Jaehoon Kim <jhkim@linux.ibm.com>
+Date: Thu, 25 Sep 2025 17:47:08 +0200
+Subject: s390/dasd: enforce dma_alignment to ensure proper buffer validation
+
+From: Jaehoon Kim <jhkim@linux.ibm.com>
+
+commit 130e6de62107116eba124647116276266be0f84c upstream.
+
+The block layer validates buffer alignment using the device's
+dma_alignment value. If dma_alignment is smaller than
+logical_block_size(bp_block) - 1, misaligned buffers incorrectly pass
+validation and propagate to the lower-level driver.
+
+This patch adjusts dma_alignment to be at least logical_block_size -1,
+ensuring that misaligned buffers are properly rejected at the block
+layer and do not reach the DASD driver unnecessarily.
+
+Fixes: 2a07bb64d801 ("s390/dasd: Remove DMA alignment")
+Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
+Cc: stable@vger.kernel.org #6.11+
+Signed-off-by: Jaehoon Kim <jhkim@linux.ibm.com>
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/block/dasd.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -334,6 +334,11 @@ static int dasd_state_basic_to_ready(str
+ lim.max_dev_sectors = device->discipline->max_sectors(block);
+ lim.max_hw_sectors = lim.max_dev_sectors;
+ lim.logical_block_size = block->bp_block;
++ /*
++ * Adjust dma_alignment to match block_size - 1
++ * to ensure proper buffer alignment checks in the block layer.
++ */
++ lim.dma_alignment = lim.logical_block_size - 1;
+
+ if (device->discipline->has_discard) {
+ unsigned int max_bytes;
--- /dev/null
+From 8f4ed0ce4857ceb444174503fc9058720d4faaa1 Mon Sep 17 00:00:00 2001
+From: Jaehoon Kim <jhkim@linux.ibm.com>
+Date: Thu, 25 Sep 2025 17:47:07 +0200
+Subject: s390/dasd: Return BLK_STS_INVAL for EINVAL from do_dasd_request
+
+From: Jaehoon Kim <jhkim@linux.ibm.com>
+
+commit 8f4ed0ce4857ceb444174503fc9058720d4faaa1 upstream.
+
+Currently, if CCW request creation fails with -EINVAL, the DASD driver
+returns BLK_STS_IOERR to the block layer.
+
+This can happen, for example, when a user-space application such as QEMU
+passes a misaligned buffer, but the original cause of the error is
+masked as a generic I/O error.
+
+This patch changes the behavior so that -EINVAL is returned as
+BLK_STS_INVAL, allowing user space to properly detect alignment issues
+instead of interpreting them as I/O errors.
+
+Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
+Cc: stable@vger.kernel.org #6.11+
+Signed-off-by: Jaehoon Kim <jhkim@linux.ibm.com>
+Signed-off-by: Stefan Haberland <sth@linux.ibm.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/s390/block/dasd.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/drivers/s390/block/dasd.c
++++ b/drivers/s390/block/dasd.c
+@@ -3119,12 +3119,14 @@ static blk_status_t do_dasd_request(stru
+ PTR_ERR(cqr) == -ENOMEM ||
+ PTR_ERR(cqr) == -EAGAIN) {
+ rc = BLK_STS_RESOURCE;
+- goto out;
++ } else if (PTR_ERR(cqr) == -EINVAL) {
++ rc = BLK_STS_INVAL;
++ } else {
++ DBF_DEV_EVENT(DBF_ERR, basedev,
++ "CCW creation failed (rc=%ld) on request %p",
++ PTR_ERR(cqr), req);
++ rc = BLK_STS_IOERR;
+ }
+- DBF_DEV_EVENT(DBF_ERR, basedev,
+- "CCW creation failed (rc=%ld) on request %p",
+- PTR_ERR(cqr), req);
+- rc = BLK_STS_IOERR;
+ goto out;
+ }
+ /*
--- /dev/null
+From 008385efd05e04d8dff299382df2e8be0f91d8a0 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Thu, 25 Sep 2025 12:32:37 +0200
+Subject: selftests: mptcp: join: validate C-flag + def limit
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 008385efd05e04d8dff299382df2e8be0f91d8a0 upstream.
+
+The previous commit adds an exception for the C-flag case. The
+'mptcp_join.sh' selftest is extended to validate this case.
+
+In this subtest, there is a typical CDN deployment with a client where
+MPTCP endpoints have been 'automatically' configured:
+
+- the server set net.mptcp.allow_join_initial_addr_port=0
+
+- the client has multiple 'subflow' endpoints, and the default limits:
+ not accepting ADD_ADDRs.
+
+Without the parent patch, the client is not able to establish new
+subflows using its 'subflow' endpoints. The parent commit fixes that.
+
+The 'Fixes' tag here below is the same as the one from the previous
+commit: this patch here is not fixing anything wrong in the selftests,
+but it validates the previous fix for an issue introduced by this commit
+ID.
+
+Fixes: df377be38725 ("mptcp: add deny_join_id0 in mptcp_options_received")
+Cc: stable@vger.kernel.org
+Reviewed-by: Geliang Tang <geliang@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250925-net-next-mptcp-c-flag-laminar-v1-2-ad126cc47c6b@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3187,6 +3187,17 @@ deny_join_id0_tests()
+ run_tests $ns1 $ns2 10.0.1.1
+ chk_join_nr 1 1 1
+ fi
++
++ # default limits, server deny join id 0 + signal
++ if reset_with_allow_join_id0 "default limits, server deny join id 0" 0 1; then
++ pm_nl_set_limits $ns1 0 2
++ pm_nl_set_limits $ns2 0 2
++ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
++ pm_nl_add_endpoint $ns2 10.0.3.2 flags subflow
++ pm_nl_add_endpoint $ns2 10.0.4.2 flags subflow
++ run_tests $ns1 $ns2 10.0.1.1
++ chk_join_nr 2 2 2
++ fi
+ }
+
+ fullmesh_tests()
x86-fred-remove-endbr64-from-fred-entry-points.patch
x86-umip-check-that-the-instruction-opcode-is-at-least-two-bytes.patch
x86-umip-fix-decoding-of-register-forms-of-0f-01-sgdt-and-sidt-aliases.patch
+mptcp-pm-in-kernel-usable-client-side-with-c-flag.patch
+mptcp-reset-blackhole-on-success-with-non-loopback-ifaces.patch
+selftests-mptcp-join-validate-c-flag-def-limit.patch
+s390-cio-ioasm-fix-__xsch-condition-code-handling.patch
+s390-dasd-enforce-dma_alignment-to-ensure-proper-buffer-validation.patch
+s390-dasd-return-blk_sts_inval-for-einval-from-do_dasd_request.patch
+s390-add-wno-pointer-sign-to-kbuild_cflags_decompressor.patch
+slab-prevent-warnings-when-slab-obj_exts-vector-allocation-fails.patch
+slab-mark-slab-obj_exts-allocation-failures-unconditionally.patch
+wifi-ath11k-hal-srng-don-t-deinitialize-and-re-initialize-again.patch
+wifi-iwlwifi-fix-dentry-reference-leak-in-iwl_mld_add_link_debugfs.patch
+wifi-rtw89-avoid-possible-tx-wait-initialization-race.patch
+wifi-mt76-mt7925u-add-vid-pid-for-netgear-a9000.patch
+wifi-mt76-mt7921u-add-vid-pid-for-netgear-a7500.patch
+mm-thp-fix-mte-tag-mismatch-when-replacing-zero-filled-subpages.patch
+mm-rmap-fix-soft-dirty-and-uffd-wp-bit-loss-when-remapping-zero-filled-mthp-subpage-to-shared-zeropage.patch
+mm-page_alloc-only-set-alloc_highatomic-for-__gpf_high-allocations.patch
+mm-hugetlb-early-exit-from-hugetlb_pages_alloc_boot-when-max_huge_pages-0.patch
+mm-damon-vaddr-do-not-repeat-pte_offset_map_lock-until-success.patch
+mm-damon-lru_sort-use-param_ctx-for-damon_attrs-staging.patch
+nfsd-decouple-the-xprtsec-policy-check-from-check_nfsd_access.patch
+nfsd-fix-destination-buffer-size-in-nfsd4_ssc_setup_dul.patch
+nfsd-nfserr_jukebox-in-nlm_fopen-should-lead-to-a-retry.patch
--- /dev/null
+From f7381b9116407ba2a429977c80ff8df953ea9354 Mon Sep 17 00:00:00 2001
+From: Suren Baghdasaryan <surenb@google.com>
+Date: Mon, 15 Sep 2025 13:09:18 -0700
+Subject: slab: mark slab->obj_exts allocation failures unconditionally
+
+From: Suren Baghdasaryan <surenb@google.com>
+
+commit f7381b9116407ba2a429977c80ff8df953ea9354 upstream.
+
+alloc_slab_obj_exts() should mark failed obj_exts vector allocations
+independent on whether the vector is being allocated for a new or an
+existing slab. Current implementation skips doing this for existing
+slabs. Fix this by marking failed allocations unconditionally.
+
+Fixes: 09c46563ff6d ("codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations")
+Reported-by: Shakeel Butt <shakeel.butt@linux.dev>
+Closes: https://lore.kernel.org/all/avhakjldsgczmq356gkwmvfilyvf7o6temvcmtt5lqd4fhp5rk@47gp2ropyixg/
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Cc: stable@vger.kernel.org # v6.10+
+Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slub.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/mm/slub.c
++++ b/mm/slub.c
+@@ -2034,8 +2034,7 @@ int alloc_slab_obj_exts(struct slab *sla
+ slab_nid(slab));
+ if (!vec) {
+ /* Mark vectors which failed to allocate */
+- if (new_slab)
+- mark_failed_objexts_alloc(slab);
++ mark_failed_objexts_alloc(slab);
+
+ return -ENOMEM;
+ }
--- /dev/null
+From 4038016397da5c1cebb10e7c85a36d06123724a8 Mon Sep 17 00:00:00 2001
+From: Suren Baghdasaryan <surenb@google.com>
+Date: Mon, 15 Sep 2025 13:09:17 -0700
+Subject: slab: prevent warnings when slab obj_exts vector allocation fails
+
+From: Suren Baghdasaryan <surenb@google.com>
+
+commit 4038016397da5c1cebb10e7c85a36d06123724a8 upstream.
+
+When object extension vector allocation fails, we set slab->obj_exts to
+OBJEXTS_ALLOC_FAIL to indicate the failure. Later, once the vector is
+successfully allocated, we will use this flag to mark codetag references
+stored in that vector as empty to avoid codetag warnings.
+
+slab_obj_exts() used to retrieve the slab->obj_exts vector pointer checks
+slab->obj_exts for being either NULL or a pointer with MEMCG_DATA_OBJEXTS
+bit set. However it does not handle the case when slab->obj_exts equals
+OBJEXTS_ALLOC_FAIL. Add the missing condition to avoid extra warning.
+
+Fixes: 09c46563ff6d ("codetag: debug: introduce OBJEXTS_ALLOC_FAIL to mark failed slab_ext allocations")
+Reported-by: Shakeel Butt <shakeel.butt@linux.dev>
+Closes: https://lore.kernel.org/all/jftidhymri2af5u3xtcqry3cfu6aqzte3uzlznhlaylgrdztsi@5vpjnzpsemf5/
+Signed-off-by: Suren Baghdasaryan <surenb@google.com>
+Cc: stable@vger.kernel.org # v6.10+
+Acked-by: Shakeel Butt <shakeel.butt@linux.dev>
+Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/slab.h | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/mm/slab.h
++++ b/mm/slab.h
+@@ -526,8 +526,12 @@ static inline struct slabobj_ext *slab_o
+ unsigned long obj_exts = READ_ONCE(slab->obj_exts);
+
+ #ifdef CONFIG_MEMCG
+- VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
+- slab_page(slab));
++ /*
++ * obj_exts should be either NULL, a valid pointer with
++ * MEMCG_DATA_OBJEXTS bit set or be equal to OBJEXTS_ALLOC_FAIL.
++ */
++ VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS) &&
++ obj_exts != OBJEXTS_ALLOC_FAIL, slab_page(slab));
+ VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
+ #endif
+ return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
--- /dev/null
+From 32be3ca4cf78b309dfe7ba52fe2d7cc3c23c5634 Mon Sep 17 00:00:00 2001
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Date: Tue, 22 Jul 2025 10:31:21 +0500
+Subject: wifi: ath11k: HAL SRNG: don't deinitialize and re-initialize again
+
+From: Muhammad Usama Anjum <usama.anjum@collabora.com>
+
+commit 32be3ca4cf78b309dfe7ba52fe2d7cc3c23c5634 upstream.
+
+Don't deinitialize and reinitialize the HAL helpers. The dma memory is
+deallocated and there is high possibility that we'll not be able to get
+the same memory allocated from dma when there is high memory pressure.
+
+Tested-on: WCN6855 hw2.0 PCI WLAN.HSP.1.1-03926.13-QCAHSPSWPL_V2_SILICONZ_CE-2.52297.6
+
+Fixes: d5c65159f289 ("ath11k: driver for Qualcomm IEEE 802.11ax devices")
+Cc: stable@vger.kernel.org
+Cc: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Reviewed-by: Baochen Qiang <baochen.qiang@oss.qualcomm.com>
+Signed-off-by: Muhammad Usama Anjum <usama.anjum@collabora.com>
+Link: https://patch.msgid.link/20250722053121.1145001-1-usama.anjum@collabora.com
+Signed-off-by: Jeff Johnson <jeff.johnson@oss.qualcomm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/ath11k/core.c | 6 +-----
+ drivers/net/wireless/ath/ath11k/hal.c | 16 ++++++++++++++++
+ drivers/net/wireless/ath/ath11k/hal.h | 1 +
+ 3 files changed, 18 insertions(+), 5 deletions(-)
+
+--- a/drivers/net/wireless/ath/ath11k/core.c
++++ b/drivers/net/wireless/ath/ath11k/core.c
+@@ -2215,14 +2215,10 @@ static int ath11k_core_reconfigure_on_cr
+ mutex_unlock(&ab->core_lock);
+
+ ath11k_dp_free(ab);
+- ath11k_hal_srng_deinit(ab);
++ ath11k_hal_srng_clear(ab);
+
+ ab->free_vdev_map = (1LL << (ab->num_radios * TARGET_NUM_VDEVS(ab))) - 1;
+
+- ret = ath11k_hal_srng_init(ab);
+- if (ret)
+- return ret;
+-
+ clear_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags);
+
+ ret = ath11k_core_qmi_firmware_ready(ab);
+--- a/drivers/net/wireless/ath/ath11k/hal.c
++++ b/drivers/net/wireless/ath/ath11k/hal.c
+@@ -1386,6 +1386,22 @@ void ath11k_hal_srng_deinit(struct ath11
+ }
+ EXPORT_SYMBOL(ath11k_hal_srng_deinit);
+
++void ath11k_hal_srng_clear(struct ath11k_base *ab)
++{
++ /* No need to memset rdp and wrp memory since each individual
++ * segment would get cleared in ath11k_hal_srng_src_hw_init()
++ * and ath11k_hal_srng_dst_hw_init().
++ */
++ memset(ab->hal.srng_list, 0,
++ sizeof(ab->hal.srng_list));
++ memset(ab->hal.shadow_reg_addr, 0,
++ sizeof(ab->hal.shadow_reg_addr));
++ ab->hal.avail_blk_resource = 0;
++ ab->hal.current_blk_index = 0;
++ ab->hal.num_shadow_reg_configured = 0;
++}
++EXPORT_SYMBOL(ath11k_hal_srng_clear);
++
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
+ {
+ struct hal_srng *srng;
+--- a/drivers/net/wireless/ath/ath11k/hal.h
++++ b/drivers/net/wireless/ath/ath11k/hal.h
+@@ -965,6 +965,7 @@ int ath11k_hal_srng_setup(struct ath11k_
+ struct hal_srng_params *params);
+ int ath11k_hal_srng_init(struct ath11k_base *ath11k);
+ void ath11k_hal_srng_deinit(struct ath11k_base *ath11k);
++void ath11k_hal_srng_clear(struct ath11k_base *ab);
+ void ath11k_hal_dump_srng_stats(struct ath11k_base *ab);
+ void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
+ u32 **cfg, u32 *len);
--- /dev/null
+From ff46e2e7034c78489fa7a6bc35f7c9dd8ab82905 Mon Sep 17 00:00:00 2001
+From: Miaoqian Lin <linmq006@gmail.com>
+Date: Tue, 2 Sep 2025 12:09:49 +0800
+Subject: wifi: iwlwifi: Fix dentry reference leak in iwl_mld_add_link_debugfs
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+commit ff46e2e7034c78489fa7a6bc35f7c9dd8ab82905 upstream.
+
+The debugfs_lookup() function increases the dentry reference count.
+Add missing dput() call to release the reference when the "iwlmld"
+directory already exists.
+
+Fixes: d1e879ec600f ("wifi: iwlwifi: add iwlmld sub-driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Link: https://patch.msgid.link/20250902040955.2362472-1-linmq006@gmail.com
+Signed-off-by: Miri Korenblit <miriam.rachel.korenblit@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mld/debugfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+index cc052b0aa53f..372204bf8452 100644
+--- a/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
++++ b/drivers/net/wireless/intel/iwlwifi/mld/debugfs.c
+@@ -1001,8 +1001,12 @@ void iwl_mld_add_link_debugfs(struct ieee80211_hw *hw,
+ * If not, this is a per-link dir of a MLO vif, add in it the iwlmld
+ * dir.
+ */
+- if (!mld_link_dir)
++ if (!mld_link_dir) {
+ mld_link_dir = debugfs_create_dir("iwlmld", dir);
++ } else {
++ /* Release the reference from debugfs_lookup */
++ dput(mld_link_dir);
++ }
+ }
+
+ static ssize_t _iwl_dbgfs_fixed_rate_write(struct iwl_mld *mld, char *buf,
+--
+2.51.0
+
--- /dev/null
+From fc6627ca8a5f811b601aea74e934cf8a048c88ac Mon Sep 17 00:00:00 2001
+From: Nick Morrow <morrownr@gmail.com>
+Date: Fri, 12 Sep 2025 15:45:56 -0500
+Subject: wifi: mt76: mt7921u: Add VID/PID for Netgear A7500
+
+From: Nick Morrow <morrownr@gmail.com>
+
+commit fc6627ca8a5f811b601aea74e934cf8a048c88ac upstream.
+
+Add VID/PID 0846/9065 for Netgear A7500.
+
+Reported-by: Autumn Dececco <autumndececco@gmail.com>
+Tested-by: Autumn Dececco <autumndececco@gmail.com>
+Signed-off-by: Nick Morrow <morrownr@gmail.com>
+Cc: stable@vger.kernel.org
+Acked-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/80bacfd6-6073-4ce5-be32-ae9580832337@gmail.com
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7921/usb.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7921/usb.c
+@@ -21,6 +21,9 @@ static const struct usb_device_id mt7921
+ /* Netgear, Inc. [A8000,AXE3000] */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9060, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
++ /* Netgear, Inc. A7500 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9065, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
+ /* TP-Link TXE50UH */
+ { USB_DEVICE_AND_INTERFACE_INFO(0x35bc, 0x0107, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7921_FIRMWARE_WM },
--- /dev/null
+From f6159b2051e157550d7609e19d04471609c6050b Mon Sep 17 00:00:00 2001
+From: Nick Morrow <morrownr@gmail.com>
+Date: Tue, 8 Jul 2025 16:40:42 -0500
+Subject: wifi: mt76: mt7925u: Add VID/PID for Netgear A9000
+
+From: Nick Morrow <morrownr@gmail.com>
+
+commit f6159b2051e157550d7609e19d04471609c6050b upstream.
+
+Add VID/PID 0846/9072 for recently released Netgear A9000.
+
+Signed-off-by: Nick Morrow <morrownr@gmail.com>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/7afd3c3c-e7cf-4bd9-801d-bdfc76def506@gmail.com
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/usb.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/usb.c
+@@ -12,6 +12,9 @@
+ static const struct usb_device_id mt7925u_device_table[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(0x0e8d, 0x7925, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
++ /* Netgear, Inc. A9000 */
++ { USB_DEVICE_AND_INTERFACE_INFO(0x0846, 0x9072, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)MT7925_FIRMWARE_WM },
+ { },
+ };
+
--- /dev/null
+From c24248ed78f33ea299ea61d105355ba47157d49f Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Sat, 20 Sep 2025 00:08:48 +0300
+Subject: wifi: rtw89: avoid possible TX wait initialization race
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit c24248ed78f33ea299ea61d105355ba47157d49f upstream.
+
+The value of skb_data->wait indicates whether skb is passed on to the
+core mac80211 stack or released by the driver itself. Make sure that by
+the time skb is added to txwd queue and becomes visible to the completing
+side, it has already allocated and initialized TX wait related data (in
+case it's needed).
+
+This is found by code review and addresses a possible race scenario
+described below:
+
+ Waiting thread Completing thread
+
+rtw89_core_send_nullfunc()
+ rtw89_core_tx_write_link()
+ ...
+ rtw89_pci_txwd_submit()
+ skb_data->wait = NULL
+ /* add skb to the queue */
+ skb_queue_tail(&txwd->queue, skb)
+
+ /* another thread (e.g. rtw89_ops_tx) performs TX kick off for the same queue */
+
+ rtw89_pci_napi_poll()
+ ...
+ rtw89_pci_release_txwd_skb()
+ /* get skb from the queue */
+ skb_unlink(skb, &txwd->queue)
+ rtw89_pci_tx_status()
+ rtw89_core_tx_wait_complete()
+ /* use incorrect skb_data->wait */
+ rtw89_core_tx_kick_off_and_wait()
+ /* assign skb_data->wait but too late */
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 1ae5ca615285 ("wifi: rtw89: add function to wait for completion of TX skbs")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Acked-by: Ping-Ke Shih <pkshih@realtek.com>
+Signed-off-by: Ping-Ke Shih <pkshih@realtek.com>
+Link: https://patch.msgid.link/20250919210852.823912-3-pchelkin@ispras.ru
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/realtek/rtw89/core.c | 39 ++++++++++++++++--------------
+ drivers/net/wireless/realtek/rtw89/core.h | 3 +-
+ drivers/net/wireless/realtek/rtw89/pci.c | 2 -
+ 3 files changed, 24 insertions(+), 20 deletions(-)
+
+--- a/drivers/net/wireless/realtek/rtw89/core.c
++++ b/drivers/net/wireless/realtek/rtw89/core.c
+@@ -1091,25 +1091,14 @@ void rtw89_core_tx_kick_off(struct rtw89
+ }
+
+ int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+- int qsel, unsigned int timeout)
++ struct rtw89_tx_wait_info *wait, int qsel,
++ unsigned int timeout)
+ {
+- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+- struct rtw89_tx_wait_info *wait;
+ unsigned long time_left;
+ int ret = 0;
+
+ lockdep_assert_wiphy(rtwdev->hw->wiphy);
+
+- wait = kzalloc(sizeof(*wait), GFP_KERNEL);
+- if (!wait) {
+- rtw89_core_tx_kick_off(rtwdev, qsel);
+- return 0;
+- }
+-
+- init_completion(&wait->completion);
+- wait->skb = skb;
+- rcu_assign_pointer(skb_data->wait, wait);
+-
+ rtw89_core_tx_kick_off(rtwdev, qsel);
+ time_left = wait_for_completion_timeout(&wait->completion,
+ msecs_to_jiffies(timeout));
+@@ -1172,10 +1161,12 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwde
+ static int rtw89_core_tx_write_link(struct rtw89_dev *rtwdev,
+ struct rtw89_vif_link *rtwvif_link,
+ struct rtw89_sta_link *rtwsta_link,
+- struct sk_buff *skb, int *qsel, bool sw_mld)
++ struct sk_buff *skb, int *qsel, bool sw_mld,
++ struct rtw89_tx_wait_info *wait)
+ {
+ struct ieee80211_sta *sta = rtwsta_link_to_sta_safe(rtwsta_link);
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
++ struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ struct rtw89_vif *rtwvif = rtwvif_link->rtwvif;
+ struct rtw89_core_tx_request tx_req = {};
+ int ret;
+@@ -1192,6 +1183,8 @@ static int rtw89_core_tx_write_link(stru
+ rtw89_core_tx_update_desc_info(rtwdev, &tx_req);
+ rtw89_core_tx_wake(rtwdev, &tx_req);
+
++ rcu_assign_pointer(skb_data->wait, wait);
++
+ ret = rtw89_hci_tx_write(rtwdev, &tx_req);
+ if (ret) {
+ rtw89_err(rtwdev, "failed to transmit skb to HCI\n");
+@@ -1228,7 +1221,8 @@ int rtw89_core_tx_write(struct rtw89_dev
+ }
+ }
+
+- return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false);
++ return rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, qsel, false,
++ NULL);
+ }
+
+ static __le32 rtw89_build_txwd_body0(struct rtw89_tx_desc_info *desc_info)
+@@ -3426,6 +3420,7 @@ int rtw89_core_send_nullfunc(struct rtw8
+ struct ieee80211_vif *vif = rtwvif_link_to_vif(rtwvif_link);
+ int link_id = ieee80211_vif_is_mld(vif) ? rtwvif_link->link_id : -1;
+ struct rtw89_sta_link *rtwsta_link;
++ struct rtw89_tx_wait_info *wait;
+ struct ieee80211_sta *sta;
+ struct ieee80211_hdr *hdr;
+ struct rtw89_sta *rtwsta;
+@@ -3435,6 +3430,12 @@ int rtw89_core_send_nullfunc(struct rtw8
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->cfg.assoc)
+ return 0;
+
++ wait = kzalloc(sizeof(*wait), GFP_KERNEL);
++ if (!wait)
++ return -ENOMEM;
++
++ init_completion(&wait->completion);
++
+ rcu_read_lock();
+ sta = ieee80211_find_sta(vif, vif->cfg.ap_addr);
+ if (!sta) {
+@@ -3449,6 +3450,8 @@ int rtw89_core_send_nullfunc(struct rtw8
+ goto out;
+ }
+
++ wait->skb = skb;
++
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (ps)
+ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
+@@ -3460,7 +3463,8 @@ int rtw89_core_send_nullfunc(struct rtw8
+ goto out;
+ }
+
+- ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true);
++ ret = rtw89_core_tx_write_link(rtwdev, rtwvif_link, rtwsta_link, skb, &qsel, true,
++ wait);
+ if (ret) {
+ rtw89_warn(rtwdev, "nullfunc transmit failed: %d\n", ret);
+ dev_kfree_skb_any(skb);
+@@ -3469,10 +3473,11 @@ int rtw89_core_send_nullfunc(struct rtw8
+
+ rcu_read_unlock();
+
+- return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, qsel,
++ return rtw89_core_tx_kick_off_and_wait(rtwdev, skb, wait, qsel,
+ timeout);
+ out:
+ rcu_read_unlock();
++ kfree(wait);
+
+ return ret;
+ }
+--- a/drivers/net/wireless/realtek/rtw89/core.h
++++ b/drivers/net/wireless/realtek/rtw89/core.h
+@@ -7389,7 +7389,8 @@ int rtw89_h2c_tx(struct rtw89_dev *rtwde
+ struct sk_buff *skb, bool fwdl);
+ void rtw89_core_tx_kick_off(struct rtw89_dev *rtwdev, u8 qsel);
+ int rtw89_core_tx_kick_off_and_wait(struct rtw89_dev *rtwdev, struct sk_buff *skb,
+- int qsel, unsigned int timeout);
++ struct rtw89_tx_wait_info *wait, int qsel,
++ unsigned int timeout);
+ void rtw89_core_fill_txdesc(struct rtw89_dev *rtwdev,
+ struct rtw89_tx_desc_info *desc_info,
+ void *txdesc);
+--- a/drivers/net/wireless/realtek/rtw89/pci.c
++++ b/drivers/net/wireless/realtek/rtw89/pci.c
+@@ -1372,7 +1372,6 @@ static int rtw89_pci_txwd_submit(struct
+ struct pci_dev *pdev = rtwpci->pdev;
+ struct sk_buff *skb = tx_req->skb;
+ struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb);
+- struct rtw89_tx_skb_data *skb_data = RTW89_TX_SKB_CB(skb);
+ bool en_wd_info = desc_info->en_wd_info;
+ u32 txwd_len;
+ u32 txwp_len;
+@@ -1388,7 +1387,6 @@ static int rtw89_pci_txwd_submit(struct
+ }
+
+ tx_data->dma = dma;
+- rcu_assign_pointer(skb_data->wait, NULL);
+
+ txwp_len = sizeof(*txwp_info);
+ txwd_len = chip->txwd_body_size;