git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for all trees
author    Sasha Levin <sashal@kernel.org>
          Wed, 26 Nov 2025 20:35:29 +0000 (15:35 -0500)
committer Sasha Levin <sashal@kernel.org>
          Wed, 26 Nov 2025 20:35:29 +0000 (15:35 -0500)
Signed-off-by: Sasha Levin <sashal@kernel.org>
23 files changed:
queue-5.10/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch [new file with mode: 0644]
queue-5.10/series
queue-5.15/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch [new file with mode: 0644]
queue-5.15/mm-mprotect-use-long-for-page-accountings-and-retval.patch [new file with mode: 0644]
queue-5.15/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch [new file with mode: 0644]
queue-5.15/series
queue-5.4/mm-page_alloc-fix-hash-table-order-logging-in-alloc_.patch [new file with mode: 0644]
queue-5.4/series
queue-6.1/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch [new file with mode: 0644]
queue-6.1/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch [new file with mode: 0644]
queue-6.1/series
queue-6.12/alsa-usb-audio-fix-missing-unlock-at-error-path-of-m.patch [new file with mode: 0644]
queue-6.12/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch [new file with mode: 0644]
queue-6.12/kvm-arm64-make-all-32bit-id-registers-fully-writable.patch [new file with mode: 0644]
queue-6.12/revert-rdma-irdma-update-kconfig.patch [new file with mode: 0644]
queue-6.12/s390-mm-fix-__ptep_rdp-inline-assembly.patch [new file with mode: 0644]
queue-6.12/series
queue-6.17/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch [new file with mode: 0644]
queue-6.17/series
queue-6.6/f2fs-compress-change-the-first-parameter-of-page_arr.patch [new file with mode: 0644]
queue-6.6/f2fs-compress-fix-uaf-of-f2fs_inode_info-in-f2fs_fre.patch [new file with mode: 0644]
queue-6.6/s390-mm-fix-__ptep_rdp-inline-assembly.patch [new file with mode: 0644]
queue-6.6/series

diff --git a/queue-5.10/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch b/queue-5.10/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
new file mode 100644
index 0000000..7da9f4b
--- /dev/null
+++ b/queue-5.10/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
@@ -0,0 +1,55 @@
+From d671391fb6c0c1afe8f27f9e3aeb783a8db31f9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 21:40:41 +0200
+Subject: mm/mm_init: fix hash table order logging in alloc_large_system_hash()
+
+From: Isaac J. Manjarres <isaacmanjarres@google.com>
+
+[ Upstream commit 0d6c356dd6547adac2b06b461528e3573f52d953 ]
+
+When emitting the order of the allocation for a hash table,
+alloc_large_system_hash() unconditionally subtracts PAGE_SHIFT from log
+base 2 of the allocation size.  This is not correct if the allocation size
+is smaller than a page, and yields a negative value for the order as seen
+below:
+
+TCP established hash table entries: 32 (order: -4, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: -2, 1024 bytes, linear)
+
+Use get_order() to compute the order when emitting the hash table
+information to correctly handle cases where the allocation size is smaller
+than a page:
+
+TCP established hash table entries: 32 (order: 0, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: 0, 1024 bytes, linear)
+
+Link: https://lkml.kernel.org/r/20251028191020.413002-1-isaacmanjarres@google.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Isaac J. Manjarres <isaacmanjarres@google.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 0d6c356dd6547adac2b06b461528e3573f52d953)
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index d906c6b961815..495a350c90a52 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -8372,7 +8372,7 @@ void *__init alloc_large_system_hash(const char *tablename,
+               panic("Failed to allocate %s hash table\n", tablename);
+       pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
+-              tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
++              tablename, 1UL << log2qty, get_order(size), size,
+               virt ? "vmalloc" : "linear");
+       if (_hash_shift)
+-- 
+2.51.0
+
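
The arithmetic being fixed is easy to demonstrate with a stand-alone
user-space sketch (PAGE_SHIFT is hard-coded to 12 as an assumption; the
kernel derives it per architecture): ilog2(size) - PAGE_SHIFT goes
negative for sub-page sizes, while get_order() bottoms out at 0, since
even a 256-byte table still occupies one order-0 page.

#include <stdio.h>

#define PAGE_SHIFT 12			/* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* ilog2() of a power-of-two size, as the old logging expression used. */
static int ilog2_sketch(unsigned long v)
{
	return 8 * sizeof(v) - 1 - __builtin_clzl(v);
}

/* get_order(): smallest order with (PAGE_SIZE << order) >= size. */
static int get_order_sketch(unsigned long size)
{
	int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 256, 1024, PAGE_SIZE, 4 * PAGE_SIZE };

	for (int i = 0; i < 4; i++)
		printf("size %5lu: old order %2d, get_order() %d\n",
		       sizes[i], ilog2_sketch(sizes[i]) - PAGE_SHIFT,
		       get_order_sketch(sizes[i]));
	return 0;	/* prints -4/0, -2/0, 0/0, 2/2 */
}
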
diff --git a/queue-5.10/series b/queue-5.10/series
index bb84108286832f8f03f9872fa3993a7de942fc17..60f6d285d2367b236e6f0c701c19733ca3547679 100644
--- a/queue-5.10/series
+++ b/queue-5.10/series
@@ -232,3 +232,4 @@ scsi-core-fix-a-regression-triggered-by-scsi_host_bu.patch
 net-tls-cancel-rx-async-resync-request-on-rcd_delta-.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
diff --git a/queue-5.15/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch b/queue-5.15/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
new file mode 100644
index 0000000..a570475
--- /dev/null
+++ b/queue-5.15/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
@@ -0,0 +1,55 @@
+From ba052959a898381d2ac3177e835aa5cd2645fd39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 21:36:25 +0200
+Subject: mm/mm_init: fix hash table order logging in alloc_large_system_hash()
+
+From: Isaac J. Manjarres <isaacmanjarres@google.com>
+
+[ Upstream commit 0d6c356dd6547adac2b06b461528e3573f52d953 ]
+
+When emitting the order of the allocation for a hash table,
+alloc_large_system_hash() unconditionally subtracts PAGE_SHIFT from log
+base 2 of the allocation size.  This is not correct if the allocation size
+is smaller than a page, and yields a negative value for the order as seen
+below:
+
+TCP established hash table entries: 32 (order: -4, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: -2, 1024 bytes, linear)
+
+Use get_order() to compute the order when emitting the hash table
+information to correctly handle cases where the allocation size is smaller
+than a page:
+
+TCP established hash table entries: 32 (order: 0, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: 0, 1024 bytes, linear)
+
+Link: https://lkml.kernel.org/r/20251028191020.413002-1-isaacmanjarres@google.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Isaac J. Manjarres <isaacmanjarres@google.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 0d6c356dd6547adac2b06b461528e3573f52d953)
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 63e131dc2b43e..0a5e9a4b923cb 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -8921,7 +8921,7 @@ void *__init alloc_large_system_hash(const char *tablename,
+               panic("Failed to allocate %s hash table\n", tablename);
+       pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
+-              tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
++              tablename, 1UL << log2qty, get_order(size), size,
+               virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
+       if (_hash_shift)
+-- 
+2.51.0
+
diff --git a/queue-5.15/mm-mprotect-use-long-for-page-accountings-and-retval.patch b/queue-5.15/mm-mprotect-use-long-for-page-accountings-and-retval.patch
new file mode 100644
index 0000000..1aae617
--- /dev/null
+++ b/queue-5.15/mm-mprotect-use-long-for-page-accountings-and-retval.patch
@@ -0,0 +1,232 @@
+From 696fdaaedd996b3a8b61ceea3c10b3a3ac375943 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 13:46:45 +0900
+Subject: mm/mprotect: use long for page accountings and retval
+
+From: Peter Xu <peterx@redhat.com>
+
+commit a79390f5d6a78647fd70856bd42b22d994de0ba2 upstream.
+
+Switch to use type "long" for page accountings and retval across the whole
+procedure of change_protection().
+
+The change should have shrunk the possible maximum page number to half
+of the previous value (ULONG_MAX / 2), but it shouldn't overflow on any
+system either, because the maximum possible number of pages touched by
+change_protection() is ULONG_MAX / PAGE_SIZE.
+
+Two reasons to switch from "unsigned long" to "long":
+
+  1. It suits count_vm_numa_events() better, whose 2nd parameter takes
+     a long type.
+
+  2. It paves the way for returning negative (error) values in the future.
+
+Currently the only caller that consumes this retval is change_prot_numa(),
+where the unsigned long was converted to an int.  While at it, touch up
+the NUMA code to also take a long, so it'll avoid any possible overflow
+during the int-size conversion.
+
+Link: https://lkml.kernel.org/r/20230104225207.1066932-3-peterx@redhat.com
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Acked-by: Mike Kravetz <mike.kravetz@oracle.com>
+Acked-by: James Houghton <jthoughton@google.com>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: Axel Rasmussen <axelrasmussen@google.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: Muchun Song <songmuchun@bytedance.com>
+Cc: Nadav Amit <nadav.amit@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ Adjust context ]
+Signed-off-by: Harry Yoo <harry.yoo@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/hugetlb.h |  4 ++--
+ include/linux/mm.h      |  2 +-
+ mm/hugetlb.c            |  4 ++--
+ mm/mempolicy.c          |  2 +-
+ mm/mprotect.c           | 26 +++++++++++++-------------
+ 5 files changed, 19 insertions(+), 19 deletions(-)
+
+diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
+index 60572d423586e..ca26849a8e359 100644
+--- a/include/linux/hugetlb.h
++++ b/include/linux/hugetlb.h
+@@ -208,7 +208,7 @@ struct page *follow_huge_pgd(struct mm_struct *mm, unsigned long address,
+ int pmd_huge(pmd_t pmd);
+ int pud_huge(pud_t pud);
+-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
++long hugetlb_change_protection(struct vm_area_struct *vma,
+               unsigned long address, unsigned long end, pgprot_t newprot);
+ bool is_hugetlb_entry_migration(pte_t pte);
+@@ -379,7 +379,7 @@ static inline void move_hugetlb_state(struct page *oldpage,
+ {
+ }
+-static inline unsigned long hugetlb_change_protection(
++static inline long hugetlb_change_protection(
+                       struct vm_area_struct *vma, unsigned long address,
+                       unsigned long end, pgprot_t newprot)
+ {
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index 071dd864a7b2b..4a9ebd495ec91 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1910,7 +1910,7 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ #define  MM_CP_UFFD_WP_ALL                 (MM_CP_UFFD_WP | \
+                                           MM_CP_UFFD_WP_RESOLVE)
+-extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
++extern long change_protection(struct vm_area_struct *vma, unsigned long start,
+                             unsigned long end, pgprot_t newprot,
+                             unsigned long cp_flags);
+ extern int mprotect_fixup(struct vm_area_struct *vma,
+diff --git a/mm/hugetlb.c b/mm/hugetlb.c
+index 70ceac102a8db..d583f9394be5f 100644
+--- a/mm/hugetlb.c
++++ b/mm/hugetlb.c
+@@ -5644,7 +5644,7 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
+       return i ? i : err;
+ }
+-unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
++long hugetlb_change_protection(struct vm_area_struct *vma,
+               unsigned long address, unsigned long end, pgprot_t newprot)
+ {
+       struct mm_struct *mm = vma->vm_mm;
+@@ -5652,7 +5652,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
+       pte_t *ptep;
+       pte_t pte;
+       struct hstate *h = hstate_vma(vma);
+-      unsigned long pages = 0;
++      long pages = 0;
+       bool shared_pmd = false;
+       struct mmu_notifier_range range;
+diff --git a/mm/mempolicy.c b/mm/mempolicy.c
+index f089de8564cad..3d984d070e3fe 100644
+--- a/mm/mempolicy.c
++++ b/mm/mempolicy.c
+@@ -634,7 +634,7 @@ static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
+ unsigned long change_prot_numa(struct vm_area_struct *vma,
+                       unsigned long addr, unsigned long end)
+ {
+-      int nr_updated;
++      long nr_updated;
+       nr_updated = change_protection(vma, addr, end, PAGE_NONE, MM_CP_PROT_NUMA);
+       if (nr_updated)
+diff --git a/mm/mprotect.c b/mm/mprotect.c
+index ed18dc49533f6..58822900c6d65 100644
+--- a/mm/mprotect.c
++++ b/mm/mprotect.c
+@@ -35,13 +35,13 @@
+ #include "internal.h"
+-static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
++static long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
+               unsigned long addr, unsigned long end, pgprot_t newprot,
+               unsigned long cp_flags)
+ {
+       pte_t *pte, oldpte;
+       spinlock_t *ptl;
+-      unsigned long pages = 0;
++      long pages = 0;
+       int target_node = NUMA_NO_NODE;
+       bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
+       bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
+@@ -219,13 +219,13 @@ static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
+       return 0;
+ }
+-static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
++static inline long change_pmd_range(struct vm_area_struct *vma,
+               pud_t *pud, unsigned long addr, unsigned long end,
+               pgprot_t newprot, unsigned long cp_flags)
+ {
+       pmd_t *pmd;
+       unsigned long next;
+-      unsigned long pages = 0;
++      long pages = 0;
+       unsigned long nr_huge_updates = 0;
+       struct mmu_notifier_range range;
+@@ -233,7 +233,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+       pmd = pmd_offset(pud, addr);
+       do {
+-              unsigned long this_pages;
++              long this_pages;
+               next = pmd_addr_end(addr, end);
+@@ -291,13 +291,13 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
+       return pages;
+ }
+-static inline unsigned long change_pud_range(struct vm_area_struct *vma,
++static inline long change_pud_range(struct vm_area_struct *vma,
+               p4d_t *p4d, unsigned long addr, unsigned long end,
+               pgprot_t newprot, unsigned long cp_flags)
+ {
+       pud_t *pud;
+       unsigned long next;
+-      unsigned long pages = 0;
++      long pages = 0;
+       pud = pud_offset(p4d, addr);
+       do {
+@@ -311,13 +311,13 @@ static inline unsigned long change_pud_range(struct vm_area_struct *vma,
+       return pages;
+ }
+-static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
++static inline long change_p4d_range(struct vm_area_struct *vma,
+               pgd_t *pgd, unsigned long addr, unsigned long end,
+               pgprot_t newprot, unsigned long cp_flags)
+ {
+       p4d_t *p4d;
+       unsigned long next;
+-      unsigned long pages = 0;
++      long pages = 0;
+       p4d = p4d_offset(pgd, addr);
+       do {
+@@ -331,7 +331,7 @@ static inline unsigned long change_p4d_range(struct vm_area_struct *vma,
+       return pages;
+ }
+-static unsigned long change_protection_range(struct vm_area_struct *vma,
++static long change_protection_range(struct vm_area_struct *vma,
+               unsigned long addr, unsigned long end, pgprot_t newprot,
+               unsigned long cp_flags)
+ {
+@@ -339,7 +339,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
+       pgd_t *pgd;
+       unsigned long next;
+       unsigned long start = addr;
+-      unsigned long pages = 0;
++      long pages = 0;
+       BUG_ON(addr >= end);
+       pgd = pgd_offset(mm, addr);
+@@ -361,11 +361,11 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
+       return pages;
+ }
+-unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
++long change_protection(struct vm_area_struct *vma, unsigned long start,
+                      unsigned long end, pgprot_t newprot,
+                      unsigned long cp_flags)
+ {
+-      unsigned long pages;
++      long pages;
+       BUG_ON((cp_flags & MM_CP_UFFD_WP_ALL) == MM_CP_UFFD_WP_ALL);
+-- 
+2.51.0
+
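
A small sketch of why signed long is safe here (PAGE_SIZE assumed to be
4096 for illustration): the worst-case page count is bounded by the
address-space size divided by the page size, roughly 2^52 on 64-bit and
far below LONG_MAX, so the sign bit becomes free to carry errors.

#include <limits.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed for illustration */

int main(void)
{
	/* Most pages change_protection() could ever touch. */
	unsigned long max_pages = ULONG_MAX / PAGE_SIZE;

	printf("max pages %lu fits in long (max %ld)\n",
	       max_pages, LONG_MAX);

	/* The freed-up negative range can later carry error codes,
	 * as the commit message anticipates (hypothetical value): */
	long nr_updated = -12;	/* e.g. an -ENOMEM-style failure */

	if (nr_updated < 0)
		printf("change_protection failed: %ld\n", nr_updated);
	return 0;
}
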
diff --git a/queue-5.15/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch b/queue-5.15/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch
new file mode 100644
index 0000000..197be54
--- /dev/null
+++ b/queue-5.15/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch
@@ -0,0 +1,69 @@
+From 79dd1827156f782214231c026f4d98ddf0bb60cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 21:38:21 +0200
+Subject: mm/secretmem: fix use-after-free race in fault handler
+
+From: Lance Yang <lance.yang@linux.dev>
+
+[ Upstream commit 6f86d0534fddfbd08687fa0f01479d4226bc3c3d ]
+
+When a page fault occurs in a secret memory file created with
+`memfd_secret(2)`, the kernel will allocate a new page for it, mark the
+underlying page as not-present in the direct map, and add it to the file
+mapping.
+
+If two tasks cause a fault in the same page concurrently, both could end
+up allocating a page and removing the page from the direct map, but only
+one would succeed in adding the page to the file mapping.  The task that
+failed undoes the effects of its attempt by (a) freeing the page again
+and (b) putting the page back into the direct map.  However, by doing
+these two operations in this order, the page becomes available to the
+allocator again before it is placed back in the direct mapping.
+
+If another task attempts to allocate the page between (a) and (b), and the
+kernel tries to access it via the direct map, it would result in a
+supervisor not-present page fault.
+
+Fix the ordering to restore the direct map before the page is freed.
+
+Link: https://lkml.kernel.org/r/20251031120955.92116-1-lance.yang@linux.dev
+Fixes: 1507f51255c9 ("mm: introduce memfd_secret system call to create "secret" memory areas")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Reported-by: Google Big Sleep <big-sleep-vuln-reports@google.com>
+Closes: https://lore.kernel.org/linux-mm/CAEXGt5QeDpiHTu3K9tvjUTPqo+d-=wuCNYPa+6sWKrdQJ-ATdg@mail.gmail.com/
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 6f86d0534fddfbd08687fa0f01479d4226bc3c3d)
+[rppt: replaced folio with page in the patch and in the changelog]
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/secretmem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/secretmem.c b/mm/secretmem.c
+index 624663a948083..0c86133ad33fe 100644
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -82,13 +82,13 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
+               __SetPageUptodate(page);
+               err = add_to_page_cache_lru(page, mapping, offset, gfp);
+               if (unlikely(err)) {
+-                      put_page(page);
+                       /*
+                        * If a split of large page was required, it
+                        * already happened when we marked the page invalid
+                        * which guarantees that this call won't fail
+                        */
+                       set_direct_map_default_noflush(page);
++                      put_page(page);
+                       if (err == -EEXIST)
+                               goto retry;
+-- 
+2.51.0
+
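
The window being closed, as a schematic timeline condensed from the hunk
above (the task names and interleaving are hypothetical):

/*
 * Task A (fault, loses the race)    Task B (any allocation)
 * ------------------------------    -----------------------
 * add_to_page_cache_lru() -> err    .
 * put_page(page)             (a)    page = alloc_page()  <- page reused
 *                                   touch page via the direct map
 *                                     -> supervisor not-present fault:
 *                                        the mapping is still invalid
 * set_direct_map_default_
 *         noflush(page)      (b)    (too late)
 *
 * With (b) done before (a), as the patch orders it, the page returns
 * to the allocator only after its direct-map entry is valid again.
 */
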
diff --git a/queue-5.15/series b/queue-5.15/series
index 4b2d3769057e03c46bf863979425c1a697711387..eb98984699cb7cb7c6c66f7fa5a69c26c735e3e1 100644
--- a/queue-5.15/series
+++ b/queue-5.15/series
@@ -305,3 +305,6 @@ selftests-net-use-bash-for-bareudp-testing.patch
 net-tls-cancel-rx-async-resync-request-on-rcd_delta-.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
+mm-mprotect-use-long-for-page-accountings-and-retval.patch
+mm-secretmem-fix-use-after-free-race-in-fault-handle.patch
diff --git a/queue-5.4/mm-page_alloc-fix-hash-table-order-logging-in-alloc_.patch b/queue-5.4/mm-page_alloc-fix-hash-table-order-logging-in-alloc_.patch
new file mode 100644
index 0000000..b9df266
--- /dev/null
+++ b/queue-5.4/mm-page_alloc-fix-hash-table-order-logging-in-alloc_.patch
@@ -0,0 +1,56 @@
+From 497beb312fb8433946b9909eca8f15a1cd7430b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 21:34:22 +0200
+Subject: mm/page_alloc: fix hash table order logging in
+ alloc_large_system_hash()
+
+From: Isaac J. Manjarres <isaacmanjarres@google.com>
+
+[ Upstream commit 0d6c356dd6547adac2b06b461528e3573f52d953 ]
+
+When emitting the order of the allocation for a hash table,
+alloc_large_system_hash() unconditionally subtracts PAGE_SHIFT from log
+base 2 of the allocation size.  This is not correct if the allocation size
+is smaller than a page, and yields a negative value for the order as seen
+below:
+
+TCP established hash table entries: 32 (order: -4, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: -2, 1024 bytes, linear)
+
+Use get_order() to compute the order when emitting the hash table
+information to correctly handle cases where the allocation size is smaller
+than a page:
+
+TCP established hash table entries: 32 (order: 0, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: 0, 1024 bytes, linear)
+
+Link: https://lkml.kernel.org/r/20251028191020.413002-1-isaacmanjarres@google.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Isaac J. Manjarres <isaacmanjarres@google.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 0d6c356dd6547adac2b06b461528e3573f52d953)
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 66e4b78786a97..111f50054fc18 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -8275,7 +8275,7 @@ void *__init alloc_large_system_hash(const char *tablename,
+               panic("Failed to allocate %s hash table\n", tablename);
+       pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
+-              tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
++              tablename, 1UL << log2qty, get_order(size), size,
+               virt ? "vmalloc" : "linear");
+       if (_hash_shift)
+-- 
+2.51.0
+
diff --git a/queue-5.4/series b/queue-5.4/series
index 9afe85e63638a7c11e880229efe416a3ef05b7de..57d0a1726fd5f8bdc3757cf493003792c0ef6d7d 100644
--- a/queue-5.4/series
+++ b/queue-5.4/series
@@ -169,3 +169,4 @@ s390-ctcm-fix-double-kfree.patch
 vsock-ignore-signal-timeout-on-connect-if-already-es.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+mm-page_alloc-fix-hash-table-order-logging-in-alloc_.patch
diff --git a/queue-6.1/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch b/queue-6.1/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
new file mode 100644
index 0000000..75c1678
--- /dev/null
+++ b/queue-6.1/mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
@@ -0,0 +1,55 @@
+From 49dbcc4c4b4954090026a6048f806d1d3d40f3b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 21:42:22 +0200
+Subject: mm/mm_init: fix hash table order logging in alloc_large_system_hash()
+
+From: Isaac J. Manjarres <isaacmanjarres@google.com>
+
+[ Upstream commit 0d6c356dd6547adac2b06b461528e3573f52d953 ]
+
+When emitting the order of the allocation for a hash table,
+alloc_large_system_hash() unconditionally subtracts PAGE_SHIFT from log
+base 2 of the allocation size.  This is not correct if the allocation size
+is smaller than a page, and yields a negative value for the order as seen
+below:
+
+TCP established hash table entries: 32 (order: -4, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: -2, 1024 bytes, linear)
+
+Use get_order() to compute the order when emitting the hash table
+information to correctly handle cases where the allocation size is smaller
+than a page:
+
+TCP established hash table entries: 32 (order: 0, 256 bytes, linear)
+TCP bind hash table entries: 32 (order: 0, 1024 bytes, linear)
+
+Link: https://lkml.kernel.org/r/20251028191020.413002-1-isaacmanjarres@google.com
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Isaac J. Manjarres <isaacmanjarres@google.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+(cherry picked from commit 0d6c356dd6547adac2b06b461528e3573f52d953)
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/page_alloc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/page_alloc.c b/mm/page_alloc.c
+index 86066a2cf258a..d760b96604eca 100644
+--- a/mm/page_alloc.c
++++ b/mm/page_alloc.c
+@@ -9225,7 +9225,7 @@ void *__init alloc_large_system_hash(const char *tablename,
+               panic("Failed to allocate %s hash table\n", tablename);
+       pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
+-              tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
++              tablename, 1UL << log2qty, get_order(size), size,
+               virt ? (huge ? "vmalloc hugepage" : "vmalloc") : "linear");
+       if (_hash_shift)
+-- 
+2.51.0
+
diff --git a/queue-6.1/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch b/queue-6.1/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch
new file mode 100644
index 0000000..f116b00
--- /dev/null
+++ b/queue-6.1/mm-secretmem-fix-use-after-free-race-in-fault-handle.patch
@@ -0,0 +1,68 @@
+From 7fedea9d2c8998bb830310da4501570a044322da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Nov 2025 21:15:47 +0200
+Subject: mm/secretmem: fix use-after-free race in fault handler
+
+From: Lance Yang <lance.yang@linux.dev>
+
+[ Upstream commit 6f86d0534fddfbd08687fa0f01479d4226bc3c3d ]
+
+When a page fault occurs in a secret memory file created with
+`memfd_secret(2)`, the kernel will allocate a new page for it, mark the
+underlying page as not-present in the direct map, and add it to the file
+mapping.
+
+If two tasks cause a fault in the same page concurrently, both could end
+up allocating a page and removing the page from the direct map, but only
+one would succeed in adding the page to the file mapping.  The task that
+failed undoes the effects of its attempt by (a) freeing the page again
+and (b) putting the page back into the direct map.  However, by doing
+these two operations in this order, the page becomes available to the
+allocator again before it is placed back in the direct mapping.
+
+If another task attempts to allocate the page between (a) and (b), and the
+kernel tries to access it via the direct map, it would result in a
+supervisor not-present page fault.
+
+Fix the ordering to restore the direct map before the page is freed.
+
+Link: https://lkml.kernel.org/r/20251031120955.92116-1-lance.yang@linux.dev
+Fixes: 1507f51255c9 ("mm: introduce memfd_secret system call to create "secret" memory areas")
+Signed-off-by: Lance Yang <lance.yang@linux.dev>
+Reported-by: Google Big Sleep <big-sleep-vuln-reports@google.com>
+Closes: https://lore.kernel.org/linux-mm/CAEXGt5QeDpiHTu3K9tvjUTPqo+d-=wuCNYPa+6sWKrdQJ-ATdg@mail.gmail.com/
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[rppt: replaced folio with page in the patch and in the changelog]
+Signed-off-by: Mike Rapoport (Microsoft) <rppt@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/secretmem.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/secretmem.c b/mm/secretmem.c
+index 18954eae995fc..b570a6e25b6be 100644
+--- a/mm/secretmem.c
++++ b/mm/secretmem.c
+@@ -82,13 +82,13 @@ static vm_fault_t secretmem_fault(struct vm_fault *vmf)
+               __SetPageUptodate(page);
+               err = add_to_page_cache_lru(page, mapping, offset, gfp);
+               if (unlikely(err)) {
+-                      put_page(page);
+                       /*
+                        * If a split of large page was required, it
+                        * already happened when we marked the page invalid
+                        * which guarantees that this call won't fail
+                        */
+                       set_direct_map_default_noflush(page);
++                      put_page(page);
+                       if (err == -EEXIST)
+                               goto retry;
+-- 
+2.51.0
+
diff --git a/queue-6.1/series b/queue-6.1/series
index d608ff9b4a5f0fb0682aff608b2285ea0be097ab..116c765d1802b01090b2235fc7e675e0bebb4753 100644
--- a/queue-6.1/series
+++ b/queue-6.1/series
@@ -471,3 +471,5 @@ selftests-net-use-bash-for-bareudp-testing.patch
 net-tls-cancel-rx-async-resync-request-on-rcd_delta-.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+mm-secretmem-fix-use-after-free-race-in-fault-handle.patch
+mm-mm_init-fix-hash-table-order-logging-in-alloc_lar.patch
diff --git a/queue-6.12/alsa-usb-audio-fix-missing-unlock-at-error-path-of-m.patch b/queue-6.12/alsa-usb-audio-fix-missing-unlock-at-error-path-of-m.patch
new file mode 100644
index 0000000..0cb4f12
--- /dev/null
+++ b/queue-6.12/alsa-usb-audio-fix-missing-unlock-at-error-path-of-m.patch
@@ -0,0 +1,44 @@
+From 5f192312f29bd2ab0e70cc1ed79af7ea76883289 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 26 Nov 2025 11:08:31 +0100
+Subject: ALSA: usb-audio: Fix missing unlock at error path of maxpacksize
+ check
+
+From: Takashi Iwai <tiwai@suse.de>
+
+The recent backport of the upstream commit 05a1fc5efdd8 ("ALSA:
+usb-audio: Fix potential overflow of PCM transfer buffer") on the
+older stable kernels like 6.12.y was broken, since it didn't account
+for the mutex unlock that the upstream code handles with guard().
+In the older code, we still need an explicit unlock.
+
+This is a fix that corrects the error path, applied only on old stable
+trees.
+
+Reported-by: Pavel Machek <pavel@denx.de>
+Closes: https://lore.kernel.org/aSWtH0AZH5+aeb+a@duo.ucw.cz
+Fixes: 98e9d5e33bda ("ALSA: usb-audio: Fix potential overflow of PCM transfer buffer")
+Reviewed-by: Pavel Machek <pavel@denx.de>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/endpoint.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
+index 7238f65cbcfff..aa201e4744bf6 100644
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -1389,7 +1389,8 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
+       if (ep->packsize[1] > ep->maxpacksize) {
+               usb_audio_dbg(chip, "Too small maxpacksize %u for rate %u / pps %u\n",
+                             ep->maxpacksize, ep->cur_rate, ep->pps);
+-              return -EINVAL;
++              err = -EINVAL;
++              goto unlock;
+       }
+       /* calculate the frequency in 16.16 format */
+-- 
+2.51.0
+
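
The backport hazard in miniature: upstream's guard() drops the lock on
any return path, while pre-guard() code must funnel every error through
the unlock label. A kernel-style sketch with hypothetical names (struct
ep, bad_packsize(), configure()), not the actual endpoint code:

/* Upstream shape: scope-based cleanup makes the early return safe. */
static int set_params_new(struct ep *ep)
{
	guard(mutex)(&ep->lock);
	if (bad_packsize(ep))
		return -EINVAL;		/* mutex released automatically */
	return configure(ep);
}

/* Older stable shape: a bare "return -EINVAL" in the branch would leak
 * the lock, which is exactly what the unpatched backport did. */
static int set_params_old(struct ep *ep)
{
	int err;

	mutex_lock(&ep->lock);
	if (bad_packsize(ep)) {
		err = -EINVAL;
		goto unlock;
	}
	err = configure(ep);
unlock:
	mutex_unlock(&ep->lock);
	return err;
}
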
diff --git a/queue-6.12/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch b/queue-6.12/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch
new file mode 100644
index 0000000..008920f
--- /dev/null
+++ b/queue-6.12/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch
@@ -0,0 +1,69 @@
+From 7c90692a8ffc9d96be2d1c2f5e05d49ed7aa49c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Nov 2025 22:28:28 +0000
+Subject: drm/xe: Prevent BIT() overflow when handling invalid prefetch region
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit d52dea485cd3c98cfeeb474cf66cf95df2ab142f ]
+
+If user provides a large value (such as 0x80) for parameter
+prefetch_mem_region_instance in vm_bind ioctl, it will cause
+BIT(prefetch_region) overflow as below:
+"
+ ------------[ cut here ]------------
+ UBSAN: shift-out-of-bounds in drivers/gpu/drm/xe/xe_vm.c:3414:7
+ shift exponent 128 is too large for 64-bit type 'long unsigned int'
+ CPU: 8 UID: 0 PID: 53120 Comm: xe_exec_system_ Tainted: G        W           6.18.0-rc1-lgci-xe-kernel+ #200 PREEMPT(voluntary)
+ Tainted: [W]=WARN
+ Hardware name: ASUS System Product Name/PRIME Z790-P WIFI, BIOS 0812 02/24/2023
+ Call Trace:
+  <TASK>
+  dump_stack_lvl+0xa0/0xc0
+  dump_stack+0x10/0x20
+  ubsan_epilogue+0x9/0x40
+  __ubsan_handle_shift_out_of_bounds+0x10e/0x170
+  ? mutex_unlock+0x12/0x20
+  xe_vm_bind_ioctl.cold+0x20/0x3c [xe]
+ ...
+"
+Fix it by validating prefetch_region before the BIT() usage.
+
+v2: Add Closes and Cc stable kernels. (Matt)
+
+Reported-by: Koen Koning <koen.koning@intel.com>
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6478
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patch.msgid.link/20251112181005.2120521-2-shuicheng.lin@intel.com
+(cherry picked from commit 8f565bdd14eec5611cc041dba4650e42ccdf71d9)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+(cherry picked from commit d52dea485cd3c98cfeeb474cf66cf95df2ab142f)
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index fc5f0e1351932..30625ce691fa2 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -2903,8 +2903,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe,
+                                op == DRM_XE_VM_BIND_OP_PREFETCH) ||
+                   XE_IOCTL_DBG(xe, prefetch_region &&
+                                op != DRM_XE_VM_BIND_OP_PREFETCH) ||
+-                  XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
+-                                     xe->info.mem_region_mask)) ||
++                  XE_IOCTL_DBG(xe, prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
++                               !(BIT(prefetch_region) & xe->info.mem_region_mask)) ||
+                   XE_IOCTL_DBG(xe, obj &&
+                                op == DRM_XE_VM_BIND_OP_UNMAP)) {
+                       err = -EINVAL;
+-- 
+2.51.0
+
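
The generic pattern behind the fix, as a runnable user-space sketch (the
0x3 region mask is an assumption for illustration): range-check a
user-supplied shift count against the mask's width before evaluating
BIT(), since shifting a 64-bit type by 64 or more is undefined behavior.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1ULL << (n))

static const uint64_t mem_region_mask = 0x3;	/* regions 0 and 1 valid */

static bool prefetch_region_valid(uint32_t region)
{
	/* Reject the shift count first: BIT(0x80) would be UB. */
	if (region >= sizeof(mem_region_mask) * 8)
		return false;
	return BIT(region) & mem_region_mask;
}

int main(void)
{
	printf("%d %d %d\n",
	       prefetch_region_valid(0),	/* 1: bit set in mask */
	       prefetch_region_valid(2),	/* 0: not in mask */
	       prefetch_region_valid(0x80));	/* 0: rejected pre-shift */
	return 0;
}
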
diff --git a/queue-6.12/kvm-arm64-make-all-32bit-id-registers-fully-writable.patch b/queue-6.12/kvm-arm64-make-all-32bit-id-registers-fully-writable.patch
new file mode 100644
index 0000000..15df22c
--- /dev/null
+++ b/queue-6.12/kvm-arm64-make-all-32bit-id-registers-fully-writable.patch
@@ -0,0 +1,132 @@
+From e3412476dbb05a67cb9095df97e1421c3d6571a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 23 Nov 2025 10:39:09 +0000
+Subject: KVM: arm64: Make all 32bit ID registers fully writable
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 3f9eacf4f0705876a5d6526d7d320ca91d7d7a16 upstream.
+
+32bit ID registers aren't getting much love these days, and are
+often missed in updates. One of these updates broke restoring
+a GICv2 guest on a GICv3 machine.
+
+Instead of performing a piecemeal fix, just bite the bullet
+and make all 32bit ID regs fully writable. KVM itself never
+relies on them for anything, and if the VMM wants to mess up
+the guest, so be it.
+
+Fixes: 5cb57a1aff755 ("KVM: arm64: Zero ID_AA64PFR0_EL1.GIC when no GICv3 is presented to the guest")
+Reported-by: Peter Maydell <peter.maydell@linaro.org>
+Cc: stable@vger.kernel.org
+Reviewed-by: Oliver Upton <oupton@kernel.org>
+Link: https://patch.msgid.link/20251030122707.2033690-2-maz@kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kvm/sys_regs.c | 61 ++++++++++++++++++++-------------------
+ 1 file changed, 32 insertions(+), 29 deletions(-)
+
+diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
+index 42791971f7588..5c09c788aaa61 100644
+--- a/arch/arm64/kvm/sys_regs.c
++++ b/arch/arm64/kvm/sys_regs.c
+@@ -2176,22 +2176,26 @@ static bool bad_redir_trap(struct kvm_vcpu *vcpu,
+       .val = 0,                               \
+ }
+-/* sys_reg_desc initialiser for known cpufeature ID registers */
+-#define AA32_ID_SANITISED(name) {             \
++/* sys_reg_desc initialiser for writable ID registers */
++#define ID_WRITABLE(name, mask) {             \
+       ID_DESC(name),                          \
+       .set_user = set_id_reg,                 \
+-      .visibility = aa32_id_visibility,       \
++      .visibility = id_visibility,            \
+       .reset = kvm_read_sanitised_id_reg,     \
+-      .val = 0,                               \
++      .val = mask,                            \
+ }
+-/* sys_reg_desc initialiser for writable ID registers */
+-#define ID_WRITABLE(name, mask) {             \
++/*
++ * 32bit ID regs are fully writable when the guest is 32bit
++ * capable. Nothing in the KVM code should rely on 32bit features
++ * anyway, only 64bit, so let the VMM do its worse.
++ */
++#define AA32_ID_WRITABLE(name) {              \
+       ID_DESC(name),                          \
+       .set_user = set_id_reg,                 \
+-      .visibility = id_visibility,            \
++      .visibility = aa32_id_visibility,       \
+       .reset = kvm_read_sanitised_id_reg,     \
+-      .val = mask,                            \
++      .val = GENMASK(31, 0),                  \
+ }
+ /*
+@@ -2380,40 +2384,39 @@ static const struct sys_reg_desc sys_reg_descs[] = {
+       /* AArch64 mappings of the AArch32 ID registers */
+       /* CRm=1 */
+-      AA32_ID_SANITISED(ID_PFR0_EL1),
+-      AA32_ID_SANITISED(ID_PFR1_EL1),
++      AA32_ID_WRITABLE(ID_PFR0_EL1),
++      AA32_ID_WRITABLE(ID_PFR1_EL1),
+       { SYS_DESC(SYS_ID_DFR0_EL1),
+         .access = access_id_reg,
+         .get_user = get_id_reg,
+         .set_user = set_id_dfr0_el1,
+         .visibility = aa32_id_visibility,
+         .reset = read_sanitised_id_dfr0_el1,
+-        .val = ID_DFR0_EL1_PerfMon_MASK |
+-               ID_DFR0_EL1_CopDbg_MASK, },
++        .val = GENMASK(31, 0), },
+       ID_HIDDEN(ID_AFR0_EL1),
+-      AA32_ID_SANITISED(ID_MMFR0_EL1),
+-      AA32_ID_SANITISED(ID_MMFR1_EL1),
+-      AA32_ID_SANITISED(ID_MMFR2_EL1),
+-      AA32_ID_SANITISED(ID_MMFR3_EL1),
++      AA32_ID_WRITABLE(ID_MMFR0_EL1),
++      AA32_ID_WRITABLE(ID_MMFR1_EL1),
++      AA32_ID_WRITABLE(ID_MMFR2_EL1),
++      AA32_ID_WRITABLE(ID_MMFR3_EL1),
+       /* CRm=2 */
+-      AA32_ID_SANITISED(ID_ISAR0_EL1),
+-      AA32_ID_SANITISED(ID_ISAR1_EL1),
+-      AA32_ID_SANITISED(ID_ISAR2_EL1),
+-      AA32_ID_SANITISED(ID_ISAR3_EL1),
+-      AA32_ID_SANITISED(ID_ISAR4_EL1),
+-      AA32_ID_SANITISED(ID_ISAR5_EL1),
+-      AA32_ID_SANITISED(ID_MMFR4_EL1),
+-      AA32_ID_SANITISED(ID_ISAR6_EL1),
++      AA32_ID_WRITABLE(ID_ISAR0_EL1),
++      AA32_ID_WRITABLE(ID_ISAR1_EL1),
++      AA32_ID_WRITABLE(ID_ISAR2_EL1),
++      AA32_ID_WRITABLE(ID_ISAR3_EL1),
++      AA32_ID_WRITABLE(ID_ISAR4_EL1),
++      AA32_ID_WRITABLE(ID_ISAR5_EL1),
++      AA32_ID_WRITABLE(ID_MMFR4_EL1),
++      AA32_ID_WRITABLE(ID_ISAR6_EL1),
+       /* CRm=3 */
+-      AA32_ID_SANITISED(MVFR0_EL1),
+-      AA32_ID_SANITISED(MVFR1_EL1),
+-      AA32_ID_SANITISED(MVFR2_EL1),
++      AA32_ID_WRITABLE(MVFR0_EL1),
++      AA32_ID_WRITABLE(MVFR1_EL1),
++      AA32_ID_WRITABLE(MVFR2_EL1),
+       ID_UNALLOCATED(3,3),
+-      AA32_ID_SANITISED(ID_PFR2_EL1),
++      AA32_ID_WRITABLE(ID_PFR2_EL1),
+       ID_HIDDEN(ID_DFR1_EL1),
+-      AA32_ID_SANITISED(ID_MMFR5_EL1),
++      AA32_ID_WRITABLE(ID_MMFR5_EL1),
+       ID_UNALLOCATED(3,7),
+       /* AArch64 ID registers */
+-- 
+2.51.0
+
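
In these descriptors the .val field doubles as the mask of user-writable
bits, so GENMASK(31, 0) opens up the whole 32-bit register. A schematic
of mask-gated writes, not the actual KVM validation code:

#include <stdint.h>
#include <stdio.h>

#define GENMASK64(h, l) ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

/* Schematic check: a user write may only flip bits inside the
 * writable mask carried in the descriptor's .val field. */
static int set_id_reg_sketch(uint64_t cur, uint64_t newval,
			     uint64_t writable)
{
	if ((cur ^ newval) & ~writable)
		return -22;	/* -EINVAL: a read-only bit was touched */
	return 0;
}

int main(void)
{
	uint64_t mask = GENMASK64(31, 0);	/* as in AA32_ID_WRITABLE */

	/* Any 32-bit value the VMM restores is now accepted... */
	printf("%d\n", set_id_reg_sketch(0x1122, 0xdeadbeef, mask));
	/* ...while bits above 31 remain locked. */
	printf("%d\n", set_id_reg_sketch(0, 1ULL << 40, mask));
	return 0;	/* prints 0, then -22 */
}
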
diff --git a/queue-6.12/revert-rdma-irdma-update-kconfig.patch b/queue-6.12/revert-rdma-irdma-update-kconfig.patch
new file mode 100644
index 0000000..b6129a9
--- /dev/null
+++ b/queue-6.12/revert-rdma-irdma-update-kconfig.patch
@@ -0,0 +1,49 @@
+From a784b0180fca3a63e47f394d261c9a2562f31102 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Nov 2025 10:14:59 +0800
+Subject: Revert "RDMA/irdma: Update Kconfig"
+
+From: Wentao Guan <guanwentao@uniontech.com>
+
+Revert commit 8ced3cb73ccd20e744deab7b49f2b7468c984eb2, which is upstream
+commit 060842fed53f77a73824c9147f51dc6746c1267a.
+
+It causes a regression in the 6.12.58 stable release; there is no issue upstream.
+
+The Kconfig dependency change 060842fed53f ("RDMA/irdma: Update Kconfig")
+went into Linux kernel 6.18, where RDMA IDPF support was merged.
+
+Even though the IDPF driver exists in older kernels, it doesn't provide RDMA
+support, so there is no need for IRDMA to depend on IDPF in kernels <= 6.17.
+
+Link: https://lore.kernel.org/all/IA1PR11MB7727692DE0ECFE84E9B52F02CBD5A@IA1PR11MB7727.namprd11.prod.outlook.com/
+Link: https://lore.kernel.org/all/IA1PR11MB772718B36A3B27D2F07B0109CBD5A@IA1PR11MB7727.namprd11.prod.outlook.com/
+Cc: stable@vger.kernel.org # v6.12.58
+Signed-off-by: Tatyana Nikolova <tatyana.e.nikolova@intel.com>
+Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/irdma/Kconfig | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/irdma/Kconfig b/drivers/infiniband/hw/irdma/Kconfig
+index 41660203e0049..b6f9c41bca51d 100644
+--- a/drivers/infiniband/hw/irdma/Kconfig
++++ b/drivers/infiniband/hw/irdma/Kconfig
+@@ -4,10 +4,9 @@ config INFINIBAND_IRDMA
+       depends on INET
+       depends on IPV6 || !IPV6
+       depends on PCI
+-      depends on IDPF && ICE && I40E
++      depends on ICE && I40E
+       select GENERIC_ALLOCATOR
+       select AUXILIARY_BUS
+       help
+-        This is an Intel(R) Ethernet Protocol Driver for RDMA that
+-        supports IPU E2000 (RoCEv2), E810 (iWARP/RoCEv2) and X722 (iWARP)
+-        network devices.
++        This is an Intel(R) Ethernet Protocol Driver for RDMA driver
++        that support E810 (iWARP/RoCE) and X722 (iWARP) network devices.
+-- 
+2.51.0
+
diff --git a/queue-6.12/s390-mm-fix-__ptep_rdp-inline-assembly.patch b/queue-6.12/s390-mm-fix-__ptep_rdp-inline-assembly.patch
new file mode 100644
index 0000000..a29dc89
--- /dev/null
+++ b/queue-6.12/s390-mm-fix-__ptep_rdp-inline-assembly.patch
@@ -0,0 +1,87 @@
+From 98605e5e4be098a5b58a8915291a89278f0267e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 11:46:36 +0100
+Subject: s390/mm: Fix __ptep_rdp() inline assembly
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 31475b88110c4725b4f9a79c3a0d9bbf97e69e1c ]
+
+When a zero ASCE is passed to the __ptep_rdp() inline assembly, the
+generated instruction should have the R3 field of the instruction set to
+zero. However the inline assembly is written incorrectly: for such cases a
+zero is loaded into a register allocated by the compiler and this register
+is then used by the instruction.
+
+This means that selected TLB entries may not be flushed since the specified
+ASCE does not match the one which was used when the selected TLB entries
+were created.
+
+Fix this by removing the asce and opt parameters of __ptep_rdp(), since
+all callers always pass zero, and use a hard-coded register zero for
+the R3 field.
+
+Fixes: 0807b856521f ("s390/mm: add support for RDP (Reset DAT-Protection)")
+Cc: stable@vger.kernel.org
+Reviewed-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/pgtable.h | 12 +++++-------
+ arch/s390/mm/pgtable.c          |  4 ++--
+ 2 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index 5ee73f245a0c0..cf5a6af9cf41d 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1109,17 +1109,15 @@ static inline pte_t pte_mkhuge(pte_t pte)
+ #define IPTE_NODAT    0x400
+ #define IPTE_GUEST_ASCE       0x800
+-static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
+-                                     unsigned long opt, unsigned long asce,
+-                                     int local)
++static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
+ {
+       unsigned long pto;
+       pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+-      asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
++      asm volatile(".insn     rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
+                    : "+m" (*ptep)
+-                   : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
+-                     [asce] "a" (asce), [m4] "i" (local));
++                   : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
++                     [m4] "i" (local));
+ }
+ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+@@ -1303,7 +1301,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+        * A local RDP can be used to do the flush.
+        */
+       if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
+-              __ptep_rdp(address, ptep, 0, 0, 1);
++              __ptep_rdp(address, ptep, 1);
+ }
+ #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index b03c665d72426..8eba28b9975fe 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -293,9 +293,9 @@ void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+       preempt_disable();
+       atomic_inc(&mm->context.flush_count);
+       if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+-              __ptep_rdp(addr, ptep, 0, 0, 1);
++              __ptep_rdp(addr, ptep, 1);
+       else
+-              __ptep_rdp(addr, ptep, 0, 0, 0);
++              __ptep_rdp(addr, ptep, 0);
+       /*
+        * PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
+        * means it is still valid and active, and must not be changed according
+-- 
+2.51.0
+
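
The pitfall, sketched with the two templates side by side (fragments,
not standalone code): with an input operand, the compiler allocates some
register and loads 0 into it; on s390 the "a" constraint never hands out
%r0, since r0 reads as zero in address contexts, so the encoded R3 field
names a nonzero register. The CPU then compares TLB entries against the
ASCE value held there (0), which matches none of them. Hard-coding %%r0
makes the encoded field itself zero, the no-ASCE-comparison case.

/* Broken: %[asce] becomes e.g. %r5 holding the value 0; the encoded
 * R3 field is 5, so the selected TLB entries are matched against an
 * ASCE of 0 and nothing is flushed. */
asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
	     : "+m" (*ptep)
	     : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
	       [asce] "a" (0UL), [m4] "i" (local));

/* Fixed: the template spells out %r0, so the R3 field is literally 0. */
asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
	     : "+m" (*ptep)
	     : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
	       [m4] "i" (local));
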
diff --git a/queue-6.12/series b/queue-6.12/series
index f3c08c7c562a98a16ea6c53fffac289cd0aae698..abae70ebd70f1131cfb924426ce720a13c17642b 100644
--- a/queue-6.12/series
+++ b/queue-6.12/series
@@ -90,3 +90,8 @@ blk-crypto-use-blk_sts_inval-for-alignment-errors.patch
 net-tls-cancel-rx-async-resync-request-on-rcd_delta-.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+alsa-usb-audio-fix-missing-unlock-at-error-path-of-m.patch
+kvm-arm64-make-all-32bit-id-registers-fully-writable.patch
+revert-rdma-irdma-update-kconfig.patch
+drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch
+s390-mm-fix-__ptep_rdp-inline-assembly.patch
diff --git a/queue-6.17/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch b/queue-6.17/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch
new file mode 100644
index 0000000..8ead573
--- /dev/null
+++ b/queue-6.17/drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch
@@ -0,0 +1,69 @@
+From 9163842e3e6ff62dc6b489a5221c2c5d217c316f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Nov 2025 22:16:10 +0000
+Subject: drm/xe: Prevent BIT() overflow when handling invalid prefetch region
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit d52dea485cd3c98cfeeb474cf66cf95df2ab142f ]
+
+If user provides a large value (such as 0x80) for parameter
+prefetch_mem_region_instance in vm_bind ioctl, it will cause
+BIT(prefetch_region) overflow as below:
+"
+ ------------[ cut here ]------------
+ UBSAN: shift-out-of-bounds in drivers/gpu/drm/xe/xe_vm.c:3414:7
+ shift exponent 128 is too large for 64-bit type 'long unsigned int'
+ CPU: 8 UID: 0 PID: 53120 Comm: xe_exec_system_ Tainted: G        W           6.18.0-rc1-lgci-xe-kernel+ #200 PREEMPT(voluntary)
+ Tainted: [W]=WARN
+ Hardware name: ASUS System Product Name/PRIME Z790-P WIFI, BIOS 0812 02/24/2023
+ Call Trace:
+  <TASK>
+  dump_stack_lvl+0xa0/0xc0
+  dump_stack+0x10/0x20
+  ubsan_epilogue+0x9/0x40
+  __ubsan_handle_shift_out_of_bounds+0x10e/0x170
+  ? mutex_unlock+0x12/0x20
+  xe_vm_bind_ioctl.cold+0x20/0x3c [xe]
+ ...
+"
+Fix it by validating prefetch_region before the BIT() usage.
+
+v2: Add Closes and Cc stable kernels. (Matt)
+
+Reported-by: Koen Koning <koen.koning@intel.com>
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6478
+Cc: <stable@vger.kernel.org> # v6.8+
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Signed-off-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patch.msgid.link/20251112181005.2120521-2-shuicheng.lin@intel.com
+(cherry picked from commit 8f565bdd14eec5611cc041dba4650e42ccdf71d9)
+Signed-off-by: Lucas De Marchi <lucas.demarchi@intel.com>
+(cherry picked from commit d52dea485cd3c98cfeeb474cf66cf95df2ab142f)
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_vm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
+index 30c32717a980e..ed457243e9076 100644
+--- a/drivers/gpu/drm/xe/xe_vm.c
++++ b/drivers/gpu/drm/xe/xe_vm.c
+@@ -3475,8 +3475,8 @@ static int vm_bind_ioctl_check_args(struct xe_device *xe, struct xe_vm *vm,
+                                op == DRM_XE_VM_BIND_OP_PREFETCH) ||
+                   XE_IOCTL_DBG(xe, prefetch_region &&
+                                op != DRM_XE_VM_BIND_OP_PREFETCH) ||
+-                  XE_IOCTL_DBG(xe, !(BIT(prefetch_region) &
+-                                     xe->info.mem_region_mask)) ||
++                  XE_IOCTL_DBG(xe, prefetch_region >= (sizeof(xe->info.mem_region_mask) * 8) ||
++                               !(BIT(prefetch_region) & xe->info.mem_region_mask)) ||
+                   XE_IOCTL_DBG(xe, obj &&
+                                op == DRM_XE_VM_BIND_OP_UNMAP)) {
+                       err = -EINVAL;
+-- 
+2.51.0
+
diff --git a/queue-6.17/series b/queue-6.17/series
index 12d7401065830fe8c98070243ea45c4964f1977c..44538cf3e9f7168564944801fa7a06169e6fc08b 100644
--- a/queue-6.17/series
+++ b/queue-6.17/series
@@ -150,3 +150,4 @@ net-tls-cancel-rx-async-resync-request-on-rcd_delta-.patch
 x86-cpu-amd-extend-zen6-model-range.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+drm-xe-prevent-bit-overflow-when-handling-invalid-pr.patch
diff --git a/queue-6.6/f2fs-compress-change-the-first-parameter-of-page_arr.patch b/queue-6.6/f2fs-compress-change-the-first-parameter-of-page_arr.patch
new file mode 100644
index 0000000..875fa4d
--- /dev/null
+++ b/queue-6.6/f2fs-compress-change-the-first-parameter-of-page_arr.patch
@@ -0,0 +1,196 @@
+From 73b31c3e97b90e45e5df12957d68f76f22e373a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 06:40:27 +0000
+Subject: f2fs: compress: change the first parameter of page_array_{alloc,free}
+ to sbi
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+[ Upstream commit 8e2a9b656474d67c55010f2c003ea2cf889a19ff ]
+
+No logic changes, just cleanup and prepare for fixing the UAF issue
+in f2fs_free_dic.
+
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Signed-off-by: Baocong Liu <baocong.liu@unisoc.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 40 ++++++++++++++++++++--------------------
+ 1 file changed, 20 insertions(+), 20 deletions(-)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index c3b2f78ca4e3e..3bf7a6b40cbed 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -23,20 +23,18 @@
+ static struct kmem_cache *cic_entry_slab;
+ static struct kmem_cache *dic_entry_slab;
+-static void *page_array_alloc(struct inode *inode, int nr)
++static void *page_array_alloc(struct f2fs_sb_info *sbi, int nr)
+ {
+-      struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       unsigned int size = sizeof(struct page *) * nr;
+       if (likely(size <= sbi->page_array_slab_size))
+               return f2fs_kmem_cache_alloc(sbi->page_array_slab,
+-                                      GFP_F2FS_ZERO, false, F2FS_I_SB(inode));
++                                      GFP_F2FS_ZERO, false, sbi);
+       return f2fs_kzalloc(sbi, size, GFP_NOFS);
+ }
+-static void page_array_free(struct inode *inode, void *pages, int nr)
++static void page_array_free(struct f2fs_sb_info *sbi, void *pages, int nr)
+ {
+-      struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       unsigned int size = sizeof(struct page *) * nr;
+       if (!pages)
+@@ -145,13 +143,13 @@ int f2fs_init_compress_ctx(struct compress_ctx *cc)
+       if (cc->rpages)
+               return 0;
+-      cc->rpages = page_array_alloc(cc->inode, cc->cluster_size);
++      cc->rpages = page_array_alloc(F2FS_I_SB(cc->inode), cc->cluster_size);
+       return cc->rpages ? 0 : -ENOMEM;
+ }
+ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
+ {
+-      page_array_free(cc->inode, cc->rpages, cc->cluster_size);
++      page_array_free(F2FS_I_SB(cc->inode), cc->rpages, cc->cluster_size);
+       cc->rpages = NULL;
+       cc->nr_rpages = 0;
+       cc->nr_cpages = 0;
+@@ -614,6 +612,7 @@ static void *f2fs_vmap(struct page **pages, unsigned int count)
+ static int f2fs_compress_pages(struct compress_ctx *cc)
+ {
++      struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
+       struct f2fs_inode_info *fi = F2FS_I(cc->inode);
+       const struct f2fs_compress_ops *cops =
+                               f2fs_cops[fi->i_compress_algorithm];
+@@ -634,7 +633,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
+       cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+       cc->valid_nr_cpages = cc->nr_cpages;
+-      cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
++      cc->cpages = page_array_alloc(sbi, cc->nr_cpages);
+       if (!cc->cpages) {
+               ret = -ENOMEM;
+               goto destroy_compress_ctx;
+@@ -709,7 +708,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
+               if (cc->cpages[i])
+                       f2fs_compress_free_page(cc->cpages[i]);
+       }
+-      page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
++      page_array_free(sbi, cc->cpages, cc->nr_cpages);
+       cc->cpages = NULL;
+ destroy_compress_ctx:
+       if (cops->destroy_compress_ctx)
+@@ -1302,7 +1301,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+       cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
+       cic->inode = inode;
+       atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
+-      cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
++      cic->rpages = page_array_alloc(sbi, cc->cluster_size);
+       if (!cic->rpages)
+               goto out_put_cic;
+@@ -1395,13 +1394,13 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+       spin_unlock(&fi->i_size_lock);
+       f2fs_put_rpages(cc);
+-      page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
++      page_array_free(sbi, cc->cpages, cc->nr_cpages);
+       cc->cpages = NULL;
+       f2fs_destroy_compress_ctx(cc, false);
+       return 0;
+ out_destroy_crypt:
+-      page_array_free(cc->inode, cic->rpages, cc->cluster_size);
++      page_array_free(sbi, cic->rpages, cc->cluster_size);
+       for (--i; i >= 0; i--)
+               fscrypt_finalize_bounce_page(&cc->cpages[i]);
+@@ -1419,7 +1418,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
+               f2fs_compress_free_page(cc->cpages[i]);
+               cc->cpages[i] = NULL;
+       }
+-      page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
++      page_array_free(sbi, cc->cpages, cc->nr_cpages);
+       cc->cpages = NULL;
+       return -EAGAIN;
+ }
+@@ -1449,7 +1448,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
+               end_page_writeback(cic->rpages[i]);
+       }
+-      page_array_free(cic->inode, cic->rpages, cic->nr_rpages);
++      page_array_free(sbi, cic->rpages, cic->nr_rpages);
+       kmem_cache_free(cic_entry_slab, cic);
+ }
+@@ -1587,7 +1586,7 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+       if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
+               return 0;
+-      dic->tpages = page_array_alloc(dic->inode, dic->cluster_size);
++      dic->tpages = page_array_alloc(F2FS_I_SB(dic->inode), dic->cluster_size);
+       if (!dic->tpages)
+               return -ENOMEM;
+@@ -1647,7 +1646,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
+       if (!dic)
+               return ERR_PTR(-ENOMEM);
+-      dic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
++      dic->rpages = page_array_alloc(sbi, cc->cluster_size);
+       if (!dic->rpages) {
+               kmem_cache_free(dic_entry_slab, dic);
+               return ERR_PTR(-ENOMEM);
+@@ -1668,7 +1667,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
+               dic->rpages[i] = cc->rpages[i];
+       dic->nr_rpages = cc->cluster_size;
+-      dic->cpages = page_array_alloc(dic->inode, dic->nr_cpages);
++      dic->cpages = page_array_alloc(sbi, dic->nr_cpages);
+       if (!dic->cpages) {
+               ret = -ENOMEM;
+               goto out_free;
+@@ -1698,6 +1697,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
+               bool bypass_destroy_callback)
+ {
+       int i;
++      struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
+       f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
+@@ -1709,7 +1709,7 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
+                               continue;
+                       f2fs_compress_free_page(dic->tpages[i]);
+               }
+-              page_array_free(dic->inode, dic->tpages, dic->cluster_size);
++              page_array_free(sbi, dic->tpages, dic->cluster_size);
+       }
+       if (dic->cpages) {
+@@ -1718,10 +1718,10 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
+                               continue;
+                       f2fs_compress_free_page(dic->cpages[i]);
+               }
+-              page_array_free(dic->inode, dic->cpages, dic->nr_cpages);
++              page_array_free(sbi, dic->cpages, dic->nr_cpages);
+       }
+-      page_array_free(dic->inode, dic->rpages, dic->nr_rpages);
++      page_array_free(sbi, dic->rpages, dic->nr_rpages);
+       kmem_cache_free(dic_entry_slab, dic);
+ }
+-- 
+2.51.0
+
diff --git a/queue-6.6/f2fs-compress-fix-uaf-of-f2fs_inode_info-in-f2fs_fre.patch b/queue-6.6/f2fs-compress-fix-uaf-of-f2fs_inode_info-in-f2fs_fre.patch
new file mode 100644 (file)
index 0000000..dab8dfa
--- /dev/null
@@ -0,0 +1,222 @@
+From 6c09ddb42cc6eb6b06498f293c209e2249765bd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 06:40:28 +0000
+Subject: f2fs: compress: fix UAF of f2fs_inode_info in f2fs_free_dic
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+[ Upstream commit 39868685c2a94a70762bc6d77dc81d781d05bff5 ]
+
+The decompress_io_ctx may be released asynchronously after
+I/O completion. If the file is deleted immediately after a read,
+and the kworker processing post_read_wq has not run yet due to a
+high workload, the inode (f2fs_inode_info) may be evicted and
+freed before f2fs_free_dic() uses it.
+
+    The UAF race is as follows:
+    Thread A                                      Thread B
+    - f2fs_decompress_end_io
+     - f2fs_put_dic
+      - queue_work
+        add free_dic work to post_read_wq
+                                                   - do_unlink
+                                                    - iput
+                                                     - evict
+                                                      - call_rcu
+    This file is deleted after read.
+
+    Thread C                                 kworker to process post_read_wq
+    - rcu_do_batch
+     - f2fs_free_inode
+      - kmem_cache_free
+     inode is freed by rcu
+                                             - process_scheduled_works
+                                              - f2fs_late_free_dic
+                                               - f2fs_free_dic
+                                                - f2fs_release_decomp_mem
+                                      read (dic->inode)->i_compress_algorithm
+
+This patch stores compress_algorithm and sbi in dic to avoid the
+inode UAF.
+
+In addition, the previous solution was deprecated in [1] because it
+may cause a system hang.
+[1] https://lore.kernel.org/all/c36ab955-c8db-4a8b-a9d0-f07b5f426c3f@kernel.org
+
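+The fix follows a common lifetime rule: snapshot everything the
+asynchronous teardown needs while the inode is still live, and never
+dereference dic->inode on the free path. A minimal user-space sketch
+of the rule (hypothetical names, not the f2fs code):
+
+    #include <stdlib.h>
+
+    struct sbi_like { int id; };
+    struct inode_like {
+            struct sbi_like *sbi;
+            unsigned char algorithm;
+    };
+
+    struct dic_like {
+            struct inode_like *inode;  /* unsafe once the inode is evicted */
+            struct sbi_like *sbi;      /* snapshot taken at alloc time     */
+            unsigned char algorithm;   /* snapshot taken at alloc time     */
+    };
+
+    static struct dic_like *dic_alloc(struct inode_like *inode)
+    {
+            struct dic_like *dic = malloc(sizeof(*dic));
+
+            if (!dic)
+                    return NULL;
+            dic->inode = inode;
+            dic->sbi = inode->sbi;              /* inode still live here */
+            dic->algorithm = inode->algorithm;
+            return dic;
+    }
+
+    static void dic_free(struct dic_like *dic)
+    {
+            /* may run after the inode is freed: use only the snapshots */
+            free(dic);
+    }
+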
+Cc: Daeho Jeong <daehojeong@google.com>
+Fixes: bff139b49d9f ("f2fs: handle decompress only post processing in softirq")
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Signed-off-by: Baocong Liu <baocong.liu@unisoc.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+[ In Linux 6.6.y, the f2fs_vmalloc() function parameters are not
+  related to the f2fs_sb_info structure, so the code changes for
+  f2fs_vmalloc() have not been backported. ]
+Signed-off-by: Bin Lan <lanbincn@139.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 38 +++++++++++++++++++-------------------
+ fs/f2fs/f2fs.h     |  2 ++
+ 2 files changed, 21 insertions(+), 19 deletions(-)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 3bf7a6b40cbed..df7404214f34e 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -209,13 +209,13 @@ static int lzo_decompress_pages(struct decompress_io_ctx *dic)
+       ret = lzo1x_decompress_safe(dic->cbuf->cdata, dic->clen,
+                                               dic->rbuf, &dic->rlen);
+       if (ret != LZO_E_OK) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "lzo decompress failed, ret:%d", ret);
+               return -EIO;
+       }
+       if (dic->rlen != PAGE_SIZE << dic->log_cluster_size) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "lzo invalid rlen:%zu, expected:%lu",
+                               dic->rlen, PAGE_SIZE << dic->log_cluster_size);
+               return -EIO;
+@@ -289,13 +289,13 @@ static int lz4_decompress_pages(struct decompress_io_ctx *dic)
+       ret = LZ4_decompress_safe(dic->cbuf->cdata, dic->rbuf,
+                                               dic->clen, dic->rlen);
+       if (ret < 0) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "lz4 decompress failed, ret:%d", ret);
+               return -EIO;
+       }
+       if (ret != PAGE_SIZE << dic->log_cluster_size) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "lz4 invalid ret:%d, expected:%lu",
+                               ret, PAGE_SIZE << dic->log_cluster_size);
+               return -EIO;
+@@ -423,7 +423,7 @@ static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
+       stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
+       if (!stream) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "%s zstd_init_dstream failed", __func__);
+               vfree(workspace);
+               return -EIO;
+@@ -459,14 +459,14 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
+       ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
+       if (zstd_is_error(ret)) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "%s zstd_decompress_stream failed, ret: %d",
+                               __func__, zstd_get_error_code(ret));
+               return -EIO;
+       }
+       if (dic->rlen != outbuf.pos) {
+-              f2fs_err_ratelimited(F2FS_I_SB(dic->inode),
++              f2fs_err_ratelimited(dic->sbi,
+                               "%s ZSTD invalid rlen:%zu, expected:%lu",
+                               __func__, dic->rlen,
+                               PAGE_SIZE << dic->log_cluster_size);
+@@ -726,7 +726,7 @@ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+ void f2fs_decompress_cluster(struct decompress_io_ctx *dic, bool in_task)
+ {
+-      struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
++      struct f2fs_sb_info *sbi = dic->sbi;
+       struct f2fs_inode_info *fi = F2FS_I(dic->inode);
+       const struct f2fs_compress_ops *cops =
+                       f2fs_cops[fi->i_compress_algorithm];
+@@ -799,7 +799,7 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
+ {
+       struct decompress_io_ctx *dic =
+                       (struct decompress_io_ctx *)page_private(page);
+-      struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
++      struct f2fs_sb_info *sbi = dic->sbi;
+       dec_page_count(sbi, F2FS_RD_DATA);
+@@ -1579,14 +1579,13 @@ static inline bool allow_memalloc_for_decomp(struct f2fs_sb_info *sbi,
+ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+               bool pre_alloc)
+ {
+-      const struct f2fs_compress_ops *cops =
+-              f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
++      const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
+       int i;
+-      if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
++      if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
+               return 0;
+-      dic->tpages = page_array_alloc(F2FS_I_SB(dic->inode), dic->cluster_size);
++      dic->tpages = page_array_alloc(dic->sbi, dic->cluster_size);
+       if (!dic->tpages)
+               return -ENOMEM;
+@@ -1616,10 +1615,9 @@ static int f2fs_prepare_decomp_mem(struct decompress_io_ctx *dic,
+ static void f2fs_release_decomp_mem(struct decompress_io_ctx *dic,
+               bool bypass_destroy_callback, bool pre_alloc)
+ {
+-      const struct f2fs_compress_ops *cops =
+-              f2fs_cops[F2FS_I(dic->inode)->i_compress_algorithm];
++      const struct f2fs_compress_ops *cops = f2fs_cops[dic->compress_algorithm];
+-      if (!allow_memalloc_for_decomp(F2FS_I_SB(dic->inode), pre_alloc))
++      if (!allow_memalloc_for_decomp(dic->sbi, pre_alloc))
+               return;
+       if (!bypass_destroy_callback && cops->destroy_decompress_ctx)
+@@ -1654,6 +1652,8 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
+       dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
+       dic->inode = cc->inode;
++      dic->sbi = sbi;
++      dic->compress_algorithm = F2FS_I(cc->inode)->i_compress_algorithm;
+       atomic_set(&dic->remaining_pages, cc->nr_cpages);
+       dic->cluster_idx = cc->cluster_idx;
+       dic->cluster_size = cc->cluster_size;
+@@ -1697,7 +1697,8 @@ static void f2fs_free_dic(struct decompress_io_ctx *dic,
+               bool bypass_destroy_callback)
+ {
+       int i;
+-      struct f2fs_sb_info *sbi = F2FS_I_SB(dic->inode);
++      /* use sbi in dic to avoid UAF of dic->inode */
++      struct f2fs_sb_info *sbi = dic->sbi;
+       f2fs_release_decomp_mem(dic, bypass_destroy_callback, true);
+@@ -1740,8 +1741,7 @@ static void f2fs_put_dic(struct decompress_io_ctx *dic, bool in_task)
+                       f2fs_free_dic(dic, false);
+               } else {
+                       INIT_WORK(&dic->free_work, f2fs_late_free_dic);
+-                      queue_work(F2FS_I_SB(dic->inode)->post_read_wq,
+-                                      &dic->free_work);
++                      queue_work(dic->sbi->post_read_wq, &dic->free_work);
+               }
+       }
+ }
+diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
+index ab2ddd09d8131..406243395b943 100644
+--- a/fs/f2fs/f2fs.h
++++ b/fs/f2fs/f2fs.h
+@@ -1493,6 +1493,7 @@ struct compress_io_ctx {
+ struct decompress_io_ctx {
+       u32 magic;                      /* magic number to indicate page is compressed */
+       struct inode *inode;            /* inode the context belong to */
++      struct f2fs_sb_info *sbi;       /* f2fs_sb_info pointer */
+       pgoff_t cluster_idx;            /* cluster index number */
+       unsigned int cluster_size;      /* page count in cluster */
+       unsigned int log_cluster_size;  /* log of cluster size */
+@@ -1533,6 +1534,7 @@ struct decompress_io_ctx {
+       bool failed;                    /* IO error occurred before decompression? */
+       bool need_verity;               /* need fs-verity verification after decompression? */
++      unsigned char compress_algorithm;       /* backup algorithm type */
+       void *private;                  /* payload buffer for specified decompression algorithm */
+       void *private2;                 /* extra payload buffer */
+       struct work_struct verity_work; /* work to verify the decompressed pages */
+-- 
+2.51.0
+
diff --git a/queue-6.6/s390-mm-fix-__ptep_rdp-inline-assembly.patch b/queue-6.6/s390-mm-fix-__ptep_rdp-inline-assembly.patch
new file mode 100644 (file)
index 0000000..2ff17ec
--- /dev/null
@@ -0,0 +1,87 @@
+From 7decbd7826c033c43a4e8ebd3f4f49b1e5dcb4e8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Nov 2025 11:51:07 +0100
+Subject: s390/mm: Fix __ptep_rdp() inline assembly
+
+From: Heiko Carstens <hca@linux.ibm.com>
+
+[ Upstream commit 31475b88110c4725b4f9a79c3a0d9bbf97e69e1c ]
+
+When a zero ASCE is passed to the __ptep_rdp() inline assembly, the
+generated instruction should have its R3 field set to zero. However,
+the inline assembly is written incorrectly: in such cases a zero is
+loaded into a register allocated by the compiler, and that register
+is then used by the instruction.
+
+This means that selected TLB entries may not be flushed, since the
+specified ASCE does not match the one used when those entries were
+created.
+
+Fix this by removing the asce and opt parameters of __ptep_rdp(), since
+all callers always pass zero, and use a hard-coded register zero for
+the R3 field.
+
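+For illustration, a minimal sketch of the two encodings (hypothetical
+helper names, simplified operands; note that the "a" constraint never
+allocates r0, so the buggy variant always encodes a nonzero register
+in the R3 field):
+
+    static inline void rdp_buggy(unsigned long pto, unsigned long addr)
+    {
+            /* R3 encodes whichever register the compiler loaded 0 into */
+            asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],0"
+                         : : [r1] "a" (pto), [r2] "a" (addr),
+                             [asce] "a" (0UL)
+                         : "memory");
+    }
+
+    static inline void rdp_fixed(unsigned long pto, unsigned long addr)
+    {
+            /* R3 field is the literal 0, which RDP defines as "no ASCE" */
+            asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%%r0,0"
+                         : : [r1] "a" (pto), [r2] "a" (addr)
+                         : "memory");
+    }
+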
+Fixes: 0807b856521f ("s390/mm: add support for RDP (Reset DAT-Protection)")
+Cc: stable@vger.kernel.org
+Reviewed-by: Gerald Schaefer <gerald.schaefer@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/pgtable.h | 12 +++++-------
+ arch/s390/mm/pgtable.c          |  4 ++--
+ 2 files changed, 7 insertions(+), 9 deletions(-)
+
+diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
+index da2e91b5b1925..2cc9d7bb1b2ac 100644
+--- a/arch/s390/include/asm/pgtable.h
++++ b/arch/s390/include/asm/pgtable.h
+@@ -1065,17 +1065,15 @@ static inline pte_t pte_mkhuge(pte_t pte)
+ #define IPTE_NODAT    0x400
+ #define IPTE_GUEST_ASCE       0x800
+-static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep,
+-                                     unsigned long opt, unsigned long asce,
+-                                     int local)
++static __always_inline void __ptep_rdp(unsigned long addr, pte_t *ptep, int local)
+ {
+       unsigned long pto;
+       pto = __pa(ptep) & ~(PTRS_PER_PTE * sizeof(pte_t) - 1);
+-      asm volatile(".insn rrf,0xb98b0000,%[r1],%[r2],%[asce],%[m4]"
++      asm volatile(".insn     rrf,0xb98b0000,%[r1],%[r2],%%r0,%[m4]"
+                    : "+m" (*ptep)
+-                   : [r1] "a" (pto), [r2] "a" ((addr & PAGE_MASK) | opt),
+-                     [asce] "a" (asce), [m4] "i" (local));
++                   : [r1] "a" (pto), [r2] "a" (addr & PAGE_MASK),
++                     [m4] "i" (local));
+ }
+ static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
+@@ -1259,7 +1257,7 @@ static inline void flush_tlb_fix_spurious_fault(struct vm_area_struct *vma,
+        * A local RDP can be used to do the flush.
+        */
+       if (MACHINE_HAS_RDP && !(pte_val(*ptep) & _PAGE_PROTECT))
+-              __ptep_rdp(address, ptep, 0, 0, 1);
++              __ptep_rdp(address, ptep, 1);
+ }
+ #define flush_tlb_fix_spurious_fault flush_tlb_fix_spurious_fault
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index 5e349869590a8..1fb435b3913cd 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -312,9 +312,9 @@ void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+       preempt_disable();
+       atomic_inc(&mm->context.flush_count);
+       if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
+-              __ptep_rdp(addr, ptep, 0, 0, 1);
++              __ptep_rdp(addr, ptep, 1);
+       else
+-              __ptep_rdp(addr, ptep, 0, 0, 0);
++              __ptep_rdp(addr, ptep, 0);
+       /*
+        * PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
+        * means it is still valid and active, and must not be changed according
+-- 
+2.51.0
+
index a217872cc1e86132f2f78bb4cbe9c5696307bb72..6ae84b9c8b78bb2862e2b4d3a90cdf0ed2de139d 100644 (file)
@@ -63,3 +63,6 @@ selftests-net-use-bash-for-bareudp-testing.patch
 net-tls-cancel-rx-async-resync-request-on-rcd_delta-.patch
 kconfig-mconf-initialize-the-default-locale-at-start.patch
 kconfig-nconf-initialize-the-default-locale-at-start.patch
+f2fs-compress-change-the-first-parameter-of-page_arr.patch
+s390-mm-fix-__ptep_rdp-inline-assembly.patch
+f2fs-compress-fix-uaf-of-f2fs_inode_info-in-f2fs_fre.patch