6.12-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 5 Jan 2026 13:11:17 +0000 (14:11 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 5 Jan 2026 13:11:17 +0000 (14:11 +0100)
added patches:
compiler_types.h-add-auto-as-a-macro-for-__auto_type.patch
e1000-fix-oob-in-e1000_tbi_should_accept.patch
fjes-add-missing-iounmap-in-fjes_hw_init.patch
idr-fix-idr_alloc-returning-an-id-out-of-range.patch
kasan-refactor-pcpu-kasan-vmalloc-unpoison.patch
kasan-unpoison-vms-addresses-with-a-common-tag.patch
lockd-fix-vfs_test_lock-calls.patch
loongarch-bpf-sign-extend-kfunc-call-arguments.patch
loongarch-bpf-zero-extend-bpf_tail_call-index.patch
loongarch-refactor-register-restoration-in-ftrace_common_return.patch
mm-kasan-fix-incorrect-unpoisoning-in-vrealloc-for-kasan.patch
mm-page_owner-fix-memory-leak-in-page_owner_stack_fops-release.patch
net-macb-relocate-mog_init_rings-callback-from-macb_mac_link_up-to-macb_open.patch
net-nfc-fix-deadlock-between-nfc_unregister_device-and-rfkill_fop_write.patch
net-usb-sr9700-fix-incorrect-command-used-to-write-single-register.patch
nfsd-drop-the-client-reference-in-client_states_open.patch
pmdomain-imx-fix-reference-count-leak-in-imx_gpc_probe.patch
rdma-cm-fix-leaking-the-multicast-gid-table-reference.patch
rdma-core-check-for-the-presence-of-ls_nla_type_dgid-correctly.patch
samples-ftrace-adjust-loongarch-register-restore-order-in-direct-calls.patch
tools-mm-page_owner_sort-fix-timestamp-comparison-for-stable-sorting.patch
x86-microcode-amd-fix-entrysign-revision-check-for-zen5-strix-halo.patch

23 files changed:
queue-6.12/compiler_types.h-add-auto-as-a-macro-for-__auto_type.patch [new file with mode: 0644]
queue-6.12/e1000-fix-oob-in-e1000_tbi_should_accept.patch [new file with mode: 0644]
queue-6.12/fjes-add-missing-iounmap-in-fjes_hw_init.patch [new file with mode: 0644]
queue-6.12/idr-fix-idr_alloc-returning-an-id-out-of-range.patch [new file with mode: 0644]
queue-6.12/kasan-refactor-pcpu-kasan-vmalloc-unpoison.patch [new file with mode: 0644]
queue-6.12/kasan-unpoison-vms-addresses-with-a-common-tag.patch [new file with mode: 0644]
queue-6.12/lockd-fix-vfs_test_lock-calls.patch [new file with mode: 0644]
queue-6.12/loongarch-bpf-sign-extend-kfunc-call-arguments.patch [new file with mode: 0644]
queue-6.12/loongarch-bpf-zero-extend-bpf_tail_call-index.patch [new file with mode: 0644]
queue-6.12/loongarch-refactor-register-restoration-in-ftrace_common_return.patch [new file with mode: 0644]
queue-6.12/mm-kasan-fix-incorrect-unpoisoning-in-vrealloc-for-kasan.patch [new file with mode: 0644]
queue-6.12/mm-page_owner-fix-memory-leak-in-page_owner_stack_fops-release.patch [new file with mode: 0644]
queue-6.12/net-macb-relocate-mog_init_rings-callback-from-macb_mac_link_up-to-macb_open.patch [new file with mode: 0644]
queue-6.12/net-nfc-fix-deadlock-between-nfc_unregister_device-and-rfkill_fop_write.patch [new file with mode: 0644]
queue-6.12/net-usb-sr9700-fix-incorrect-command-used-to-write-single-register.patch [new file with mode: 0644]
queue-6.12/nfsd-drop-the-client-reference-in-client_states_open.patch [new file with mode: 0644]
queue-6.12/pmdomain-imx-fix-reference-count-leak-in-imx_gpc_probe.patch [new file with mode: 0644]
queue-6.12/rdma-cm-fix-leaking-the-multicast-gid-table-reference.patch [new file with mode: 0644]
queue-6.12/rdma-core-check-for-the-presence-of-ls_nla_type_dgid-correctly.patch [new file with mode: 0644]
queue-6.12/samples-ftrace-adjust-loongarch-register-restore-order-in-direct-calls.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/tools-mm-page_owner_sort-fix-timestamp-comparison-for-stable-sorting.patch [new file with mode: 0644]
queue-6.12/x86-microcode-amd-fix-entrysign-revision-check-for-zen5-strix-halo.patch [new file with mode: 0644]

diff --git a/queue-6.12/compiler_types.h-add-auto-as-a-macro-for-__auto_type.patch b/queue-6.12/compiler_types.h-add-auto-as-a-macro-for-__auto_type.patch
new file mode 100644 (file)
index 0000000..2774d61
--- /dev/null
@@ -0,0 +1,58 @@
+From 2fb6915fa22dc5524d704afba58a13305dd9f533 Mon Sep 17 00:00:00 2001
+From: "H. Peter Anvin" <hpa@zytor.com>
+Date: Fri, 18 Jul 2025 11:35:00 -0700
+Subject: compiler_types.h: add "auto" as a macro for "__auto_type"
+
+From: H. Peter Anvin <hpa@zytor.com>
+
+commit 2fb6915fa22dc5524d704afba58a13305dd9f533 upstream.
+
+"auto" was defined as a keyword back in the K&R days, but as a storage
+type specifier.  No one ever used it, since it was and is the default
+storage type for local variables.
+
+C++11 recycled the keyword to allow a type to be declared based on the
+type of an initializer.  This was finally adopted into standard C in
+C23.
+
+gcc and clang provide the "__auto_type" alias keyword as an extension
+for pre-C23, however, there is no reason to pollute the bulk of the
+source base with this temporary keyword; instead define "auto" as a
+macro unless the compiler is running in C23+ mode.
+
+This macro is added in <linux/compiler_types.h> because that header is
+included in some of the tools headers, whereas <linux/compiler.h> is
+not as it has a bunch of very kernel-specific things in it.
+
+[ Cc: stable to reduce potential backporting burden. ]
+
+Signed-off-by: H. Peter Anvin (Intel) <hpa@zytor.com>
+Acked-by: Miguel Ojeda <ojeda@kernel.org>
+Cc: <stable@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/compiler_types.h |   13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/include/linux/compiler_types.h
++++ b/include/linux/compiler_types.h
+@@ -14,6 +14,19 @@
+ #ifndef __ASSEMBLY__
+ /*
++ * C23 introduces "auto" as a standard way to define type-inferred
++ * variables, but "auto" has been a (useless) keyword even since K&R C,
++ * so it has always been "namespace reserved."
++ *
++ * Until at some future time we require C23 support, we need the gcc
++ * extension __auto_type, but there is no reason to put that elsewhere
++ * in the source code.
++ */
++#if __STDC_VERSION__ < 202311L
++# define auto __auto_type
++#endif
++
++/*
+  * Skipped when running bindgen due to a libclang issue;
+  * see https://github.com/rust-lang/rust-bindgen/issues/2244.
+  */
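
As a rough userspace sketch of what the macro buys before C23 (not part of the
patch; builds with gcc or clang, where __auto_type is available):

  #include <stdio.h>

  #if !defined(__STDC_VERSION__) || __STDC_VERSION__ < 202311L
  # define auto __auto_type             /* pre-C23: gcc/clang extension */
  #endif

  int main(void)
  {
          auto x = 40 + 2;              /* inferred as int */
          auto p = &x;                  /* inferred as int * */
          printf("%d %d\n", x, *p);
          return 0;
  }
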
diff --git a/queue-6.12/e1000-fix-oob-in-e1000_tbi_should_accept.patch b/queue-6.12/e1000-fix-oob-in-e1000_tbi_should_accept.patch
new file mode 100644 (file)
index 0000000..d98b1e0
--- /dev/null
@@ -0,0 +1,123 @@
+From 9c72a5182ed92904d01057f208c390a303f00a0f Mon Sep 17 00:00:00 2001
+From: Guangshuo Li <lgs201920130244@gmail.com>
+Date: Mon, 1 Dec 2025 11:40:58 +0800
+Subject: e1000: fix OOB in e1000_tbi_should_accept()
+
+From: Guangshuo Li <lgs201920130244@gmail.com>
+
+commit 9c72a5182ed92904d01057f208c390a303f00a0f upstream.
+
+In e1000_tbi_should_accept() we read the last byte of the frame via
+'data[length - 1]' to evaluate the TBI workaround. If the descriptor-
+reported length is zero or larger than the actual RX buffer size, this
+read goes out of bounds and can hit unrelated slab objects. The issue
+is observed from the NAPI receive path (e1000_clean_rx_irq):
+
+==================================================================
+BUG: KASAN: slab-out-of-bounds in e1000_tbi_should_accept+0x610/0x790
+Read of size 1 at addr ffff888014114e54 by task sshd/363
+
+CPU: 0 PID: 363 Comm: sshd Not tainted 5.18.0-rc1 #1
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba5276e321-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x5a/0x74
+ print_address_description+0x7b/0x440
+ print_report+0x101/0x200
+ kasan_report+0xc1/0xf0
+ e1000_tbi_should_accept+0x610/0x790
+ e1000_clean_rx_irq+0xa8c/0x1110
+ e1000_clean+0xde2/0x3c10
+ __napi_poll+0x98/0x380
+ net_rx_action+0x491/0xa20
+ __do_softirq+0x2c9/0x61d
+ do_softirq+0xd1/0x120
+ </IRQ>
+ <TASK>
+ __local_bh_enable_ip+0xfe/0x130
+ ip_finish_output2+0x7d5/0xb00
+ __ip_queue_xmit+0xe24/0x1ab0
+ __tcp_transmit_skb+0x1bcb/0x3340
+ tcp_write_xmit+0x175d/0x6bd0
+ __tcp_push_pending_frames+0x7b/0x280
+ tcp_sendmsg_locked+0x2e4f/0x32d0
+ tcp_sendmsg+0x24/0x40
+ sock_write_iter+0x322/0x430
+ vfs_write+0x56c/0xa60
+ ksys_write+0xd1/0x190
+ do_syscall_64+0x43/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+RIP: 0033:0x7f511b476b10
+Code: 73 01 c3 48 8b 0d 88 d3 2b 00 f7 d8 64 89 01 48 83 c8 ff c3 66 0f 1f 44 00 00 83 3d f9 2b 2c 00 00 75 10 b8 01 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 31 c3 48 83 ec 08 e8 8e 9b 01 00 48 89 04 24
+RSP: 002b:00007ffc9211d4e8 EFLAGS: 00000246 ORIG_RAX: 0000000000000001
+RAX: ffffffffffffffda RBX: 0000000000004024 RCX: 00007f511b476b10
+RDX: 0000000000004024 RSI: 0000559a9385962c RDI: 0000000000000003
+RBP: 0000559a9383a400 R08: fffffffffffffff0 R09: 0000000000004f00
+R10: 0000000000000070 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007ffc9211d57f R14: 0000559a9347bde7 R15: 0000000000000003
+ </TASK>
+Allocated by task 1:
+ __kasan_krealloc+0x131/0x1c0
+ krealloc+0x90/0xc0
+ add_sysfs_param+0xcb/0x8a0
+ kernel_add_sysfs_param+0x81/0xd4
+ param_sysfs_builtin+0x138/0x1a6
+ param_sysfs_init+0x57/0x5b
+ do_one_initcall+0x104/0x250
+ do_initcall_level+0x102/0x132
+ do_initcalls+0x46/0x74
+ kernel_init_freeable+0x28f/0x393
+ kernel_init+0x14/0x1a0
+ ret_from_fork+0x22/0x30
+The buggy address belongs to the object at ffff888014114000
+ which belongs to the cache kmalloc-2k of size 2048
+The buggy address is located 1620 bytes to the right of
+ 2048-byte region [ffff888014114000, ffff888014114800]
+The buggy address belongs to the physical page:
+page:ffffea0000504400 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x14110
+head:ffffea0000504400 order:3 compound_mapcount:0 compound_pincount:0
+flags: 0x100000000010200(slab|head|node=0|zone=1)
+raw: 0100000000010200 0000000000000000 dead000000000001 ffff888013442000
+raw: 0000000000000000 0000000000080008 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+==================================================================
+
+This happens because the TBI check unconditionally dereferences the last
+byte without validating the reported length first:
+
+       u8 last_byte = *(data + length - 1);
+
+Fix by rejecting the frame early if the length is zero, or if it exceeds
+adapter->rx_buffer_len. This preserves the TBI workaround semantics for
+valid frames and prevents touching memory beyond the RX buffer.
+
+Fixes: 2037110c96d5 ("e1000: move tbi workaround code into helper function")
+Cc: stable@vger.kernel.org
+Signed-off-by: Guangshuo Li <lgs201920130244@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/intel/e1000/e1000_main.c |   10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -4088,7 +4088,15 @@ static bool e1000_tbi_should_accept(stru
+                                   u32 length, const u8 *data)
+ {
+       struct e1000_hw *hw = &adapter->hw;
+-      u8 last_byte = *(data + length - 1);
++      u8 last_byte;
++
++      /* Guard against OOB on data[length - 1] */
++      if (unlikely(!length))
++              return false;
++      /* Upper bound: length must not exceed rx_buffer_len */
++      if (unlikely(length > adapter->rx_buffer_len))
++              return false;
++      last_byte = *(data + length - 1);
+       if (TBI_ACCEPT(hw, status, errors, length, last_byte)) {
+               unsigned long irq_flags;
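
The shape of the guard, as a standalone sketch (RX_BUF_LEN stands in for
adapter->rx_buffer_len, and the final comparison for the real TBI_ACCEPT()
check):

  #include <stdbool.h>
  #include <stddef.h>

  #define RX_BUF_LEN 2048               /* stand-in for adapter->rx_buffer_len */

  static bool last_byte_ok(const unsigned char *data, size_t length)
  {
          /* validate the descriptor-reported length before data[length - 1] */
          if (length == 0 || length > RX_BUF_LEN)
                  return false;
          return data[length - 1] != 0; /* stand-in for the TBI_ACCEPT() test */
  }
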
diff --git a/queue-6.12/fjes-add-missing-iounmap-in-fjes_hw_init.patch b/queue-6.12/fjes-add-missing-iounmap-in-fjes_hw_init.patch
new file mode 100644 (file)
index 0000000..6428d99
--- /dev/null
@@ -0,0 +1,60 @@
+From 15ef641a0c6728d25a400df73922e80ab2cf029c Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Date: Thu, 11 Dec 2025 15:37:56 +0800
+Subject: fjes: Add missing iounmap in fjes_hw_init()
+
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+
+commit 15ef641a0c6728d25a400df73922e80ab2cf029c upstream.
+
+In error paths, add fjes_hw_iounmap() to release the
+resource acquired by fjes_hw_iomap(). Add a goto label
+to do so.
+
+Fixes: 8cdc3f6c5d22 ("fjes: Hardware initialization routine")
+Cc: stable@vger.kernel.org
+Signed-off-by: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Signed-off-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20251211073756.101824-1-lihaoxiang@isrc.iscas.ac.cn
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/fjes/fjes_hw.c |   12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/fjes/fjes_hw.c
++++ b/drivers/net/fjes/fjes_hw.c
+@@ -334,7 +334,7 @@ int fjes_hw_init(struct fjes_hw *hw)
+       ret = fjes_hw_reset(hw);
+       if (ret)
+-              return ret;
++              goto err_iounmap;
+       fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, true);
+@@ -347,8 +347,10 @@ int fjes_hw_init(struct fjes_hw *hw)
+       hw->max_epid = fjes_hw_get_max_epid(hw);
+       hw->my_epid = fjes_hw_get_my_epid(hw);
+-      if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid))
+-              return -ENXIO;
++      if ((hw->max_epid == 0) || (hw->my_epid >= hw->max_epid)) {
++              ret = -ENXIO;
++              goto err_iounmap;
++      }
+       ret = fjes_hw_setup(hw);
+@@ -356,6 +358,10 @@ int fjes_hw_init(struct fjes_hw *hw)
+       hw->hw_info.trace_size = FJES_DEBUG_BUFFER_SIZE;
+       return ret;
++
++err_iounmap:
++      fjes_hw_iounmap(hw);
++      return ret;
+ }
+ void fjes_hw_exit(struct fjes_hw *hw)
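
The unwind pattern the patch restores, sketched generically with
malloc()/free() standing in for fjes_hw_iomap()/fjes_hw_iounmap():

  #include <errno.h>
  #include <stdlib.h>

  static int hw_init_sketch(void)
  {
          void *regs = malloc(4096);    /* "iomap" */
          int ret;

          if (!regs)
                  return -ENOMEM;

          ret = -ENXIO;                 /* pretend a later init step failed */
          if (ret)
                  goto err_unmap;       /* every post-map failure unwinds the map */

          return 0;

  err_unmap:
          free(regs);                   /* "iounmap" */
          return ret;
  }
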
diff --git a/queue-6.12/idr-fix-idr_alloc-returning-an-id-out-of-range.patch b/queue-6.12/idr-fix-idr_alloc-returning-an-id-out-of-range.patch
new file mode 100644 (file)
index 0000000..eed5f4b
--- /dev/null
@@ -0,0 +1,80 @@
+From c6e8e595a0798ad67da0f7bebaf69c31ef70dfff Mon Sep 17 00:00:00 2001
+From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
+Date: Fri, 28 Nov 2025 16:18:32 +0000
+Subject: idr: fix idr_alloc() returning an ID out of range
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Matthew Wilcox (Oracle) <willy@infradead.org>
+
+commit c6e8e595a0798ad67da0f7bebaf69c31ef70dfff upstream.
+
+If you use an IDR with a non-zero base, and specify a range that lies
+entirely below the base, 'max - base' becomes very large and
+idr_get_free() can return an ID that lies outside of the requested range.
+
+Link: https://lkml.kernel.org/r/20251128161853.3200058-1-willy@infradead.org
+Fixes: 6ce711f27500 ("idr: Make 1-based IDRs more efficient")
+Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Reported-by: Jan Sokolowski <jan.sokolowski@intel.com>
+Reported-by: Koen Koning <koen.koning@intel.com>
+Reported-by: Peter Senna Tschudin <peter.senna@linux.intel.com>
+Closes: https://gitlab.freedesktop.org/drm/xe/kernel/-/issues/6449
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/idr.c                           |    2 ++
+ tools/testing/radix-tree/idr-test.c |   21 +++++++++++++++++++++
+ 2 files changed, 23 insertions(+)
+
+--- a/lib/idr.c
++++ b/lib/idr.c
+@@ -40,6 +40,8 @@ int idr_alloc_u32(struct idr *idr, void
+       if (WARN_ON_ONCE(!(idr->idr_rt.xa_flags & ROOT_IS_IDR)))
+               idr->idr_rt.xa_flags |= IDR_RT_MARKER;
++      if (max < base)
++              return -ENOSPC;
+       id = (id < base) ? 0 : id - base;
+       radix_tree_iter_init(&iter, id);
+--- a/tools/testing/radix-tree/idr-test.c
++++ b/tools/testing/radix-tree/idr-test.c
+@@ -57,6 +57,26 @@ void idr_alloc_test(void)
+       idr_destroy(&idr);
+ }
++void idr_alloc2_test(void)
++{
++      int id;
++      struct idr idr = IDR_INIT_BASE(idr, 1);
++
++      id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
++      assert(id == -ENOSPC);
++
++      id = idr_alloc(&idr, idr_alloc2_test, 1, 2, GFP_KERNEL);
++      assert(id == 1);
++
++      id = idr_alloc(&idr, idr_alloc2_test, 0, 1, GFP_KERNEL);
++      assert(id == -ENOSPC);
++
++      id = idr_alloc(&idr, idr_alloc2_test, 0, 2, GFP_KERNEL);
++      assert(id == -ENOSPC);
++
++      idr_destroy(&idr);
++}
++
+ void idr_replace_test(void)
+ {
+       DEFINE_IDR(idr);
+@@ -409,6 +429,7 @@ void idr_checks(void)
+       idr_replace_test();
+       idr_alloc_test();
++      idr_alloc2_test();
+       idr_null_test();
+       idr_nowait_test();
+       idr_get_next_test(0);
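
The 'max - base' problem described above is plain unsigned wrap-around; a
standalone sketch using the values from the first test case (base 1, requested
range [0, 1), so roughly max = end - 1 = 0):

  #include <stdio.h>

  int main(void)
  {
          unsigned int base = 1;        /* IDR_INIT_BASE(idr, 1) */
          unsigned int max  = 0;        /* idr_alloc(..., 0, 1, ...): end - 1 */

          /* Without the early "if (max < base) return -ENOSPC;" the search
           * bound wraps around and the whole ID space looks available: */
          printf("max - base = %u\n", max - base);      /* 4294967295 */
          return 0;
  }
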
diff --git a/queue-6.12/kasan-refactor-pcpu-kasan-vmalloc-unpoison.patch b/queue-6.12/kasan-refactor-pcpu-kasan-vmalloc-unpoison.patch
new file mode 100644 (file)
index 0000000..1e2e0e9
--- /dev/null
@@ -0,0 +1,122 @@
+From 6f13db031e27e88213381039032a9cc061578ea6 Mon Sep 17 00:00:00 2001
+From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+Date: Thu, 4 Dec 2025 19:00:04 +0000
+Subject: kasan: refactor pcpu kasan vmalloc unpoison
+
+From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+
+commit 6f13db031e27e88213381039032a9cc061578ea6 upstream.
+
+A KASAN tag mismatch, possibly causing a kernel panic, can be observed
+on systems with a tag-based KASAN enabled and with multiple NUMA nodes.
+It was reported on arm64 and reproduced on x86. It can be explained in
+the following points:
+
+1. There can be more than one virtual memory chunk.
+2. Chunk's base address has a tag.
+3. The base address points at the first chunk and thus inherits
+   the tag of the first chunk.
+4. The subsequent chunks will be accessed with the tag from the
+   first chunk.
+5. Thus, the subsequent chunks need to have their tag set to
+   match that of the first chunk.
+
+Refactor code by reusing __kasan_unpoison_vmalloc in a new helper in
+preparation for the actual fix.
+
+Link: https://lkml.kernel.org/r/eb61d93b907e262eefcaa130261a08bcb6c5ce51.1764874575.git.m.wieczorretman@pm.me
+Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
+Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Jiayuan Chen <jiayuan.chen@linux.dev>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Marco Elver <elver@google.com>
+Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>   [6.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kasan.h |   15 +++++++++++++++
+ mm/kasan/common.c     |   17 +++++++++++++++++
+ mm/vmalloc.c          |    4 +---
+ 3 files changed, 33 insertions(+), 3 deletions(-)
+
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -608,6 +608,16 @@ static __always_inline void kasan_poison
+               __kasan_poison_vmalloc(start, size);
+ }
++void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
++                               kasan_vmalloc_flags_t flags);
++static __always_inline void
++kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
++                        kasan_vmalloc_flags_t flags)
++{
++      if (kasan_enabled())
++              __kasan_unpoison_vmap_areas(vms, nr_vms, flags);
++}
++
+ #else /* CONFIG_KASAN_VMALLOC */
+ static inline void kasan_populate_early_vm_area_shadow(void *start,
+@@ -632,6 +642,11 @@ static inline void *kasan_unpoison_vmall
+ static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+ { }
++static __always_inline void
++kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
++                        kasan_vmalloc_flags_t flags)
++{ }
++
+ #endif /* CONFIG_KASAN_VMALLOC */
+ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -28,6 +28,7 @@
+ #include <linux/string.h>
+ #include <linux/types.h>
+ #include <linux/bug.h>
++#include <linux/vmalloc.h>
+ #include "kasan.h"
+ #include "../slab.h"
+@@ -559,3 +560,19 @@ bool __kasan_check_byte(const void *addr
+       }
+       return true;
+ }
++
++#ifdef CONFIG_KASAN_VMALLOC
++void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms,
++                               kasan_vmalloc_flags_t flags)
++{
++      unsigned long size;
++      void *addr;
++      int area;
++
++      for (area = 0 ; area < nr_vms ; area++) {
++              size = vms[area]->size;
++              addr = vms[area]->addr;
++              vms[area]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
++      }
++}
++#endif
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -4812,9 +4812,7 @@ retry:
+        * With hardware tag-based KASAN, marking is skipped for
+        * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+        */
+-      for (area = 0; area < nr_vms; area++)
+-              vms[area]->addr = kasan_unpoison_vmalloc(vms[area]->addr,
+-                              vms[area]->size, KASAN_VMALLOC_PROT_NORMAL);
++      kasan_unpoison_vmap_areas(vms, nr_vms, KASAN_VMALLOC_PROT_NORMAL);
+       kfree(vas);
+       return vms;
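
A standalone model of the tag-inheritance problem listed above (top-byte tags
in the style of SW_TAGS KASAN; set_tag()/get_tag() here only mimic the kernel
helpers):

  #include <stdint.h>
  #include <stdio.h>

  static uintptr_t set_tag(uintptr_t addr, uint8_t tag)
  {
          return (addr & ~((uintptr_t)0xff << 56)) | ((uintptr_t)tag << 56);
  }

  static unsigned int get_tag(uintptr_t addr)
  {
          return (unsigned int)(addr >> 56);
  }

  int main(void)
  {
          static char area[2][4096];    /* two "vm areas" backing a pcpu chunk */

          /* each vm_struct is unpoisoned with its own random tag: */
          uintptr_t vms0 = set_tag((uintptr_t)area[0], 0xA1);
          uintptr_t vms1 = set_tag((uintptr_t)area[1], 0xB2);

          /* base_addr inherits the first area's tag; a pcpu_chunk_addr()-style
           * derivation (base + offset) keeps that tag even when the result
           * falls inside the second area: */
          uintptr_t derived = vms0 + sizeof(area[0]);

          printf("derived tag %#x vs area1 tag %#x -> mismatch\n",
                 get_tag(derived), get_tag(vms1));
          return 0;
  }
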
diff --git a/queue-6.12/kasan-unpoison-vms-addresses-with-a-common-tag.patch b/queue-6.12/kasan-unpoison-vms-addresses-with-a-common-tag.patch
new file mode 100644 (file)
index 0000000..4f08052
--- /dev/null
@@ -0,0 +1,85 @@
+From 6a0e5b333842cf65d6f4e4f0a2a4386504802515 Mon Sep 17 00:00:00 2001
+From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+Date: Thu, 4 Dec 2025 19:00:11 +0000
+Subject: kasan: unpoison vms[area] addresses with a common tag
+
+From: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+
+commit 6a0e5b333842cf65d6f4e4f0a2a4386504802515 upstream.
+
+A KASAN tag mismatch, possibly causing a kernel panic, can be observed on
+systems with a tag-based KASAN enabled and with multiple NUMA nodes.  It
+was reported on arm64 and reproduced on x86.  It can be explained in the
+following points:
+
+1. There can be more than one virtual memory chunk.
+2. Chunk's base address has a tag.
+3. The base address points at the first chunk and thus inherits
+   the tag of the first chunk.
+4. The subsequent chunks will be accessed with the tag from the
+   first chunk.
+5. Thus, the subsequent chunks need to have their tag set to
+   match that of the first chunk.
+
+Use the new vmalloc flag that disables random tag assignment in
+__kasan_unpoison_vmalloc() - pass the same random tag to all the
+vm_structs by tagging the pointers before they go inside
+__kasan_unpoison_vmalloc().  Assigning a common tag resolves the pcpu
+chunk address mismatch.
+
+[akpm@linux-foundation.org: use WARN_ON_ONCE(), per Andrey]
+  Link: https://lkml.kernel.org/r/CA+fCnZeuGdKSEm11oGT6FS71_vGq1vjq-xY36kxVdFvwmag2ZQ@mail.gmail.com
+[maciej.wieczor-retman@intel.com: remove unneeded pr_warn()]
+  Link: https://lkml.kernel.org/r/919897daaaa3c982a27762a2ee038769ad033991.1764945396.git.m.wieczorretman@pm.me
+Link: https://lkml.kernel.org/r/873821114a9f722ffb5d6702b94782e902883fdf.1764874575.git.m.wieczorretman@pm.me
+Fixes: 1d96320f8d53 ("kasan, vmalloc: add vmalloc tagging for SW_TAGS")
+Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Jiayuan Chen <jiayuan.chen@linux.dev>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Marco Elver <elver@google.com>
+Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>   [6.1+]
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/kasan/common.c |   21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+--- a/mm/kasan/common.c
++++ b/mm/kasan/common.c
+@@ -568,11 +568,26 @@ void __kasan_unpoison_vmap_areas(struct
+       unsigned long size;
+       void *addr;
+       int area;
++      u8 tag;
+-      for (area = 0 ; area < nr_vms ; area++) {
++      /*
++       * If KASAN_VMALLOC_KEEP_TAG was set at this point, all vms[] pointers
++       * would be unpoisoned with the KASAN_TAG_KERNEL which would disable
++       * KASAN checks down the line.
++       */
++      if (WARN_ON_ONCE(flags & KASAN_VMALLOC_KEEP_TAG))
++              return;
++
++      size = vms[0]->size;
++      addr = vms[0]->addr;
++      vms[0]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
++      tag = get_tag(vms[0]->addr);
++
++      for (area = 1 ; area < nr_vms ; area++) {
+               size = vms[area]->size;
+-              addr = vms[area]->addr;
+-              vms[area]->addr = __kasan_unpoison_vmalloc(addr, size, flags);
++              addr = set_tag(vms[area]->addr, tag);
++              vms[area]->addr =
++                      __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG);
+       }
+ }
+ #endif
diff --git a/queue-6.12/lockd-fix-vfs_test_lock-calls.patch b/queue-6.12/lockd-fix-vfs_test_lock-calls.patch
new file mode 100644 (file)
index 0000000..30fc037
--- /dev/null
@@ -0,0 +1,176 @@
+From a49a2a1baa0c553c3548a1c414b6a3c005a8deba Mon Sep 17 00:00:00 2001
+From: NeilBrown <neil@brown.name>
+Date: Sat, 22 Nov 2025 12:00:36 +1100
+Subject: lockd: fix vfs_test_lock() calls
+
+From: NeilBrown <neil@brown.name>
+
+commit a49a2a1baa0c553c3548a1c414b6a3c005a8deba upstream.
+
+Usage of vfs_test_lock() is somewhat confused.  Documentation suggests
+it is given a "lock" but this is not the case.  It is given a struct
+file_lock which contains some details of the sort of lock it should be
+looking for.
+
+In particular passing a "file_lock" containing fl_lmops or fl_ops is
+meaningless and possibly confusing.
+
+This is particularly problematic in lockd.  nlmsvc_testlock() receives
+an initialised "file_lock" from xdr-decode, including manager ops and an
+owner.  It then mistakenly passes this to vfs_test_lock() which might
+replace the owner and the ops.  This can lead to confusion when freeing
+the lock.
+
+The primary role of the 'struct file_lock' passed to vfs_test_lock() is
+to report a conflicting lock that was found, so it makes more sense for
+nlmsvc_testlock() to pass "conflock", which it uses for returning the
+conflicting lock.
+
+With this change, freeing of the lock is not confused and code in
+__nlm4svc_proc_test() and __nlmsvc_proc_test() can be simplified.
+
+Documentation for vfs_test_lock() is improved to reflect its real
+purpose, and a WARN_ON_ONCE() is added to avoid a similar problem in the
+future.
+
+Reported-by: Olga Kornievskaia <okorniev@redhat.com>
+Closes: https://lore.kernel.org/all/20251021130506.45065-1-okorniev@redhat.com
+Signed-off-by: NeilBrown <neil@brown.name>
+Fixes: 20fa19027286 ("nfs: add export operations")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/lockd/svc4proc.c |    4 +---
+ fs/lockd/svclock.c  |   21 ++++++++++++---------
+ fs/lockd/svcproc.c  |    5 +----
+ fs/locks.c          |   12 ++++++++++--
+ 4 files changed, 24 insertions(+), 18 deletions(-)
+
+--- a/fs/lockd/svc4proc.c
++++ b/fs/lockd/svc4proc.c
+@@ -96,7 +96,6 @@ __nlm4svc_proc_test(struct svc_rqst *rqs
+       struct nlm_args *argp = rqstp->rq_argp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+-      struct nlm_lockowner *test_owner;
+       __be32 rc = rpc_success;
+       dprintk("lockd: TEST4        called\n");
+@@ -106,7 +105,6 @@ __nlm4svc_proc_test(struct svc_rqst *rqs
+       if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
+               return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+-      test_owner = argp->lock.fl.c.flc_owner;
+       /* Now check for conflicting locks */
+       resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie);
+       if (resp->status == nlm_drop_reply)
+@@ -114,7 +112,7 @@ __nlm4svc_proc_test(struct svc_rqst *rqs
+       else
+               dprintk("lockd: TEST4        status %d\n", ntohl(resp->status));
+-      nlmsvc_put_lockowner(test_owner);
++      nlmsvc_release_lockowner(&argp->lock);
+       nlmsvc_release_host(host);
+       nlm_release_file(file);
+       return rc;
+--- a/fs/lockd/svclock.c
++++ b/fs/lockd/svclock.c
+@@ -628,7 +628,13 @@ nlmsvc_testlock(struct svc_rqst *rqstp,
+       }
+       mode = lock_to_openmode(&lock->fl);
+-      error = vfs_test_lock(file->f_file[mode], &lock->fl);
++      locks_init_lock(&conflock->fl);
++      /* vfs_test_lock only uses start, end, and owner, but tests flc_file */
++      conflock->fl.c.flc_file = lock->fl.c.flc_file;
++      conflock->fl.fl_start = lock->fl.fl_start;
++      conflock->fl.fl_end = lock->fl.fl_end;
++      conflock->fl.c.flc_owner = lock->fl.c.flc_owner;
++      error = vfs_test_lock(file->f_file[mode], &conflock->fl);
+       if (error) {
+               /* We can't currently deal with deferred test requests */
+               if (error == FILE_LOCK_DEFERRED)
+@@ -638,22 +644,19 @@ nlmsvc_testlock(struct svc_rqst *rqstp,
+               goto out;
+       }
+-      if (lock->fl.c.flc_type == F_UNLCK) {
++      if (conflock->fl.c.flc_type == F_UNLCK) {
+               ret = nlm_granted;
+               goto out;
+       }
+       dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n",
+-              lock->fl.c.flc_type, (long long)lock->fl.fl_start,
+-              (long long)lock->fl.fl_end);
++              conflock->fl.c.flc_type, (long long)conflock->fl.fl_start,
++              (long long)conflock->fl.fl_end);
+       conflock->caller = "somehost";  /* FIXME */
+       conflock->len = strlen(conflock->caller);
+       conflock->oh.len = 0;           /* don't return OH info */
+-      conflock->svid = lock->fl.c.flc_pid;
+-      conflock->fl.c.flc_type = lock->fl.c.flc_type;
+-      conflock->fl.fl_start = lock->fl.fl_start;
+-      conflock->fl.fl_end = lock->fl.fl_end;
+-      locks_release_private(&lock->fl);
++      conflock->svid = conflock->fl.c.flc_pid;
++      locks_release_private(&conflock->fl);
+       ret = nlm_lck_denied;
+ out:
+--- a/fs/lockd/svcproc.c
++++ b/fs/lockd/svcproc.c
+@@ -117,7 +117,6 @@ __nlmsvc_proc_test(struct svc_rqst *rqst
+       struct nlm_args *argp = rqstp->rq_argp;
+       struct nlm_host *host;
+       struct nlm_file *file;
+-      struct nlm_lockowner *test_owner;
+       __be32 rc = rpc_success;
+       dprintk("lockd: TEST          called\n");
+@@ -127,8 +126,6 @@ __nlmsvc_proc_test(struct svc_rqst *rqst
+       if ((resp->status = nlmsvc_retrieve_args(rqstp, argp, &host, &file)))
+               return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;
+-      test_owner = argp->lock.fl.c.flc_owner;
+-
+       /* Now check for conflicting locks */
+       resp->status = cast_status(nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie));
+       if (resp->status == nlm_drop_reply)
+@@ -137,7 +134,7 @@ __nlmsvc_proc_test(struct svc_rqst *rqst
+               dprintk("lockd: TEST          status %d vers %d\n",
+                       ntohl(resp->status), rqstp->rq_vers);
+-      nlmsvc_put_lockowner(test_owner);
++      nlmsvc_release_lockowner(&argp->lock);
+       nlmsvc_release_host(host);
+       nlm_release_file(file);
+       return rc;
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -2190,13 +2190,21 @@ SYSCALL_DEFINE2(flock, unsigned int, fd,
+ /**
+  * vfs_test_lock - test file byte range lock
+  * @filp: The file to test lock for
+- * @fl: The lock to test; also used to hold result
++ * @fl: The byte-range in the file to test; also used to hold result
+  *
++ * On entry, @fl does not contain a lock, but identifies a range (fl_start, fl_end)
++ * in the file (c.flc_file), and an owner (c.flc_owner) for whom existing locks
++ * should be ignored.  c.flc_type and c.flc_flags are ignored.
++ * Both fl_lmops and fl_ops in @fl must be NULL.
+  * Returns -ERRNO on failure.  Indicates presence of conflicting lock by
+- * setting conf->fl_type to something other than F_UNLCK.
++ * setting fl->fl_type to something other than F_UNLCK.
++ *
++ * If vfs_test_lock() does find a lock and return it, the caller must
++ * use locks_free_lock() or locks_release_private() on the returned lock.
+  */
+ int vfs_test_lock(struct file *filp, struct file_lock *fl)
+ {
++      WARN_ON_ONCE(fl->fl_ops || fl->fl_lmops);
+       WARN_ON_ONCE(filp != fl->c.flc_file);
+       if (filp->f_op->lock)
+               return filp->f_op->lock(filp, F_GETLK, fl);
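
Condensed, the contract the updated kernel-doc spells out looks roughly like
this (kernel-context sketch, not standalone; filp, owner, start and end are
assumed to be in scope):

  struct file_lock conf;
  int error;

  locks_init_lock(&conf);
  conf.c.flc_file  = filp;      /* must match the filp passed to vfs_test_lock() */
  conf.c.flc_owner = owner;     /* existing locks held by this owner are ignored */
  conf.fl_start    = start;
  conf.fl_end      = end;
  /* fl_ops and fl_lmops stay NULL, as the new WARN_ON_ONCE() checks */

  error = vfs_test_lock(filp, &conf);
  if (!error && conf.c.flc_type != F_UNLCK) {
          /* a conflicting lock was found and is described in conf */
          locks_release_private(&conf);
  }
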
diff --git a/queue-6.12/loongarch-bpf-sign-extend-kfunc-call-arguments.patch b/queue-6.12/loongarch-bpf-sign-extend-kfunc-call-arguments.patch
new file mode 100644 (file)
index 0000000..16b73d6
--- /dev/null
@@ -0,0 +1,86 @@
+From 3f5a238f24d7b75f9efe324d3539ad388f58536e Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Wed, 31 Dec 2025 15:19:20 +0800
+Subject: LoongArch: BPF: Sign extend kfunc call arguments
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit 3f5a238f24d7b75f9efe324d3539ad388f58536e upstream.
+
+The kfunc calls are native calls so they should follow LoongArch calling
+conventions. Sign-extend their arguments properly to avoid kernel panics.
+This is done by adding a new emit_abi_ext() helper. The emit_abi_ext()
+helper performs the extension in place, i.e. on a value already stored in
+the target register (note: this is different from the existing sign_extend()
+helper, so we can't reuse it).
+
+Cc: stable@vger.kernel.org
+Fixes: 5dc615520c4d ("LoongArch: Add BPF JIT support")
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c |   16 ++++++++++++++++
+ arch/loongarch/net/bpf_jit.h |   26 ++++++++++++++++++++++++++
+ 2 files changed, 42 insertions(+)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -897,6 +897,22 @@ static int build_insn(const struct bpf_i
+               if (ret < 0)
+                       return ret;
++              if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL) {
++                      const struct btf_func_model *m;
++                      int i;
++
++                      m = bpf_jit_find_kfunc_model(ctx->prog, insn);
++                      if (!m)
++                              return -EINVAL;
++
++                      for (i = 0; i < m->nr_args; i++) {
++                              u8 reg = regmap[BPF_REG_1 + i];
++                              bool sign = m->arg_flags[i] & BTF_FMODEL_SIGNED_ARG;
++
++                              emit_abi_ext(ctx, reg, m->arg_size[i], sign);
++                      }
++              }
++
+               move_addr(ctx, t1, func_addr);
+               emit_insn(ctx, jirl, LOONGARCH_GPR_RA, t1, 0);
+--- a/arch/loongarch/net/bpf_jit.h
++++ b/arch/loongarch/net/bpf_jit.h
+@@ -87,6 +87,32 @@ static inline void emit_sext_32(struct j
+       emit_insn(ctx, addiw, reg, reg, 0);
+ }
++/* Emit proper extension according to ABI requirements.
++ * Note that it requires that a value of size `size` already resides in register `reg`.
++ */
++static inline void emit_abi_ext(struct jit_ctx *ctx, int reg, u8 size, bool sign)
++{
++      /* ABI requires unsigned char/short to be zero-extended */
++      if (!sign && (size == 1 || size == 2))
++              return;
++
++      switch (size) {
++      case 1:
++              emit_insn(ctx, extwb, reg, reg);
++              break;
++      case 2:
++              emit_insn(ctx, extwh, reg, reg);
++              break;
++      case 4:
++              emit_insn(ctx, addiw, reg, reg, 0);
++              break;
++      case 8:
++              break;
++      default:
++              pr_warn("bpf_jit: invalid size %d for extension\n", size);
++      }
++}
++
+ static inline void move_addr(struct jit_ctx *ctx, enum loongarch_gpr rd, u64 addr)
+ {
+       u64 imm_11_0, imm_31_12, imm_51_32, imm_63_52;
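
What the missing extension means at the C level, as a standalone sketch (the
int8_t cast plays the role the new emit_abi_ext() helper plays for a signed
char argument):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* a BPF program leaves an 8-bit -1 in the low bits of a 64-bit
           * register without extending it: */
          uint64_t reg = 0xff;

          long long as_seen_by_kfunc = (long long)reg;  /* native callee reads 255 */
          long long sign_extended    = (int8_t)reg;     /* ABI-conformant value: -1 */

          printf("%lld vs %lld\n", as_seen_by_kfunc, sign_extended);
          return 0;
  }
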
diff --git a/queue-6.12/loongarch-bpf-zero-extend-bpf_tail_call-index.patch b/queue-6.12/loongarch-bpf-zero-extend-bpf_tail_call-index.patch
new file mode 100644 (file)
index 0000000..b408df9
--- /dev/null
@@ -0,0 +1,36 @@
+From eb71f5c433e1c6dff089b315881dec40a88a7baf Mon Sep 17 00:00:00 2001
+From: Hengqi Chen <hengqi.chen@gmail.com>
+Date: Wed, 31 Dec 2025 15:19:20 +0800
+Subject: LoongArch: BPF: Zero-extend bpf_tail_call() index
+
+From: Hengqi Chen <hengqi.chen@gmail.com>
+
+commit eb71f5c433e1c6dff089b315881dec40a88a7baf upstream.
+
+The bpf_tail_call() index should be treated as a u32 value. Let's
+zero-extend it to avoid calling the wrong BPF progs. See the similar fixes
+for x86 ([1]) and arm64 ([2]) for more details.
+
+  [1]: https://github.com/torvalds/linux/commit/90caccdd8cc0215705f18b92771b449b01e2474a
+  [2]: https://github.com/torvalds/linux/commit/16338a9b3ac30740d49f5dfed81bac0ffa53b9c7
+
+Cc: stable@vger.kernel.org
+Fixes: 5dc615520c4d ("LoongArch: Add BPF JIT support")
+Signed-off-by: Hengqi Chen <hengqi.chen@gmail.com>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/net/bpf_jit.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/loongarch/net/bpf_jit.c
++++ b/arch/loongarch/net/bpf_jit.c
+@@ -231,6 +231,8 @@ static int emit_bpf_tail_call(struct jit
+        *       goto out;
+        */
+       tc_ninsn = insn ? ctx->offset[insn+1] - ctx->offset[insn] : ctx->offset[0];
++      emit_zext_32(ctx, a2, true);
++
+       off = offsetof(struct bpf_array, map.max_entries);
+       emit_insn(ctx, ldwu, t1, a1, off);
+       /* bgeu $a2, $t1, jmp_offset */
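
The same point at the C level, standalone: only the low 32 bits of the index
register are meaningful, so the bounds check must not see stale upper bits:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint64_t a2 = 0xdeadbeef00000003ULL;  /* tail-call index register */
          uint32_t max_entries = 16;

          printf("64-bit compare: %s, 32-bit compare: %s\n",
                 a2 < max_entries ? "in range" : "out of range",  /* wrongly bails out */
                 (uint32_t)a2 < max_entries ? "in range" : "out of range");
          return 0;
  }
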
diff --git a/queue-6.12/loongarch-refactor-register-restoration-in-ftrace_common_return.patch b/queue-6.12/loongarch-refactor-register-restoration-in-ftrace_common_return.patch
new file mode 100644 (file)
index 0000000..7e183c4
--- /dev/null
@@ -0,0 +1,75 @@
+From 45cb47c628dfbd1994c619f3eac271a780602826 Mon Sep 17 00:00:00 2001
+From: Chenghao Duan <duanchenghao@kylinos.cn>
+Date: Wed, 31 Dec 2025 15:19:20 +0800
+Subject: LoongArch: Refactor register restoration in ftrace_common_return
+
+From: Chenghao Duan <duanchenghao@kylinos.cn>
+
+commit 45cb47c628dfbd1994c619f3eac271a780602826 upstream.
+
+Refactor the register restoration sequence in the ftrace_common_return
+function to clearly distinguish between the logic of normal returns and
+direct call returns in function tracing scenarios. The logic is as
+follows:
+
+1. In the case of a normal return, the execution flow returns to the
+traced function, and ftrace must ensure that the register data is
+consistent with the state when the function was entered.
+
+ra = parent return address; t0 = traced function return address.
+
+2. In the case of a direct call return, the execution flow jumps to the
+custom trampoline function, and ftrace must ensure that the register
+data is consistent with the state when ftrace was entered.
+
+ra = traced function return address; t0 = parent return address.
+
+Cc: stable@vger.kernel.org
+Fixes: 9cdc3b6a299c ("LoongArch: ftrace: Add direct call support")
+Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/mcount_dyn.S |   14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+--- a/arch/loongarch/kernel/mcount_dyn.S
++++ b/arch/loongarch/kernel/mcount_dyn.S
+@@ -94,7 +94,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L
+  * at the callsite, so there is no need to restore the T series regs.
+  */
+ ftrace_common_return:
+-      PTR_L           ra, sp, PT_R1
+       PTR_L           a0, sp, PT_R4
+       PTR_L           a1, sp, PT_R5
+       PTR_L           a2, sp, PT_R6
+@@ -104,12 +103,17 @@ ftrace_common_return:
+       PTR_L           a6, sp, PT_R10
+       PTR_L           a7, sp, PT_R11
+       PTR_L           fp, sp, PT_R22
+-      PTR_L           t0, sp, PT_ERA
+       PTR_L           t1, sp, PT_R13
+-      PTR_ADDI        sp, sp, PT_SIZE
+       bnez            t1, .Ldirect
++
++      PTR_L           ra, sp, PT_R1
++      PTR_L           t0, sp, PT_ERA
++      PTR_ADDI        sp, sp, PT_SIZE
+       jr              t0
+ .Ldirect:
++      PTR_L           t0, sp, PT_R1
++      PTR_L           ra, sp, PT_ERA
++      PTR_ADDI        sp, sp, PT_SIZE
+       jr              t1
+ SYM_CODE_END(ftrace_common)
+@@ -161,6 +165,8 @@ SYM_CODE_END(return_to_handler)
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+ SYM_CODE_START(ftrace_stub_direct_tramp)
+       UNWIND_HINT_UNDEFINED
+-      jr              t0
++      move            t1, ra
++      move            ra, t0
++      jr              t1
+ SYM_CODE_END(ftrace_stub_direct_tramp)
+ #endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
diff --git a/queue-6.12/mm-kasan-fix-incorrect-unpoisoning-in-vrealloc-for-kasan.patch b/queue-6.12/mm-kasan-fix-incorrect-unpoisoning-in-vrealloc-for-kasan.patch
new file mode 100644 (file)
index 0000000..c9ccfb0
--- /dev/null
@@ -0,0 +1,151 @@
+From 007f5da43b3d0ecff972e2616062b8da1f862f5e Mon Sep 17 00:00:00 2001
+From: Jiayuan Chen <jiayuan.chen@linux.dev>
+Date: Thu, 4 Dec 2025 18:59:55 +0000
+Subject: mm/kasan: fix incorrect unpoisoning in vrealloc for KASAN
+
+From: Jiayuan Chen <jiayuan.chen@linux.dev>
+
+commit 007f5da43b3d0ecff972e2616062b8da1f862f5e upstream.
+
+Patch series "kasan: vmalloc: Fixes for the percpu allocator and
+vrealloc", v3.
+
+Patches fix two issues related to KASAN and vmalloc.
+
+The first one, a KASAN tag mismatch, possibly resulting in a kernel panic,
+can be observed on systems with a tag-based KASAN enabled and with
+multiple NUMA nodes.  Initially it was only noticed on x86 [1] but later a
+similar issue was also reported on arm64 [2].
+
+Specifically the problem is related to how vm_structs interact with
+pcpu_chunks - both when they are allocated, assigned and when pcpu_chunk
+addresses are derived.
+
+When vm_structs are allocated they are unpoisoned, each with a different
+random tag, if vmalloc support is enabled alongside the KASAN mode.  Later,
+when the first pcpu chunk is allocated, it gets its 'base_addr' field set to
+the first allocated vm_struct.  With that it inherits that vm_struct's
+tag.
+
+When pcpu_chunk addresses are later derived (by pcpu_chunk_addr(), for
+example in pcpu_alloc_noprof()) the base_addr field is used and offsets
+are added to it.  If the initial conditions are satisfied then some of the
+offsets will point into memory allocated with a different vm_struct.  So
+while the lower bits will get accurately derived, the tag bits in the top
+of the pointer won't match the shadow memory contents.
+
+The solution (proposed at v2 of the x86 KASAN series [3]) is to unpoison
+the vm_structs with the same tag when allocating them for the per cpu
+allocator (in pcpu_get_vm_areas()).
+
+The second one reported by syzkaller [4] is related to vrealloc and
+happens because of random tag generation when unpoisoning memory without
+allocating new pages.  This breaks shadow memory tracking, so the fix must
+reuse the existing tag instead of generating a new one.  At the same time
+an inconsistency in the flags used is corrected.
+
+
+This patch (of 3):
+
+Syzkaller reported a memory out-of-bounds bug [4].  This patch fixes two
+issues:
+
+1. In vrealloc the KASAN_VMALLOC_VM_ALLOC flag is missing when
+   unpoisoning the extended region. This flag is required to correctly
+   associate the allocation with KASAN's vmalloc tracking.
+
+   Note: In contrast, vzalloc (via __vmalloc_node_range_noprof)
+   explicitly sets KASAN_VMALLOC_VM_ALLOC and calls
+   kasan_unpoison_vmalloc() with it.  vrealloc must behave consistently --
+   especially when reusing existing vmalloc regions -- to ensure KASAN can
+   track allocations correctly.
+
+2. When vrealloc reuses an existing vmalloc region (without allocating
+   new pages) KASAN generates a new tag, which breaks tag-based memory
+   access tracking.
+
+Introduce KASAN_VMALLOC_KEEP_TAG, a new KASAN flag that allows reusing the
+tag already attached to the pointer, ensuring consistent tag behavior
+during reallocation.
+
+Pass KASAN_VMALLOC_KEEP_TAG and KASAN_VMALLOC_VM_ALLOC to the
+kasan_unpoison_vmalloc inside vrealloc_node_align_noprof().
+
+Link: https://lkml.kernel.org/r/cover.1765978969.git.m.wieczorretman@pm.me
+Link: https://lkml.kernel.org/r/38dece0a4074c43e48150d1e242f8242c73bf1a5.1764874575.git.m.wieczorretman@pm.me
+Link: https://lore.kernel.org/all/e7e04692866d02e6d3b32bb43b998e5d17092ba4.1738686764.git.maciej.wieczor-retman@intel.com/ [1]
+Link: https://lore.kernel.org/all/aMUrW1Znp1GEj7St@MiWiFi-R3L-srv/ [2]
+Link: https://lore.kernel.org/all/CAPAsAGxDRv_uFeMYu9TwhBVWHCCtkSxoWY4xmFB_vowMbi8raw@mail.gmail.com/ [3]
+Link: https://syzkaller.appspot.com/bug?extid=997752115a851cb0cf36 [4]
+Fixes: a0309faf1cb0 ("mm: vmalloc: support more granular vrealloc() sizing")
+Signed-off-by: Jiayuan Chen <jiayuan.chen@linux.dev>
+Co-developed-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+Signed-off-by: Maciej Wieczor-Retman <maciej.wieczor-retman@intel.com>
+Reported-by: syzbot+997752115a851cb0cf36@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68e243a2.050a0220.1696c6.007d.GAE@google.com/T/
+Reviewed-by: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+Cc: Danilo Krummrich <dakr@kernel.org>
+Cc: Dmitriy Vyukov <dvyukov@google.com>
+Cc: Kees Cook <kees@kernel.org>
+Cc: Marco Elver <elver@google.com>
+Cc: "Uladzislau Rezki (Sony)" <urezki@gmail.com>
+Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/kasan.h |    1 +
+ mm/kasan/hw_tags.c    |    2 +-
+ mm/kasan/shadow.c     |    4 +++-
+ mm/vmalloc.c          |    4 +++-
+ 4 files changed, 8 insertions(+), 3 deletions(-)
+
+--- a/include/linux/kasan.h
++++ b/include/linux/kasan.h
+@@ -28,6 +28,7 @@ typedef unsigned int __bitwise kasan_vma
+ #define KASAN_VMALLOC_INIT            ((__force kasan_vmalloc_flags_t)0x01u)
+ #define KASAN_VMALLOC_VM_ALLOC                ((__force kasan_vmalloc_flags_t)0x02u)
+ #define KASAN_VMALLOC_PROT_NORMAL     ((__force kasan_vmalloc_flags_t)0x04u)
++#define KASAN_VMALLOC_KEEP_TAG                ((__force kasan_vmalloc_flags_t)0x08u)
+ #define KASAN_VMALLOC_PAGE_RANGE 0x1 /* Apply exsiting page range */
+ #define KASAN_VMALLOC_TLB_FLUSH  0x2 /* TLB flush */
+--- a/mm/kasan/hw_tags.c
++++ b/mm/kasan/hw_tags.c
+@@ -345,7 +345,7 @@ void *__kasan_unpoison_vmalloc(const voi
+               return (void *)start;
+       }
+-      tag = kasan_random_tag();
++      tag = (flags & KASAN_VMALLOC_KEEP_TAG) ? get_tag(start) : kasan_random_tag();
+       start = set_tag(start, tag);
+       /* Unpoison and initialize memory up to size. */
+--- a/mm/kasan/shadow.c
++++ b/mm/kasan/shadow.c
+@@ -561,7 +561,9 @@ void *__kasan_unpoison_vmalloc(const voi
+           !(flags & KASAN_VMALLOC_PROT_NORMAL))
+               return (void *)start;
+-      start = set_tag(start, kasan_random_tag());
++      if (unlikely(!(flags & KASAN_VMALLOC_KEEP_TAG)))
++              start = set_tag(start, kasan_random_tag());
++
+       kasan_unpoison(start, size, false);
+       return (void *)start;
+ }
+--- a/mm/vmalloc.c
++++ b/mm/vmalloc.c
+@@ -4118,7 +4118,9 @@ void *vrealloc_noprof(const void *p, siz
+        */
+       if (size <= alloced_size) {
+               kasan_unpoison_vmalloc(p + old_size, size - old_size,
+-                                     KASAN_VMALLOC_PROT_NORMAL);
++                                     KASAN_VMALLOC_PROT_NORMAL |
++                                     KASAN_VMALLOC_VM_ALLOC |
++                                     KASAN_VMALLOC_KEEP_TAG);
+               /*
+                * No need to zero memory here, as unused memory will have
+                * already been zeroed at initial allocation time or during
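
The in-place reuse problem (the second issue above) in miniature, as a
standalone toy model with one tag per region and one tag in the caller's
pointer:

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint8_t shadow_tag  = 0xA1;   /* region was unpoisoned with tag 0xA1 */
          uint8_t pointer_tag = 0xA1;   /* the caller's existing pointer carries 0xA1 */

          /* vrealloc() grows in place; without KASAN_VMALLOC_KEEP_TAG the
           * region would be re-unpoisoned with a fresh random tag: */
          shadow_tag = 0xB7;

          printf("pointer %#x vs shadow %#x -> %s\n", pointer_tag, shadow_tag,
                 pointer_tag == shadow_tag ? "ok" : "mismatch on next access");
          return 0;
  }
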
diff --git a/queue-6.12/mm-page_owner-fix-memory-leak-in-page_owner_stack_fops-release.patch b/queue-6.12/mm-page_owner-fix-memory-leak-in-page_owner_stack_fops-release.patch
new file mode 100644 (file)
index 0000000..bed1864
--- /dev/null
@@ -0,0 +1,43 @@
+From a76a5ae2c6c645005672c2caf2d49361c6f2500f Mon Sep 17 00:00:00 2001
+From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
+Date: Fri, 19 Dec 2025 07:42:32 +0000
+Subject: mm/page_owner: fix memory leak in page_owner_stack_fops->release()
+
+From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
+
+commit a76a5ae2c6c645005672c2caf2d49361c6f2500f upstream.
+
+The page_owner_stack_fops->open() callback invokes seq_open_private(),
+therefore its corresponding ->release() callback must call
+seq_release_private().  Otherwise it will cause a memory leak of struct
+stack_print_ctx.
+
+Link: https://lkml.kernel.org/r/20251219074232.136482-1-ranxiaokai627@163.com
+Fixes: 765973a09803 ("mm,page_owner: display all stacks and their count")
+Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Acked-by: Vlastimil Babka <vbabka@suse.cz>
+Cc: Andrey Konovalov <andreyknvl@gmail.com>
+Cc: Brendan Jackman <jackmanb@google.com>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Marco Elver <elver@google.com>
+Cc: Suren Baghdasaryan <surenb@google.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/page_owner.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/mm/page_owner.c
++++ b/mm/page_owner.c
+@@ -933,7 +933,7 @@ static const struct file_operations page
+       .open           = page_owner_stack_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+-      .release        = seq_release,
++      .release        = seq_release_private,
+ };
+ static int page_owner_threshold_get(void *data, u64 *val)
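
The seq_file pairing this fix restores, sketched with illustrative foo_* names
(foo_seq_ops and struct foo_ctx are assumed, not taken from the kernel):

  static int foo_open(struct inode *inode, struct file *file)
  {
          /* allocates the seq_file and a private context of the given size */
          return seq_open_private(file, &foo_seq_ops, sizeof(struct foo_ctx));
  }

  static const struct file_operations foo_fops = {
          .open           = foo_open,
          .read           = seq_read,
          .llseek         = seq_lseek,
          .release        = seq_release_private, /* frees that context as well */
  };
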
diff --git a/queue-6.12/net-macb-relocate-mog_init_rings-callback-from-macb_mac_link_up-to-macb_open.patch b/queue-6.12/net-macb-relocate-mog_init_rings-callback-from-macb_mac_link_up-to-macb_open.patch
new file mode 100644 (file)
index 0000000..1a7d65b
--- /dev/null
@@ -0,0 +1,170 @@
+From 99537d5c476cada9cf75aef9fa75579a31faadb9 Mon Sep 17 00:00:00 2001
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+Date: Mon, 22 Dec 2025 09:56:24 +0800
+Subject: net: macb: Relocate mog_init_rings() callback from macb_mac_link_up() to macb_open()
+
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+
+commit 99537d5c476cada9cf75aef9fa75579a31faadb9 upstream.
+
+In the non-RT kernel, local_bh_disable() merely disables preemption,
+whereas it maps to an actual spin lock in the RT kernel. Consequently,
+when attempting to refill RX buffers via netdev_alloc_skb() in
+macb_mac_link_up(), a deadlock scenario arises as follows:
+
+   WARNING: possible circular locking dependency detected
+   6.18.0-08691-g2061f18ad76e #39 Not tainted
+   ------------------------------------------------------
+   kworker/0:0/8 is trying to acquire lock:
+   ffff00080369bbe0 (&bp->lock){+.+.}-{3:3}, at: macb_start_xmit+0x808/0xb7c
+
+   but task is already holding lock:
+   ffff000803698e58 (&queue->tx_ptr_lock){+...}-{3:3}, at: macb_start_xmit
+   +0x148/0xb7c
+
+   which lock already depends on the new lock.
+
+   the existing dependency chain (in reverse order) is:
+
+   -> #3 (&queue->tx_ptr_lock){+...}-{3:3}:
+          rt_spin_lock+0x50/0x1f0
+          macb_start_xmit+0x148/0xb7c
+          dev_hard_start_xmit+0x94/0x284
+          sch_direct_xmit+0x8c/0x37c
+          __dev_queue_xmit+0x708/0x1120
+          neigh_resolve_output+0x148/0x28c
+          ip6_finish_output2+0x2c0/0xb2c
+          __ip6_finish_output+0x114/0x308
+          ip6_output+0xc4/0x4a4
+          mld_sendpack+0x220/0x68c
+          mld_ifc_work+0x2a8/0x4f4
+          process_one_work+0x20c/0x5f8
+          worker_thread+0x1b0/0x35c
+          kthread+0x144/0x200
+          ret_from_fork+0x10/0x20
+
+   -> #2 (_xmit_ETHER#2){+...}-{3:3}:
+          rt_spin_lock+0x50/0x1f0
+          sch_direct_xmit+0x11c/0x37c
+          __dev_queue_xmit+0x708/0x1120
+          neigh_resolve_output+0x148/0x28c
+          ip6_finish_output2+0x2c0/0xb2c
+          __ip6_finish_output+0x114/0x308
+          ip6_output+0xc4/0x4a4
+          mld_sendpack+0x220/0x68c
+          mld_ifc_work+0x2a8/0x4f4
+          process_one_work+0x20c/0x5f8
+          worker_thread+0x1b0/0x35c
+          kthread+0x144/0x200
+          ret_from_fork+0x10/0x20
+
+   -> #1 ((softirq_ctrl.lock)){+.+.}-{3:3}:
+          lock_release+0x250/0x348
+          __local_bh_enable_ip+0x7c/0x240
+          __netdev_alloc_skb+0x1b4/0x1d8
+          gem_rx_refill+0xdc/0x240
+          gem_init_rings+0xb4/0x108
+          macb_mac_link_up+0x9c/0x2b4
+          phylink_resolve+0x170/0x614
+          process_one_work+0x20c/0x5f8
+          worker_thread+0x1b0/0x35c
+          kthread+0x144/0x200
+          ret_from_fork+0x10/0x20
+
+   -> #0 (&bp->lock){+.+.}-{3:3}:
+          __lock_acquire+0x15a8/0x2084
+          lock_acquire+0x1cc/0x350
+          rt_spin_lock+0x50/0x1f0
+          macb_start_xmit+0x808/0xb7c
+          dev_hard_start_xmit+0x94/0x284
+          sch_direct_xmit+0x8c/0x37c
+          __dev_queue_xmit+0x708/0x1120
+          neigh_resolve_output+0x148/0x28c
+          ip6_finish_output2+0x2c0/0xb2c
+          __ip6_finish_output+0x114/0x308
+          ip6_output+0xc4/0x4a4
+          mld_sendpack+0x220/0x68c
+          mld_ifc_work+0x2a8/0x4f4
+          process_one_work+0x20c/0x5f8
+          worker_thread+0x1b0/0x35c
+          kthread+0x144/0x200
+          ret_from_fork+0x10/0x20
+
+   other info that might help us debug this:
+
+   Chain exists of:
+     &bp->lock --> _xmit_ETHER#2 --> &queue->tx_ptr_lock
+
+    Possible unsafe locking scenario:
+
+          CPU0                    CPU1
+          ----                    ----
+     lock(&queue->tx_ptr_lock);
+                                  lock(_xmit_ETHER#2);
+                                  lock(&queue->tx_ptr_lock);
+     lock(&bp->lock);
+
+    *** DEADLOCK ***
+
+   Call trace:
+    show_stack+0x18/0x24 (C)
+    dump_stack_lvl+0xa0/0xf0
+    dump_stack+0x18/0x24
+    print_circular_bug+0x28c/0x370
+    check_noncircular+0x198/0x1ac
+    __lock_acquire+0x15a8/0x2084
+    lock_acquire+0x1cc/0x350
+    rt_spin_lock+0x50/0x1f0
+    macb_start_xmit+0x808/0xb7c
+    dev_hard_start_xmit+0x94/0x284
+    sch_direct_xmit+0x8c/0x37c
+    __dev_queue_xmit+0x708/0x1120
+    neigh_resolve_output+0x148/0x28c
+    ip6_finish_output2+0x2c0/0xb2c
+    __ip6_finish_output+0x114/0x308
+    ip6_output+0xc4/0x4a4
+    mld_sendpack+0x220/0x68c
+    mld_ifc_work+0x2a8/0x4f4
+    process_one_work+0x20c/0x5f8
+    worker_thread+0x1b0/0x35c
+    kthread+0x144/0x200
+    ret_from_fork+0x10/0x20
+
+Notably, invoking the mog_init_rings() callback upon link establishment
+is unnecessary. Instead, we can exclusively call mog_init_rings() within
+the ndo_open() callback. This adjustment resolves the deadlock issue.
+Furthermore, since MACB_CAPS_MACB_IS_EMAC cases do not use mog_init_rings()
+when opening the network interface via at91ether_open(), moving
+mog_init_rings() to macb_open() also eliminates the MACB_CAPS_MACB_IS_EMAC
+check.
+
+Fixes: 633e98a711ac ("net: macb: use resolved link config in mac_link_up()")
+Cc: stable@vger.kernel.org
+Suggested-by: Kevin Hao <kexin.hao@windriver.com>
+Signed-off-by: Xiaolei Wang <xiaolei.wang@windriver.com>
+Link: https://patch.msgid.link/20251222015624.1994551-1-xiaolei.wang@windriver.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_main.c |    3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cadence/macb_main.c
++++ b/drivers/net/ethernet/cadence/macb_main.c
+@@ -759,7 +759,6 @@ static void macb_mac_link_up(struct phyl
+               /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+                * cleared the pipeline and control registers.
+                */
+-              bp->macbgem_ops.mog_init_rings(bp);
+               macb_init_buffers(bp);
+               for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+@@ -2985,6 +2984,8 @@ static int macb_open(struct net_device *
+               goto pm_exit;
+       }
++      bp->macbgem_ops.mog_init_rings(bp);
++
+       for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+               napi_enable(&queue->napi_rx);
+               napi_enable(&queue->napi_tx);
diff --git a/queue-6.12/net-nfc-fix-deadlock-between-nfc_unregister_device-and-rfkill_fop_write.patch b/queue-6.12/net-nfc-fix-deadlock-between-nfc_unregister_device-and-rfkill_fop_write.patch
new file mode 100644
index 0000000..dccbd71
--- /dev/null
@@ -0,0 +1,91 @@
+From 1ab526d97a57e44d26fadcc0e9adeb9c0c0182f5 Mon Sep 17 00:00:00 2001
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+Date: Thu, 18 Dec 2025 06:53:54 +0530
+Subject: net: nfc: fix deadlock between nfc_unregister_device and rfkill_fop_write
+
+From: Deepanshu Kartikey <kartikey406@gmail.com>
+
+commit 1ab526d97a57e44d26fadcc0e9adeb9c0c0182f5 upstream.
+
+A deadlock can occur between nfc_unregister_device() and rfkill_fop_write()
+due to lock ordering inversion between device_lock and rfkill_global_mutex.
+
+The problematic lock order is:
+
+Thread A (rfkill_fop_write):
+  rfkill_fop_write()
+    mutex_lock(&rfkill_global_mutex)
+      rfkill_set_block()
+        nfc_rfkill_set_block()
+          nfc_dev_down()
+            device_lock(&dev->dev)    <- waits for device_lock
+
+Thread B (nfc_unregister_device):
+  nfc_unregister_device()
+    device_lock(&dev->dev)
+      rfkill_unregister()
+        mutex_lock(&rfkill_global_mutex)  <- waits for rfkill_global_mutex
+
+This creates a classic ABBA deadlock scenario.
+
+Fix this by moving rfkill_unregister() and rfkill_destroy() outside the
+device_lock critical section. Store the rfkill pointer in a local variable
+before releasing the lock, then call rfkill_unregister() after releasing
+device_lock.
+
+This change is safe because rfkill_fop_write() holds rfkill_global_mutex
+while calling the rfkill callbacks, and rfkill_unregister() also acquires
+rfkill_global_mutex before cleanup. Therefore, rfkill_unregister() will
+wait for any ongoing callback to complete before proceeding, and
+device_del() is only called after rfkill_unregister() returns, preventing
+any use-after-free.
+
+The similar lock ordering in nfc_register_device() (device_lock ->
+rfkill_global_mutex via rfkill_register) is safe because during
+registration the device is not yet in rfkill_list, so no concurrent
+rfkill operations can occur on this device.
+
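+The shape of the fix is easy to model outside the kernel. A minimal
+pthread sketch (made-up names, not the NFC code): detach the pointer
+while holding the first lock, then run the teardown that needs the
+second lock only after the first lock has been dropped:
+
+  #include <pthread.h>
+  #include <stdlib.h>
+
+  struct obj { int dummy; };
+
+  struct dev {
+          pthread_mutex_t lock;   /* plays the role of device_lock */
+          struct obj *rfk;        /* plays the role of dev->rfkill */
+  };
+
+  /* stands in for rfkill_unregister()/rfkill_destroy(); in the real
+   * code this path takes rfkill_global_mutex, so it must never be
+   * entered while dev->lock is held */
+  static void obj_unregister(struct obj *o)
+  {
+          free(o);
+  }
+
+  static void dev_unregister(struct dev *d)
+  {
+          struct obj *o = NULL;
+
+          pthread_mutex_lock(&d->lock);
+          if (d->rfk) {
+                  o = d->rfk;     /* detach under the lock ... */
+                  d->rfk = NULL;
+          }
+          pthread_mutex_unlock(&d->lock);
+
+          if (o)                  /* ... tear down after dropping it */
+                  obj_unregister(o);
+  }
+
+  int main(void)
+  {
+          struct dev d = { PTHREAD_MUTEX_INITIALIZER,
+                           malloc(sizeof(struct obj)) };
+
+          dev_unregister(&d);
+          return 0;
+  }
+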
+Fixes: 3e3b5dfcd16a ("NFC: reorder the logic in nfc_{un,}register_device")
+Cc: stable@vger.kernel.org
+Reported-by: syzbot+4ef89409a235d804c6c2@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=4ef89409a235d804c6c2
+Link: https://lore.kernel.org/all/20251217054908.178907-1-kartikey406@gmail.com/T/ [v1]
+Signed-off-by: Deepanshu Kartikey <kartikey406@gmail.com>
+Reviewed-by: Krzysztof Kozlowski <krzysztof.kozlowski@oss.qualcomm.com>
+Link: https://patch.msgid.link/20251218012355.279940-1-kartikey406@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/nfc/core.c |    9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/net/nfc/core.c
++++ b/net/nfc/core.c
+@@ -1154,6 +1154,7 @@ EXPORT_SYMBOL(nfc_register_device);
+ void nfc_unregister_device(struct nfc_dev *dev)
+ {
+       int rc;
++      struct rfkill *rfk = NULL;
+       pr_debug("dev_name=%s\n", dev_name(&dev->dev));
+@@ -1164,13 +1165,17 @@ void nfc_unregister_device(struct nfc_de
+       device_lock(&dev->dev);
+       if (dev->rfkill) {
+-              rfkill_unregister(dev->rfkill);
+-              rfkill_destroy(dev->rfkill);
++              rfk = dev->rfkill;
+               dev->rfkill = NULL;
+       }
+       dev->shutting_down = true;
+       device_unlock(&dev->dev);
++      if (rfk) {
++              rfkill_unregister(rfk);
++              rfkill_destroy(rfk);
++      }
++
+       if (dev->ops->check_presence) {
+               del_timer_sync(&dev->check_pres_timer);
+               cancel_work_sync(&dev->check_pres_work);
diff --git a/queue-6.12/net-usb-sr9700-fix-incorrect-command-used-to-write-single-register.patch b/queue-6.12/net-usb-sr9700-fix-incorrect-command-used-to-write-single-register.patch
new file mode 100644
index 0000000..2e4a06f
--- /dev/null
@@ -0,0 +1,43 @@
+From fa0b198be1c6775bc7804731a43be5d899d19e7a Mon Sep 17 00:00:00 2001
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Date: Sun, 21 Dec 2025 00:24:00 -0800
+Subject: net: usb: sr9700: fix incorrect command used to write single register
+
+From: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+
+commit fa0b198be1c6775bc7804731a43be5d899d19e7a upstream.
+
+For me, this fixes the device failing to initialize with "error reading
+MAC address", probably because the incorrect write of NCR_RST to SR_NCR
+was not actually resetting the device.
+
+Fixes: c9b37458e95629b1d1171457afdcc1bf1eb7881d ("USB2NET : SR9700 : One chip USB 1.1 USB2NET SR9700Device Driver Support")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ethan Nelson-Moore <enelsonmoore@gmail.com>
+Link: https://patch.msgid.link/20251221082400.50688-1-enelsonmoore@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/sr9700.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/usb/sr9700.c
++++ b/drivers/net/usb/sr9700.c
+@@ -52,7 +52,7 @@ static int sr_read_reg(struct usbnet *de
+ static int sr_write_reg(struct usbnet *dev, u8 reg, u8 value)
+ {
+-      return usbnet_write_cmd(dev, SR_WR_REGS, SR_REQ_WR_REG,
++      return usbnet_write_cmd(dev, SR_WR_REG, SR_REQ_WR_REG,
+                               value, reg, NULL, 0);
+ }
+@@ -65,7 +65,7 @@ static void sr_write_async(struct usbnet
+ static void sr_write_reg_async(struct usbnet *dev, u8 reg, u8 value)
+ {
+-      usbnet_write_cmd_async(dev, SR_WR_REGS, SR_REQ_WR_REG,
++      usbnet_write_cmd_async(dev, SR_WR_REG, SR_REQ_WR_REG,
+                              value, reg, NULL, 0);
+ }
diff --git a/queue-6.12/nfsd-drop-the-client-reference-in-client_states_open.patch b/queue-6.12/nfsd-drop-the-client-reference-in-client_states_open.patch
new file mode 100644
index 0000000..0d60da4
--- /dev/null
@@ -0,0 +1,36 @@
+From 1f941b2c23fd34c6f3b76d36f9d0a2528fa92b8f Mon Sep 17 00:00:00 2001
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Date: Sat, 6 Dec 2025 15:38:42 +0800
+Subject: nfsd: Drop the client reference in client_states_open()
+
+From: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+
+commit 1f941b2c23fd34c6f3b76d36f9d0a2528fa92b8f upstream.
+
+In the error path, call drop_client() to drop the reference
+obtained by get_nfsdfs_clp().
+
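+As a small standalone illustration of the pattern (made-up names, not
+the nfsd code): every early return taken after the reference has been
+acquired must drop it again, or the count never falls back to zero:
+
+  #include <stdio.h>
+
+  struct client { int refs; };
+
+  static void get_client(struct client *c)  { c->refs++; }
+  static void drop_client(struct client *c) { c->refs--; }
+
+  /* stands in for seq_open(); pretend it fails */
+  static int seq_open_stub(void) { return -1; }
+
+  static int states_open(struct client *c)
+  {
+          int ret;
+
+          get_client(c);                  /* reference taken up front */
+
+          ret = seq_open_stub();
+          if (ret) {
+                  drop_client(c);         /* balance it on the error path */
+                  return ret;
+          }
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct client c = { 0 };
+
+          states_open(&c);
+          printf("refs after failed open: %d\n", c.refs);   /* prints 0 */
+          return 0;
+  }
+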
+Fixes: 78599c42ae3c ("nfsd4: add file to display list of client's opens")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Haoxiang Li <lihaoxiang@isrc.iscas.ac.cn>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4state.c |    4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -2989,8 +2989,10 @@ static int client_states_open(struct ino
+               return -ENXIO;
+       ret = seq_open(file, &states_seq_ops);
+-      if (ret)
++      if (ret) {
++              drop_client(clp);
+               return ret;
++      }
+       s = file->private_data;
+       s->private = clp;
+       return 0;
diff --git a/queue-6.12/pmdomain-imx-fix-reference-count-leak-in-imx_gpc_probe.patch b/queue-6.12/pmdomain-imx-fix-reference-count-leak-in-imx_gpc_probe.patch
new file mode 100644
index 0000000..951182a
--- /dev/null
@@ -0,0 +1,44 @@
+From 73cb5f6eafb0ac7aea8cdeb8ff12981aa741d8fb Mon Sep 17 00:00:00 2001
+From: Wentao Liang <vulab@iscas.ac.cn>
+Date: Thu, 11 Dec 2025 04:02:52 +0000
+Subject: pmdomain: imx: Fix reference count leak in imx_gpc_probe()
+
+From: Wentao Liang <vulab@iscas.ac.cn>
+
+commit 73cb5f6eafb0ac7aea8cdeb8ff12981aa741d8fb upstream.
+
+of_get_child_by_name() returns a node pointer with refcount incremented.
+Use the __free() attribute to manage the pgc_node reference, ensuring
+automatic of_node_put() cleanup when pgc_node goes out of scope.
+
+This eliminates the need for explicit error handling paths and avoids
+reference count leaks.
+
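+As background, the kernel's __free()/DEFINE_FREE() helpers are built on
+the compiler's scope-based cleanup attribute; a plain-C sketch of the
+same idea (made-up names, not the gpc driver):
+
+  #include <errno.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  /* runs automatically whenever a variable marked with the cleanup
+   * attribute goes out of scope, on every return path */
+  static void put_node(char **node)
+  {
+          if (*node) {
+                  printf("put %s\n", *node);
+                  free(*node);
+          }
+  }
+
+  static int probe(int fail_early)
+  {
+          char *node __attribute__((cleanup(put_node))) = strdup("pgc");
+
+          if (!node)
+                  return -ENOMEM;
+          if (fail_early)
+                  return -EINVAL;         /* no explicit cleanup needed */
+          return 0;                       /* ... nor here */
+  }
+
+  int main(void)
+  {
+          printf("%d\n", probe(1));
+          printf("%d\n", probe(0));
+          return 0;
+  }
+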
+Fixes: 721cabf6c660 ("soc: imx: move PGC handling to a new GPC driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Wentao Liang <vulab@iscas.ac.cn>
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pmdomain/imx/gpc.c |    5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/drivers/pmdomain/imx/gpc.c
++++ b/drivers/pmdomain/imx/gpc.c
+@@ -403,13 +403,12 @@ clk_err:
+ static int imx_gpc_probe(struct platform_device *pdev)
+ {
+       const struct imx_gpc_dt_data *of_id_data = device_get_match_data(&pdev->dev);
+-      struct device_node *pgc_node;
++      struct device_node *pgc_node __free(device_node)
++              = of_get_child_by_name(pdev->dev.of_node, "pgc");
+       struct regmap *regmap;
+       void __iomem *base;
+       int ret;
+-      pgc_node = of_get_child_by_name(pdev->dev.of_node, "pgc");
+-
+       /* bail out if DT too old and doesn't provide the necessary info */
+       if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells") &&
+           !pgc_node)
diff --git a/queue-6.12/rdma-cm-fix-leaking-the-multicast-gid-table-reference.patch b/queue-6.12/rdma-cm-fix-leaking-the-multicast-gid-table-reference.patch
new file mode 100644
index 0000000..091482d
--- /dev/null
@@ -0,0 +1,51 @@
+From 57f3cb6c84159d12ba343574df2115fb18dd83ca Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@nvidia.com>
+Date: Fri, 28 Nov 2025 20:53:21 -0400
+Subject: RDMA/cm: Fix leaking the multicast GID table reference
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+commit 57f3cb6c84159d12ba343574df2115fb18dd83ca upstream.
+
+If the CM ID is destroyed while the CM event for the multicast join is
+still queued, cancel_work_sync() will prevent the work from running,
+which also prevents the ah_attr from being destroyed. This leaks a
+refcount and triggers a WARN:
+
+   GID entry ref leak for dev syz1 index 2 ref=573
+   WARNING: CPU: 1 PID: 655 at drivers/infiniband/core/cache.c:809 release_gid_table drivers/infiniband/core/cache.c:806 [inline]
+   WARNING: CPU: 1 PID: 655 at drivers/infiniband/core/cache.c:809 gid_table_release_one+0x284/0x3cc drivers/infiniband/core/cache.c:886
+
+Destroy the ah_attr after canceling the work; it is safe to call this
+twice.
+
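+As a rough standalone sketch of that pattern (invented names, not the
+CM code): the release helper is idempotent, so the teardown path can
+drop the reference itself after cancelling the work, whether or not the
+handler ever ran:
+
+  #include <stdio.h>
+
+  struct gid_ref { int held; };
+
+  /* idempotent release: a second call is a no-op */
+  static void put_gid(struct gid_ref *g)
+  {
+          if (g->held) {
+                  g->held = 0;    /* drop the table reference once */
+                  printf("reference dropped\n");
+          }
+  }
+
+  /* the queued work would normally do the release ... */
+  static void join_work(struct gid_ref *g)
+  {
+          put_gid(g);
+  }
+
+  /* ... but after cancel_work_sync() it may never have run, so the
+   * teardown drops the reference as well; the double call is safe */
+  static void destroy_mc(struct gid_ref *g, int work_ran)
+  {
+          if (work_ran)
+                  join_work(g);   /* simulate the handler having run */
+          put_gid(g);             /* release unconditionally */
+  }
+
+  int main(void)
+  {
+          struct gid_ref a = { 1 }, b = { 1 };
+
+          destroy_mc(&a, 0);      /* work cancelled before it ran */
+          destroy_mc(&b, 1);      /* work already ran: still no leak */
+          return 0;
+  }
+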
+Link: https://patch.msgid.link/r/0-v1-4285d070a6b2+20a-rdma_mc_gid_leak_syz_jgg@nvidia.com
+Cc: stable@vger.kernel.org
+Fixes: fe454dc31e84 ("RDMA/ucma: Fix use-after-free bug in ucma_create_uevent")
+Reported-by: syzbot+b0da83a6c0e2e2bddbd4@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68232e7b.050a0220.f2294.09f6.GAE@google.com
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/cma.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/infiniband/core/cma.c
++++ b/drivers/infiniband/core/cma.c
+@@ -2009,6 +2009,7 @@ static void destroy_mc(struct rdma_id_pr
+               ib_sa_free_multicast(mc->sa_mc);
+       if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
++              struct rdma_cm_event *event = &mc->iboe_join.event;
+               struct rdma_dev_addr *dev_addr =
+                       &id_priv->id.route.addr.dev_addr;
+               struct net_device *ndev = NULL;
+@@ -2031,6 +2032,8 @@ static void destroy_mc(struct rdma_id_pr
+               dev_put(ndev);
+               cancel_work_sync(&mc->iboe_join.work);
++              if (event->event == RDMA_CM_EVENT_MULTICAST_JOIN)
++                      rdma_destroy_ah_attr(&event->param.ud.ah_attr);
+       }
+       kfree(mc);
+ }
diff --git a/queue-6.12/rdma-core-check-for-the-presence-of-ls_nla_type_dgid-correctly.patch b/queue-6.12/rdma-core-check-for-the-presence-of-ls_nla_type_dgid-correctly.patch
new file mode 100644
index 0000000..f60ef54
--- /dev/null
@@ -0,0 +1,129 @@
+From a7b8e876e0ef0232b8076972c57ce9a7286b47ca Mon Sep 17 00:00:00 2001
+From: Jason Gunthorpe <jgg@nvidia.com>
+Date: Fri, 28 Nov 2025 13:37:28 -0400
+Subject: RDMA/core: Check for the presence of LS_NLA_TYPE_DGID correctly
+
+From: Jason Gunthorpe <jgg@nvidia.com>
+
+commit a7b8e876e0ef0232b8076972c57ce9a7286b47ca upstream.
+
+The netlink response for RDMA_NL_LS_OP_IP_RESOLVE should always have an
+LS_NLA_TYPE_DGID attribute; a response without one is invalid.
+
+Use the nl parsing logic properly and call nla_parse_deprecated() to fill
+the nlattrs array and then directly index that array to get the data for
+the DGID. Just fail if it is NULL.
+
+Remove the for loop searching for the nla, and squash the validation and
+parsing into one function.
+
+This fixes an uninitialized read from the stack, triggered by userspace
+if it does not provide the DGID to a kernel-initiated
+RDMA_NL_LS_OP_IP_RESOLVE query.
+
+    BUG: KMSAN: uninit-value in hex_byte_pack include/linux/hex.h:13 [inline]
+    BUG: KMSAN: uninit-value in ip6_string+0xef4/0x13a0 lib/vsprintf.c:1490
+     hex_byte_pack include/linux/hex.h:13 [inline]
+     ip6_string+0xef4/0x13a0 lib/vsprintf.c:1490
+     ip6_addr_string+0x18a/0x3e0 lib/vsprintf.c:1509
+     ip_addr_string+0x245/0xee0 lib/vsprintf.c:1633
+     pointer+0xc09/0x1bd0 lib/vsprintf.c:2542
+     vsnprintf+0xf8a/0x1bd0 lib/vsprintf.c:2930
+     vprintk_store+0x3ae/0x1530 kernel/printk/printk.c:2279
+     vprintk_emit+0x307/0xcd0 kernel/printk/printk.c:2426
+     vprintk_default+0x3f/0x50 kernel/printk/printk.c:2465
+     vprintk+0x36/0x50 kernel/printk/printk_safe.c:82
+     _printk+0x17e/0x1b0 kernel/printk/printk.c:2475
+     ib_nl_process_good_ip_rsep drivers/infiniband/core/addr.c:128 [inline]
+     ib_nl_handle_ip_res_resp+0x963/0x9d0 drivers/infiniband/core/addr.c:141
+     rdma_nl_rcv_msg drivers/infiniband/core/netlink.c:-1 [inline]
+     rdma_nl_rcv_skb drivers/infiniband/core/netlink.c:239 [inline]
+     rdma_nl_rcv+0xefa/0x11c0 drivers/infiniband/core/netlink.c:259
+     netlink_unicast_kernel net/netlink/af_netlink.c:1320 [inline]
+     netlink_unicast+0xf04/0x12b0 net/netlink/af_netlink.c:1346
+     netlink_sendmsg+0x10b3/0x1250 net/netlink/af_netlink.c:1896
+     sock_sendmsg_nosec net/socket.c:714 [inline]
+     __sock_sendmsg+0x333/0x3d0 net/socket.c:729
+     ____sys_sendmsg+0x7e0/0xd80 net/socket.c:2617
+     ___sys_sendmsg+0x271/0x3b0 net/socket.c:2671
+     __sys_sendmsg+0x1aa/0x300 net/socket.c:2703
+     __compat_sys_sendmsg net/compat.c:346 [inline]
+     __do_compat_sys_sendmsg net/compat.c:353 [inline]
+     __se_compat_sys_sendmsg net/compat.c:350 [inline]
+     __ia32_compat_sys_sendmsg+0xa4/0x100 net/compat.c:350
+     ia32_sys_call+0x3f6c/0x4310 arch/x86/include/generated/asm/syscalls_32.h:371
+     do_syscall_32_irqs_on arch/x86/entry/syscall_32.c:83 [inline]
+     __do_fast_syscall_32+0xb0/0x150 arch/x86/entry/syscall_32.c:306
+     do_fast_syscall_32+0x38/0x80 arch/x86/entry/syscall_32.c:331
+     do_SYSENTER_32+0x1f/0x30 arch/x86/entry/syscall_32.c:3
+
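+Outside the kernel, the same parse-then-check discipline looks roughly
+like the sketch below (hypothetical types, not the netlink API): index
+every attribute by type in one pass, then refuse to copy anything into
+the stack buffer unless the required attribute is actually present:
+
+  #include <stdio.h>
+  #include <string.h>
+
+  enum { ATTR_DGID = 1, ATTR_MAX = 4 };
+
+  struct attr { int type; const void *data; size_t len; };
+
+  /* "parse" step: index every received attribute by its type */
+  static void parse(const struct attr *in, size_t n,
+                    const struct attr *tb[ATTR_MAX])
+  {
+          for (int t = 0; t < ATTR_MAX; t++)
+                  tb[t] = NULL;
+          for (size_t i = 0; i < n; i++)
+                  if (in[i].type > 0 && in[i].type < ATTR_MAX)
+                          tb[in[i].type] = &in[i];
+  }
+
+  static int handle_response(const struct attr *in, size_t n)
+  {
+          const struct attr *tb[ATTR_MAX];
+          unsigned char gid[16];
+
+          parse(in, n, tb);
+          if (!tb[ATTR_DGID] || tb[ATTR_DGID]->len != sizeof(gid))
+                  return -1;      /* reject: gid is never read */
+          memcpy(gid, tb[ATTR_DGID]->data, sizeof(gid));
+          return gid[0];          /* use the now-initialised buffer */
+  }
+
+  int main(void)
+  {
+          unsigned char raw[16] = { 7 };
+          struct attr msg[] = { { ATTR_DGID, raw, sizeof(raw) } };
+
+          printf("%d\n", handle_response(msg, 1));   /* 7  */
+          printf("%d\n", handle_response(msg, 0));   /* -1 */
+          return 0;
+  }
+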
+Link: https://patch.msgid.link/r/0-v1-3fbaef094271+2cf-rdma_op_ip_rslv_syz_jgg@nvidia.com
+Cc: stable@vger.kernel.org
+Fixes: ae43f8286730 ("IB/core: Add IP to GID netlink offload")
+Reported-by: syzbot+938fcd548c303fe33c1a@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/r/68dc3dac.a00a0220.102ee.004f.GAE@google.com
+Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/infiniband/core/addr.c |   33 ++++++++++-----------------------
+ 1 file changed, 10 insertions(+), 23 deletions(-)
+
+--- a/drivers/infiniband/core/addr.c
++++ b/drivers/infiniband/core/addr.c
+@@ -80,37 +80,25 @@ static const struct nla_policy ib_nl_add
+               .min = sizeof(struct rdma_nla_ls_gid)},
+ };
+-static inline bool ib_nl_is_good_ip_resp(const struct nlmsghdr *nlh)
++static void ib_nl_process_ip_rsep(const struct nlmsghdr *nlh)
+ {
+       struct nlattr *tb[LS_NLA_TYPE_MAX] = {};
++      union ib_gid gid;
++      struct addr_req *req;
++      int found = 0;
+       int ret;
+       if (nlh->nlmsg_flags & RDMA_NL_LS_F_ERR)
+-              return false;
++              return;
+       ret = nla_parse_deprecated(tb, LS_NLA_TYPE_MAX - 1, nlmsg_data(nlh),
+                                  nlmsg_len(nlh), ib_nl_addr_policy, NULL);
+       if (ret)
+-              return false;
+-
+-      return true;
+-}
+-
+-static void ib_nl_process_good_ip_rsep(const struct nlmsghdr *nlh)
+-{
+-      const struct nlattr *head, *curr;
+-      union ib_gid gid;
+-      struct addr_req *req;
+-      int len, rem;
+-      int found = 0;
+-
+-      head = (const struct nlattr *)nlmsg_data(nlh);
+-      len = nlmsg_len(nlh);
++              return;
+-      nla_for_each_attr(curr, head, len, rem) {
+-              if (curr->nla_type == LS_NLA_TYPE_DGID)
+-                      memcpy(&gid, nla_data(curr), nla_len(curr));
+-      }
++      if (!tb[LS_NLA_TYPE_DGID])
++              return;
++      memcpy(&gid, nla_data(tb[LS_NLA_TYPE_DGID]), sizeof(gid));
+       spin_lock_bh(&lock);
+       list_for_each_entry(req, &req_list, list) {
+@@ -137,8 +125,7 @@ int ib_nl_handle_ip_res_resp(struct sk_b
+           !(NETLINK_CB(skb).sk))
+               return -EPERM;
+-      if (ib_nl_is_good_ip_resp(nlh))
+-              ib_nl_process_good_ip_rsep(nlh);
++      ib_nl_process_ip_rsep(nlh);
+       return 0;
+ }
diff --git a/queue-6.12/samples-ftrace-adjust-loongarch-register-restore-order-in-direct-calls.patch b/queue-6.12/samples-ftrace-adjust-loongarch-register-restore-order-in-direct-calls.patch
new file mode 100644
index 0000000..0ece942
--- /dev/null
@@ -0,0 +1,117 @@
+From bb85d206be208bbf834883e948125a35ac59993a Mon Sep 17 00:00:00 2001
+From: Chenghao Duan <duanchenghao@kylinos.cn>
+Date: Wed, 31 Dec 2025 15:19:25 +0800
+Subject: samples/ftrace: Adjust LoongArch register restore order in direct calls
+
+From: Chenghao Duan <duanchenghao@kylinos.cn>
+
+commit bb85d206be208bbf834883e948125a35ac59993a upstream.
+
+Ensure that, in the ftrace direct call logic, the CPU register state
+(with ra holding the parent return address) is correctly restored after
+the custom trampoline function has executed and before returning to the
+traced function. Additionally, guarantee that the jump via jr t0 (the
+traced function address) behaves correctly.
+
+Cc: stable@vger.kernel.org
+Fixes: 9cdc3b6a299c ("LoongArch: ftrace: Add direct call support")
+Reported-by: Youling Tang <tangyouling@kylinos.cn>
+Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Chenghao Duan <duanchenghao@kylinos.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ samples/ftrace/ftrace-direct-modify.c       |    8 ++++----
+ samples/ftrace/ftrace-direct-multi-modify.c |    8 ++++----
+ samples/ftrace/ftrace-direct-multi.c        |    4 ++--
+ samples/ftrace/ftrace-direct-too.c          |    4 ++--
+ samples/ftrace/ftrace-direct.c              |    4 ++--
+ 5 files changed, 14 insertions(+), 14 deletions(-)
+
+--- a/samples/ftrace/ftrace-direct-modify.c
++++ b/samples/ftrace/ftrace-direct-modify.c
+@@ -176,8 +176,8 @@ asm (
+ "     st.d    $t0, $sp, 0\n"
+ "     st.d    $ra, $sp, 8\n"
+ "     bl      my_direct_func1\n"
+-"     ld.d    $t0, $sp, 0\n"
+-"     ld.d    $ra, $sp, 8\n"
++"     ld.d    $ra, $sp, 0\n"
++"     ld.d    $t0, $sp, 8\n"
+ "     addi.d  $sp, $sp, 16\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp1, .-my_tramp1\n"
+@@ -189,8 +189,8 @@ asm (
+ "     st.d    $t0, $sp, 0\n"
+ "     st.d    $ra, $sp, 8\n"
+ "     bl      my_direct_func2\n"
+-"     ld.d    $t0, $sp, 0\n"
+-"     ld.d    $ra, $sp, 8\n"
++"     ld.d    $ra, $sp, 0\n"
++"     ld.d    $t0, $sp, 8\n"
+ "     addi.d  $sp, $sp, 16\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp2, .-my_tramp2\n"
+--- a/samples/ftrace/ftrace-direct-multi-modify.c
++++ b/samples/ftrace/ftrace-direct-multi-modify.c
+@@ -199,8 +199,8 @@ asm (
+ "     move    $a0, $t0\n"
+ "     bl      my_direct_func1\n"
+ "     ld.d    $a0, $sp, 0\n"
+-"     ld.d    $t0, $sp, 8\n"
+-"     ld.d    $ra, $sp, 16\n"
++"     ld.d    $ra, $sp, 8\n"
++"     ld.d    $t0, $sp, 16\n"
+ "     addi.d  $sp, $sp, 32\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp1, .-my_tramp1\n"
+@@ -215,8 +215,8 @@ asm (
+ "     move    $a0, $t0\n"
+ "     bl      my_direct_func2\n"
+ "     ld.d    $a0, $sp, 0\n"
+-"     ld.d    $t0, $sp, 8\n"
+-"     ld.d    $ra, $sp, 16\n"
++"     ld.d    $ra, $sp, 8\n"
++"     ld.d    $t0, $sp, 16\n"
+ "     addi.d  $sp, $sp, 32\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp2, .-my_tramp2\n"
+--- a/samples/ftrace/ftrace-direct-multi.c
++++ b/samples/ftrace/ftrace-direct-multi.c
+@@ -131,8 +131,8 @@ asm (
+ "     move    $a0, $t0\n"
+ "     bl      my_direct_func\n"
+ "     ld.d    $a0, $sp, 0\n"
+-"     ld.d    $t0, $sp, 8\n"
+-"     ld.d    $ra, $sp, 16\n"
++"     ld.d    $ra, $sp, 8\n"
++"     ld.d    $t0, $sp, 16\n"
+ "     addi.d  $sp, $sp, 32\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp, .-my_tramp\n"
+--- a/samples/ftrace/ftrace-direct-too.c
++++ b/samples/ftrace/ftrace-direct-too.c
+@@ -143,8 +143,8 @@ asm (
+ "     ld.d    $a0, $sp, 0\n"
+ "     ld.d    $a1, $sp, 8\n"
+ "     ld.d    $a2, $sp, 16\n"
+-"     ld.d    $t0, $sp, 24\n"
+-"     ld.d    $ra, $sp, 32\n"
++"     ld.d    $ra, $sp, 24\n"
++"     ld.d    $t0, $sp, 32\n"
+ "     addi.d  $sp, $sp, 48\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp, .-my_tramp\n"
+--- a/samples/ftrace/ftrace-direct.c
++++ b/samples/ftrace/ftrace-direct.c
+@@ -124,8 +124,8 @@ asm (
+ "     st.d    $ra, $sp, 16\n"
+ "     bl      my_direct_func\n"
+ "     ld.d    $a0, $sp, 0\n"
+-"     ld.d    $t0, $sp, 8\n"
+-"     ld.d    $ra, $sp, 16\n"
++"     ld.d    $ra, $sp, 8\n"
++"     ld.d    $t0, $sp, 16\n"
+ "     addi.d  $sp, $sp, 32\n"
+ "     jr      $t0\n"
+ "     .size           my_tramp, .-my_tramp\n"
diff --git a/queue-6.12/series b/queue-6.12/series
index 8f5ce142692c367e6d001d3a6d0944b59162c814..50388dfc8cc755eb4f2ac5adb897a18b2e8c08bd 100644
@@ -431,3 +431,25 @@ mm-damon-tests-core-kunit-handle-alloc-failures-in-damon_test_set_regions.patch
 mm-damon-tests-core-kunit-handle-alloc-failures-in-damon_test_update_monitoring_result.patch
 mm-damon-tests-core-kunit-handle-alloc-failures-in-damon_test_ops_registration.patch
 mm-damon-tests-core-kunit-handle-alloc-failure-on-damon_test_set_attrs.patch
+pmdomain-imx-fix-reference-count-leak-in-imx_gpc_probe.patch
+compiler_types.h-add-auto-as-a-macro-for-__auto_type.patch
+mm-kasan-fix-incorrect-unpoisoning-in-vrealloc-for-kasan.patch
+kasan-refactor-pcpu-kasan-vmalloc-unpoison.patch
+kasan-unpoison-vms-addresses-with-a-common-tag.patch
+lockd-fix-vfs_test_lock-calls.patch
+idr-fix-idr_alloc-returning-an-id-out-of-range.patch
+mm-page_owner-fix-memory-leak-in-page_owner_stack_fops-release.patch
+x86-microcode-amd-fix-entrysign-revision-check-for-zen5-strix-halo.patch
+tools-mm-page_owner_sort-fix-timestamp-comparison-for-stable-sorting.patch
+samples-ftrace-adjust-loongarch-register-restore-order-in-direct-calls.patch
+rdma-core-check-for-the-presence-of-ls_nla_type_dgid-correctly.patch
+rdma-cm-fix-leaking-the-multicast-gid-table-reference.patch
+e1000-fix-oob-in-e1000_tbi_should_accept.patch
+fjes-add-missing-iounmap-in-fjes_hw_init.patch
+loongarch-refactor-register-restoration-in-ftrace_common_return.patch
+loongarch-bpf-zero-extend-bpf_tail_call-index.patch
+loongarch-bpf-sign-extend-kfunc-call-arguments.patch
+nfsd-drop-the-client-reference-in-client_states_open.patch
+net-usb-sr9700-fix-incorrect-command-used-to-write-single-register.patch
+net-nfc-fix-deadlock-between-nfc_unregister_device-and-rfkill_fop_write.patch
+net-macb-relocate-mog_init_rings-callback-from-macb_mac_link_up-to-macb_open.patch
diff --git a/queue-6.12/tools-mm-page_owner_sort-fix-timestamp-comparison-for-stable-sorting.patch b/queue-6.12/tools-mm-page_owner_sort-fix-timestamp-comparison-for-stable-sorting.patch
new file mode 100644
index 0000000..94fc451
--- /dev/null
@@ -0,0 +1,40 @@
+From 7013803444dd3bbbe28fd3360c084cec3057c554 Mon Sep 17 00:00:00 2001
+From: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+Date: Tue, 9 Dec 2025 10:15:52 +0530
+Subject: tools/mm/page_owner_sort: fix timestamp comparison for stable sorting
+
+From: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+
+commit 7013803444dd3bbbe28fd3360c084cec3057c554 upstream.
+
+The ternary operator in compare_ts() returns 1 when timestamps are equal,
+causing unstable sorting behavior. Replace with explicit three-way
+comparison that returns 0 for equal timestamps, ensuring stable qsort
+ordering and consistent output.
+
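+Pulled out of the tool, the fixed comparator is an ordinary three-way
+qsort() compare; a standalone version (the ts_nsec field name is kept,
+the rest is scaffolding) shows the shape:
+
+  #include <stdint.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct block { uint64_t ts_nsec; };
+
+  /* three-way compare: equal keys report 0, so qsort() treats them as
+   * equivalent instead of giving them an arbitrary, inconsistent order */
+  static int compare_ts(const void *p1, const void *p2)
+  {
+          const struct block *l1 = p1, *l2 = p2;
+
+          if (l1->ts_nsec < l2->ts_nsec)
+                  return -1;
+          if (l1->ts_nsec > l2->ts_nsec)
+                  return 1;
+          return 0;
+  }
+
+  int main(void)
+  {
+          struct block b[] = { { 30 }, { 10 }, { 10 } };
+
+          qsort(b, 3, sizeof(b[0]), compare_ts);
+          for (int i = 0; i < 3; i++)
+                  printf("%llu\n", (unsigned long long)b[i].ts_nsec);
+          return 0;
+  }
+
+(Returning the raw difference of the two timestamps instead would
+truncate a 64-bit delta to int, so the explicit comparisons are also
+the safer form.)
+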
+Link: https://lkml.kernel.org/r/20251209044552.3396468-1-kaushlendra.kumar@intel.com
+Fixes: 8f9c447e2e2b ("tools/vm/page_owner_sort.c: support sorting pid and time")
+Signed-off-by: Kaushlendra Kumar <kaushlendra.kumar@intel.com>
+Cc: Chongxi Zhao <zhaochongxi2019@email.szu.edu.cn>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/mm/page_owner_sort.c |    6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/tools/mm/page_owner_sort.c
++++ b/tools/mm/page_owner_sort.c
+@@ -183,7 +183,11 @@ static int compare_ts(const void *p1, co
+ {
+       const struct block_list *l1 = p1, *l2 = p2;
+-      return l1->ts_nsec < l2->ts_nsec ? -1 : 1;
++      if (l1->ts_nsec < l2->ts_nsec)
++              return -1;
++      if (l1->ts_nsec > l2->ts_nsec)
++              return 1;
++      return 0;
+ }
+ static int compare_cull_condition(const void *p1, const void *p2)
diff --git a/queue-6.12/x86-microcode-amd-fix-entrysign-revision-check-for-zen5-strix-halo.patch b/queue-6.12/x86-microcode-amd-fix-entrysign-revision-check-for-zen5-strix-halo.patch
new file mode 100644
index 0000000..d111f73
--- /dev/null
@@ -0,0 +1,33 @@
+From 150b1b97e27513535dcd3795d5ecd28e61b6cb8c Mon Sep 17 00:00:00 2001
+From: Rong Zhang <i@rong.moe>
+Date: Tue, 30 Dec 2025 02:22:21 +0800
+Subject: x86/microcode/AMD: Fix Entrysign revision check for Zen5/Strix Halo
+
+From: Rong Zhang <i@rong.moe>
+
+commit 150b1b97e27513535dcd3795d5ecd28e61b6cb8c upstream.
+
+Zen5 also contains family 1Ah, models 70h-7Fh, which are mistakenly missing
+from cpu_has_entrysign(). Add the missing range.
+
+Fixes: 8a9fb5129e8e ("x86/microcode/AMD: Limit Entrysign signature checking to known generations")
+Signed-off-by: Rong Zhang <i@rong.moe>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@kernel.org
+Link: https://patch.msgid.link/20251229182245.152747-1-i@rong.moe
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/cpu/microcode/amd.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/microcode/amd.c
++++ b/arch/x86/kernel/cpu/microcode/amd.c
+@@ -235,7 +235,7 @@ static bool cpu_has_entrysign(void)
+       if (fam == 0x1a) {
+               if (model <= 0x2f ||
+                   (0x40 <= model && model <= 0x4f) ||
+-                  (0x60 <= model && model <= 0x6f))
++                  (0x60 <= model && model <= 0x7f))
+                       return true;
+       }