--- /dev/null
+From 0d4aef630be9d5f9c1227d07669c26c4383b5ad0 Mon Sep 17 00:00:00 2001
+From: Yang Yang <n05ec@lzu.edu.cn>
+Date: Sat, 14 Mar 2026 07:11:27 +0000
+Subject: batman-adv: avoid OGM aggregation when skb tailroom is insufficient
+
+From: Yang Yang <n05ec@lzu.edu.cn>
+
+commit 0d4aef630be9d5f9c1227d07669c26c4383b5ad0 upstream.
+
+When OGM aggregation state is toggled at runtime, an existing forwarded
+packet may have been allocated with only packet_len bytes, while a later
+packet can still be selected for aggregation. Appending in this case can
+hit skb_put overflow conditions.
+
+Reject aggregation when the target skb tailroom cannot accommodate the new
+packet. The caller then falls back to creating a new forward packet
+instead of appending.
+
+Fixes: c6c8fea29769 ("net: Add batman-adv meshing protocol")
+Cc: stable@vger.kernel.org
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Signed-off-by: Yuan Tan <tanyuan98@outlook.com>
+Signed-off-by: Xin Liu <bird@lzu.edu.cn>
+Signed-off-by: Ao Zhou <n05ec@lzu.edu.cn>
+Signed-off-by: Yang Yang <n05ec@lzu.edu.cn>
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Simon Wunderlich <sw@simonwunderlich.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/bat_iv_ogm.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/batman-adv/bat_iv_ogm.c
++++ b/net/batman-adv/bat_iv_ogm.c
+@@ -473,6 +473,9 @@ batadv_iv_ogm_can_aggregate(const struct
+ if (aggregated_bytes > max_bytes)
+ return false;
+
++ if (skb_tailroom(forw_packet->skb) < packet_len)
++ return false;
++
+ if (packet_num >= BATADV_MAX_AGGREGATION_PACKETS)
+ return false;
+
--- /dev/null
+From 15145675690cab2de1056e7ed68e59cbd0452529 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lukas=20Johannes=20M=C3=B6ller?=
+ <research@johannes-moeller.dev>
+Date: Tue, 10 Mar 2026 21:59:46 +0000
+Subject: Bluetooth: L2CAP: Fix type confusion in l2cap_ecred_reconf_rsp()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lukas Johannes Möller <research@johannes-moeller.dev>
+
+commit 15145675690cab2de1056e7ed68e59cbd0452529 upstream.
+
+l2cap_ecred_reconf_rsp() casts the incoming data to struct
+l2cap_ecred_conn_rsp (the ECRED *connection* response, 8 bytes with
+result at offset 6) instead of struct l2cap_ecred_reconf_rsp (2 bytes
+with result at offset 0).
+
+This causes two problems:
+
+ - The sizeof(*rsp) length check requires 8 bytes instead of the
+ correct 2, so valid L2CAP_ECRED_RECONF_RSP packets are rejected
+ with -EPROTO.
+
+ - rsp->result reads from offset 6 instead of offset 0, returning
+ wrong data when the packet is large enough to pass the check.
+
+Fix by using the correct type. Also pass the already byte-swapped
+result variable to BT_DBG instead of the raw __le16 field.
+
+Fixes: 15f02b910562 ("Bluetooth: L2CAP: Add initial code for Enhanced Credit Based Mode")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lukas Johannes Möller <research@johannes-moeller.dev>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/l2cap_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -5408,7 +5408,7 @@ static inline int l2cap_ecred_reconf_rsp
+ u8 *data)
+ {
+ struct l2cap_chan *chan, *tmp;
+- struct l2cap_ecred_conn_rsp *rsp = (void *) data;
++ struct l2cap_ecred_reconf_rsp *rsp = (void *)data;
+ u16 result;
+
+ if (cmd_len < sizeof(*rsp))
+@@ -5416,7 +5416,7 @@ static inline int l2cap_ecred_reconf_rsp
+
+ result = __le16_to_cpu(rsp->result);
+
+- BT_DBG("result 0x%4.4x", rsp->result);
++ BT_DBG("result 0x%4.4x", result);
+
+ if (!result)
+ return 0;
--- /dev/null
+From dd815e6e3918dc75a49aaabac36e4f024d675101 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Lukas=20Johannes=20M=C3=B6ller?=
+ <research@johannes-moeller.dev>
+Date: Tue, 10 Mar 2026 21:59:47 +0000
+Subject: Bluetooth: L2CAP: Validate L2CAP_INFO_RSP payload length before access
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lukas Johannes Möller <research@johannes-moeller.dev>
+
+commit dd815e6e3918dc75a49aaabac36e4f024d675101 upstream.
+
+l2cap_information_rsp() checks that cmd_len covers the fixed
+l2cap_info_rsp header (type + result, 4 bytes) but then reads
+rsp->data without verifying that the payload is present:
+
+ - L2CAP_IT_FEAT_MASK calls get_unaligned_le32(rsp->data), which reads
+ 4 bytes past the header (needs cmd_len >= 8).
+
+ - L2CAP_IT_FIXED_CHAN reads rsp->data[0], 1 byte past the header
+ (needs cmd_len >= 5).
+
+A truncated L2CAP_INFO_RSP with result == L2CAP_IR_SUCCESS triggers an
+out-of-bounds read of adjacent skb data.
+
+Guard each data access with the required payload length check. If the
+payload is too short, skip the read and let the state machine complete
+with safe defaults (feat_mask and remote_fixed_chan remain zero from
+kzalloc), so the info timer cleanup and l2cap_conn_start() still run
+and the connection is not stalled.
+
+Fixes: 4e8402a3f884 ("[Bluetooth] Retrieve L2CAP features mask on connection setup")
+Cc: stable@vger.kernel.org
+Signed-off-by: Lukas Johannes Möller <research@johannes-moeller.dev>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/l2cap_core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4622,7 +4622,8 @@ static inline int l2cap_information_rsp(
+
+ switch (type) {
+ case L2CAP_IT_FEAT_MASK:
+- conn->feat_mask = get_unaligned_le32(rsp->data);
++ if (cmd_len >= sizeof(*rsp) + sizeof(u32))
++ conn->feat_mask = get_unaligned_le32(rsp->data);
+
+ if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
+ struct l2cap_info_req req;
+@@ -4641,7 +4642,8 @@ static inline int l2cap_information_rsp(
+ break;
+
+ case L2CAP_IT_FIXED_CHAN:
+- conn->remote_fixed_chan = rsp->data[0];
++ if (cmd_len >= sizeof(*rsp) + sizeof(rsp->data[0]))
++ conn->remote_fixed_chan = rsp->data[0];
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
--- /dev/null
+From 64dcbde7f8f870a4f2d9daf24ffb06f9748b5dd3 Mon Sep 17 00:00:00 2001
+From: Junrui Luo <moonafterrain@outlook.com>
+Date: Sat, 14 Mar 2026 17:41:04 +0800
+Subject: bnxt_en: fix OOB access in DBG_BUF_PRODUCER async event handler
+
+From: Junrui Luo <moonafterrain@outlook.com>
+
+commit 64dcbde7f8f870a4f2d9daf24ffb06f9748b5dd3 upstream.
+
+The ASYNC_EVENT_CMPL_EVENT_ID_DBG_BUF_PRODUCER handler in
+bnxt_async_event_process() uses a firmware-supplied 'type' field
+directly as an index into bp->bs_trace[] without bounds validation.
+
+The 'type' field is a 16-bit value extracted from DMA-mapped completion
+ring memory that the NIC writes directly to host RAM. A malicious or
+compromised NIC can supply any value from 0 to 65535, causing an
+out-of-bounds access into kernel heap memory.
+
+The bnxt_bs_trace_check_wrap() call then dereferences bs_trace->magic_byte
+and writes to bs_trace->last_offset and bs_trace->wrapped, leading to
+kernel memory corruption or a crash.
+
+Fix by adding a bounds check and defining BNXT_TRACE_MAX as
+DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE + 1 to cover all currently
+defined firmware trace types (0x0 through 0xc).
+
+Fixes: 84fcd9449fd7 ("bnxt_en: Manage the FW trace context memory")
+Reported-by: Yuhao Jiang <danisjiang@gmail.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Junrui Luo <moonafterrain@outlook.com>
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Link: https://patch.msgid.link/SYBPR01MB7881A253A1C9775D277F30E9AF42A@SYBPR01MB7881.ausprd01.prod.outlook.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 2 ++
+ drivers/net/ethernet/broadcom/bnxt/bnxt.h | 2 +-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2927,6 +2927,8 @@ static int bnxt_async_event_process(stru
+ u16 type = (u16)BNXT_EVENT_BUF_PRODUCER_TYPE(data1);
+ u32 offset = BNXT_EVENT_BUF_PRODUCER_OFFSET(data2);
+
++ if (type >= ARRAY_SIZE(bp->bs_trace))
++ goto async_event_process_exit;
+ bnxt_bs_trace_check_wrap(&bp->bs_trace[type], offset);
+ goto async_event_process_exit;
+ }
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+@@ -2135,7 +2135,7 @@ enum board_idx {
+ };
+
+ #define BNXT_TRACE_BUF_MAGIC_BYTE ((u8)0xbc)
+-#define BNXT_TRACE_MAX 11
++#define BNXT_TRACE_MAX (DBG_LOG_BUFFER_FLUSH_REQ_TYPE_ERR_QPC_TRACE + 1)
+
+ struct bnxt_bs_trace_info {
+ u8 *magic_byte;
--- /dev/null
+From 36f46b0e36892eba08978eef7502ff3c94ddba77 Mon Sep 17 00:00:00 2001
+From: Thorsten Blum <thorsten.blum@linux.dev>
+Date: Sat, 28 Feb 2026 00:00:09 +0100
+Subject: crash_dump: don't log dm-crypt key bytes in read_key_from_user_keying
+
+From: Thorsten Blum <thorsten.blum@linux.dev>
+
+commit 36f46b0e36892eba08978eef7502ff3c94ddba77 upstream.
+
+When debug logging is enabled, read_key_from_user_keying() logs the first
+8 bytes of the key payload and partially exposes the dm-crypt key. Stop
+logging any key bytes.
+
+Link: https://lkml.kernel.org/r/20260227230008.858641-2-thorsten.blum@linux.dev
+Fixes: 479e58549b0f ("crash_dump: store dm crypt keys in kdump reserved memory")
+Signed-off-by: Thorsten Blum <thorsten.blum@linux.dev>
+Cc: Baoquan He <bhe@redhat.com>
+Cc: Coiby Xu <coxu@redhat.com>
+Cc: Dave Young <dyoung@redhat.com>
+Cc: Vivek Goyal <vgoyal@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/crash_dump_dm_crypt.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/kernel/crash_dump_dm_crypt.c
++++ b/kernel/crash_dump_dm_crypt.c
+@@ -168,8 +168,8 @@ static int read_key_from_user_keying(str
+
+ memcpy(dm_key->data, ukp->data, ukp->datalen);
+ dm_key->key_size = ukp->datalen;
+- kexec_dprintk("Get dm crypt key (size=%u) %s: %8ph\n", dm_key->key_size,
+- dm_key->key_desc, dm_key->data);
++ kexec_dprintk("Get dm crypt key (size=%u) %s\n", dm_key->key_size,
++ dm_key->key_desc);
+
+ out:
+ up_read(&key->sem);
--- /dev/null
+From ebba09f198078b7a2565004104ef762d1148e7f0 Mon Sep 17 00:00:00 2001
+From: AlanSong-oc <AlanSong-oc@zhaoxin.com>
+Date: Fri, 13 Mar 2026 16:01:49 +0800
+Subject: crypto: padlock-sha - Disable for Zhaoxin processor
+
+From: AlanSong-oc <AlanSong-oc@zhaoxin.com>
+
+commit ebba09f198078b7a2565004104ef762d1148e7f0 upstream.
+
+For Zhaoxin processors, the XSHA1 instruction requires that the total
+memory allocated at the %rdi register be 32 bytes, while the XSHA1 and
+XSHA256 instructions don't perform any operation when %ecx is zero.
+
+Due to these requirements, the current padlock-sha driver does not work
+correctly with Zhaoxin processors. It cannot pass the self-tests and
+therefore does not activate the driver on Zhaoxin processors. This issue
+has been reported in Debian [1]. The self-tests fail with the
+following messages [2]:
+
+alg: shash: sha1-padlock-nano test failed (wrong result) on test vector 0, cfg="init+update+final aligned buffer"
+alg: self-tests for sha1 using sha1-padlock-nano failed (rc=-22)
+
+alg: shash: sha256-padlock-nano test failed (wrong result) on test vector 0, cfg="init+update+final aligned buffer"
+alg: self-tests for sha256 using sha256-padlock-nano failed (rc=-22)
+
+Disable the padlock-sha driver on Zhaoxin processors with the CPU family
+0x07 and newer. Following the suggestion in [3], support for PHE will be
+added to lib/crypto/ instead.
+
+[1] https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1113996
+[2] https://linux-hardware.org/?probe=271fabb7a4&log=dmesg
+[3] https://lore.kernel.org/linux-crypto/aUI4CGp6kK7mxgEr@gondor.apana.org.au/
+
+Fixes: 63dc06cd12f9 ("crypto: padlock-sha - Use API partial block handling")
+Cc: stable@vger.kernel.org
+Signed-off-by: AlanSong-oc <AlanSong-oc@zhaoxin.com>
+Link: https://lore.kernel.org/r/20260313080150.9393-2-AlanSong-oc@zhaoxin.com
+Signed-off-by: Eric Biggers <ebiggers@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/crypto/padlock-sha.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/drivers/crypto/padlock-sha.c
++++ b/drivers/crypto/padlock-sha.c
+@@ -332,6 +332,13 @@ static int __init padlock_init(void)
+ if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
+ return -ENODEV;
+
++ /*
++ * Skip family 0x07 and newer used by Zhaoxin processors,
++ * as the driver's self-tests fail on these CPUs.
++ */
++ if (c->x86 >= 0x07)
++ return -ENODEV;
++
+ /* Register the newly added algorithm module if on *
+ * VIA Nano processor, or else just do as before */
+ if (c->x86_model < 0x0f) {
--- /dev/null
+From 2b658c1c442ec1cd9eec5ead98d68662c40fe645 Mon Sep 17 00:00:00 2001
+From: Benjamin Tissoires <bentiss@kernel.org>
+Date: Fri, 13 Mar 2026 08:40:25 +0100
+Subject: HID: bpf: prevent buffer overflow in hid_hw_request
+
+From: Benjamin Tissoires <bentiss@kernel.org>
+
+commit 2b658c1c442ec1cd9eec5ead98d68662c40fe645 upstream.
+
+Right now the returned value is considered to be always valid. However,
+when playing with HID-BPF, the return value can be arbitrarily big,
+because it's the return value of dispatch_hid_bpf_raw_requests(), which
+calls the struct_ops and we have no guarantees that the value makes
+sense.
+
+Fixes: 8bd0488b5ea5 ("HID: bpf: add HID-BPF hooks for hid_hw_raw_requests")
+Cc: stable@vger.kernel.org
+Acked-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/hid/bpf/hid_bpf_dispatch.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/hid/bpf/hid_bpf_dispatch.c
++++ b/drivers/hid/bpf/hid_bpf_dispatch.c
+@@ -447,6 +447,8 @@ hid_bpf_hw_request(struct hid_bpf_ctx *c
+ (u64)(long)ctx,
+ true); /* prevent infinite recursions */
+
++ if (ret > size)
++ ret = size;
+ if (ret > 0)
+ memcpy(buf, dma_data, ret);
+
--- /dev/null
+From a47f0754bdd01f971c9715acdbdd3a07515c8f83 Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Mon, 16 Mar 2026 10:36:01 +0800
+Subject: LoongArch: Give more information if kmem access failed
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit a47f0754bdd01f971c9715acdbdd3a07515c8f83 upstream.
+
+If a memory access such as copy_{from, to}_kernel_nofault() fails, its
+users do not know what happened, so it is very useful to print the
+exception code for such cases. Furthermore, it is better to print the
+caller function to know where the entry is.
+
+Here are the low level call chains:
+
+ copy_from_kernel_nofault()
+ copy_from_kernel_nofault_loop()
+ __get_kernel_nofault()
+
+ copy_to_kernel_nofault()
+ copy_to_kernel_nofault_loop()
+ __put_kernel_nofault()
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/include/asm/uaccess.h | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/include/asm/uaccess.h
++++ b/arch/loongarch/include/asm/uaccess.h
+@@ -253,8 +253,13 @@ do { \
+ \
+ __get_kernel_common(*((type *)(dst)), sizeof(type), \
+ (__force type *)(src)); \
+- if (unlikely(__gu_err)) \
++ if (unlikely(__gu_err)) { \
++ pr_info("%s: memory access failed, ecode 0x%x\n", \
++ __func__, read_csr_excode()); \
++ pr_info("%s: the caller is %pS\n", \
++ __func__, __builtin_return_address(0)); \
+ goto err_label; \
++ } \
+ } while (0)
+
+ #define __put_kernel_nofault(dst, src, type, err_label) \
+@@ -264,8 +269,13 @@ do { \
+ \
+ __pu_val = *(__force type *)(src); \
+ __put_kernel_common(((type *)(dst)), sizeof(type)); \
+- if (unlikely(__pu_err)) \
++ if (unlikely(__pu_err)) { \
++ pr_info("%s: memory access failed, ecode 0x%x\n", \
++ __func__, read_csr_excode()); \
++ pr_info("%s: the caller is %pS\n", \
++ __func__, __builtin_return_address(0)); \
+ goto err_label; \
++ } \
+ } while (0)
+
+ extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
--- /dev/null
+From d3b8491961207ac967795c34375890407fd51a45 Mon Sep 17 00:00:00 2001
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+Date: Mon, 16 Mar 2026 10:36:01 +0800
+Subject: LoongArch: No need to flush icache if text copy failed
+
+From: Tiezhu Yang <yangtiezhu@loongson.cn>
+
+commit d3b8491961207ac967795c34375890407fd51a45 upstream.
+
+If copy_to_kernel_nofault() failed, no need to flush icache and just
+return immediately.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
+Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/loongarch/kernel/inst.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/loongarch/kernel/inst.c
++++ b/arch/loongarch/kernel/inst.c
+@@ -246,13 +246,15 @@ static int text_copy_cb(void *data)
+
+ if (smp_processor_id() == copy->cpu) {
+ ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len);
+- if (ret)
++ if (ret) {
+ pr_err("%s: operation failed\n", __func__);
++ return ret;
++ }
+ }
+
+ flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len);
+
+- return ret;
++ return 0;
+ }
+
+ int larch_insn_text_copy(void *dst, void *src, size_t len)
--- /dev/null
+From 672e5229e1ecfc2a3509b53adcb914d8b024a853 Mon Sep 17 00:00:00 2001
+From: Felix Fietkau <nbd@nbd.name>
+Date: Thu, 5 Mar 2026 17:08:12 +0000
+Subject: mac80211: fix crash in ieee80211_chan_bw_change for AP_VLAN stations
+
+From: Felix Fietkau <nbd@nbd.name>
+
+commit 672e5229e1ecfc2a3509b53adcb914d8b024a853 upstream.
+
+ieee80211_chan_bw_change() iterates all stations and accesses
+link->reserved.oper via sta->sdata->link[link_id]. For stations on
+AP_VLAN interfaces (e.g. 4addr WDS clients), sta->sdata points to
+the VLAN sdata, whose link never participates in chanctx reservations.
+This leaves link->reserved.oper zero-initialized with chan == NULL,
+causing a NULL pointer dereference in __ieee80211_sta_cap_rx_bw()
+when accessing chandef->chan->band during CSA.
+
+Resolve the VLAN sdata to its parent AP sdata using get_bss_sdata()
+before accessing link data.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Link: https://patch.msgid.link/20260305170812.2904208-1-nbd@nbd.name
+[also change sta->sdata in ARRAY_SIZE even if it doesn't matter]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/chan.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -561,14 +561,16 @@ static void ieee80211_chan_bw_change(str
+ rcu_read_lock();
+ list_for_each_entry_rcu(sta, &local->sta_list,
+ list) {
+- struct ieee80211_sub_if_data *sdata = sta->sdata;
++ struct ieee80211_sub_if_data *sdata;
+ enum ieee80211_sta_rx_bandwidth new_sta_bw;
+ unsigned int link_id;
+
+ if (!ieee80211_sdata_running(sta->sdata))
+ continue;
+
+- for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) {
++ sdata = get_bss_sdata(sta->sdata);
++
++ for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); link_id++) {
+ struct ieee80211_link_data *link =
+ rcu_dereference(sdata->link[link_id]);
+ struct ieee80211_bss_conf *link_conf;
--- /dev/null
+From 939080834fef3ce42fdbcfef33fd29c9ffe5bbed Mon Sep 17 00:00:00 2001
+From: Wei Yang <richard.weiyang@gmail.com>
+Date: Thu, 5 Mar 2026 01:50:06 +0000
+Subject: mm/huge_memory: fix early failure try_to_migrate() when split huge pmd for shared THP
+
+From: Wei Yang <richard.weiyang@gmail.com>
+
+commit 939080834fef3ce42fdbcfef33fd29c9ffe5bbed upstream.
+
+Commit 60fbb14396d5 ("mm/huge_memory: adjust try_to_migrate_one() and
+split_huge_pmd_locked()") return false unconditionally after
+split_huge_pmd_locked(). This may fail try_to_migrate() early when
+TTU_SPLIT_HUGE_PMD is specified.
+
+The reason is the above commit adjusted try_to_migrate_one() to, when a
+PMD-mapped THP entry is found, and TTU_SPLIT_HUGE_PMD is specified (for
+example, via unmap_folio()), return false unconditionally. This breaks
+the rmap walk and fail try_to_migrate() early, if this PMD-mapped THP is
+mapped in multiple processes.
+
+The user sensible impact of this bug could be:
+
+ * On memory pressure, shrink_folio_list() may split a partially mapped
+   folio with split_folio_to_list(), then free unmapped pages without IO.
+   If the split fails, the folio may not be reclaimed.
+ * On memory failure, memory_failure() would call try_to_split_thp_page()
+   to split the folio that contains the bad page. If it succeeds, the
+   PG_has_hwpoisoned bit is only set in the after-split folio that
+   contains @split_at. By doing so, we limit bad memory. If the split
+   fails, the whole folio is not usable.
+
+One way to reproduce:
+
+ Create an anonymous THP range and fork 512 children, so we have a
+ THP shared mapped in 513 processes. Then trigger folio split with
+ /sys/kernel/debug/split_huge_pages debugfs to split the THP folio to
+ order 0.
+
+Without the above commit, we can successfully split to order 0. With the
+above commit, the folio is still a large folio.
+
+And currently there are two core users of TTU_SPLIT_HUGE_PMD:
+
+ * try_to_unmap_one()
+ * try_to_migrate_one()
+
+try_to_unmap_one() would restart the rmap walk, so only
+try_to_migrate_one() is affected.
+
+We can't simply revert commit 60fbb14396d5 ("mm/huge_memory: adjust
+try_to_migrate_one() and split_huge_pmd_locked()"), since it removed some
+duplicated check covered by page_vma_mapped_walk().
+
+This patch fixes this by restart page_vma_mapped_walk() after
+split_huge_pmd_locked(). Since we cannot simply return "true" to fix the
+problem, as that would affect another case:
+
+ When invoking folio_try_share_anon_rmap_pmd() from
+ split_huge_pmd_locked(), the latter can fail and leave a large folio
+ mapped through PTEs, in which case we ought to return true from
+ try_to_migrate_one(). This might result in unnecessary walking of the
+ rmap but is relatively harmless.
+
+Link: https://lkml.kernel.org/r/20260305015006.27343-1-richard.weiyang@gmail.com
+Fixes: 60fbb14396d5 ("mm/huge_memory: adjust try_to_migrate_one() and split_huge_pmd_locked()")
+Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: Lance Yang <lance.yang@linux.dev>
+Reviewed-by: Gavin Guo <gavinguo@igalia.com>
+Acked-by: David Hildenbrand (arm) <david@kernel.org>
+Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/rmap.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -2338,11 +2338,17 @@ static bool try_to_migrate_one(struct fo
+ __maybe_unused pmd_t pmdval;
+
+ if (flags & TTU_SPLIT_HUGE_PMD) {
++ /*
++ * split_huge_pmd_locked() might leave the
++ * folio mapped through PTEs. Retry the walk
++ * so we can detect this scenario and properly
++ * abort the walk.
++ */
+ split_huge_pmd_locked(vma, pvmw.address,
+ pvmw.pmd, true);
+- ret = false;
+- page_vma_mapped_walk_done(&pvmw);
+- break;
++ flags &= ~TTU_SPLIT_HUGE_PMD;
++ page_vma_mapped_walk_restart(&pvmw);
++ continue;
+ }
+ #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+ pmdval = pmdp_get(pvmw.pmd);
--- /dev/null
+From fae654083bfa409bb2244f390232e2be47f05bfc Mon Sep 17 00:00:00 2001
+From: Chris Down <chris@chrisdown.name>
+Date: Tue, 3 Mar 2026 07:21:21 +0000
+Subject: mm/huge_memory: fix use of NULL folio in move_pages_huge_pmd()
+
+From: Chris Down <chris@chrisdown.name>
+
+commit fae654083bfa409bb2244f390232e2be47f05bfc upstream.
+
+move_pages_huge_pmd() handles UFFDIO_MOVE for both normal THPs and huge
+zero pages. For the huge zero page path, src_folio is explicitly set to
+NULL, and is used as a sentinel to skip folio operations like lock and
+rmap.
+
+In the huge zero page branch, src_folio is NULL, so folio_mk_pmd(NULL,
+pgprot) passes NULL through folio_pfn() and page_to_pfn(). With
+SPARSEMEM_VMEMMAP this silently produces a bogus PFN, installing a PMD
+pointing to non-existent physical memory. On other memory models it is a
+NULL dereference.
+
+Use page_folio(src_page) to obtain the valid huge zero folio from the
+page, which was obtained from pmd_page() and remains valid throughout.
+
+After commit d82d09e48219 ("mm/huge_memory: mark PMD mappings of the huge
+zero folio special"), moved huge zero PMDs must remain special so
+vm_normal_page_pmd() continues to treat them as special mappings.
+
+move_pages_huge_pmd() currently reconstructs the destination PMD in the
+huge zero page branch, which drops PMD state such as pmd_special() on
+architectures with CONFIG_ARCH_HAS_PTE_SPECIAL. As a result,
+vm_normal_page_pmd() can treat the moved huge zero PMD as a normal page
+and corrupt its refcount.
+
+Instead of reconstructing the PMD from the folio, derive the destination
+entry from src_pmdval after pmdp_huge_clear_flush(), then handle the PMD
+metadata the same way move_huge_pmd() does for moved entries by marking it
+soft-dirty and clearing uffd-wp.
+
+Link: https://lkml.kernel.org/r/a1e787dd-b911-474d-8570-f37685357d86@lucifer.local
+Fixes: e3981db444a0 ("mm: add folio_mk_pmd()")
+Signed-off-by: Chris Down <chris@chrisdown.name>
+Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Tested-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/huge_memory.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -2797,7 +2797,8 @@ int move_pages_huge_pmd(struct mm_struct
+ _dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
+ } else {
+ src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
+- _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
++ _dst_pmd = move_soft_dirty_pmd(src_pmdval);
++ _dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
+ }
+ set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
+
--- /dev/null
+From 29f40594a28114b9a9bc87f6cf7bbee9609628f2 Mon Sep 17 00:00:00 2001
+From: Dev Jain <dev.jain@arm.com>
+Date: Tue, 3 Mar 2026 11:45:28 +0530
+Subject: mm/rmap: fix incorrect pte restoration for lazyfree folios
+
+From: Dev Jain <dev.jain@arm.com>
+
+commit 29f40594a28114b9a9bc87f6cf7bbee9609628f2 upstream.
+
+We batch unmap anonymous lazyfree folios by folio_unmap_pte_batch. If the
+batch has a mix of writable and non-writable bits, we may end up setting
+the entire batch writable. Fix this by respecting writable bit during
+batching.
+
+Although on a successful unmap of a lazyfree folio, the soft-dirty bit is
+lost, preserve it on pte restoration by respecting the bit during
+batching, to make the fix consistent w.r.t both writable bit and
+soft-dirty bit.
+
+I was able to write the below reproducer and crash the kernel.
+Explanation of reproducer (set 64K mTHP to always):
+
+Fault in a 64K large folio. Split the VMA at mid-point with
+MADV_DONTFORK. fork() - parent points to the folio with 8 writable ptes
+and 8 non-writable ptes. Merge the VMAs with MADV_DOFORK so that
+folio_unmap_pte_batch() can determine all the 16 ptes as a batch. Do
+MADV_FREE on the range to mark the folio as lazyfree. Write to the memory
+to dirty the pte, eventually rmap will dirty the folio. Then trigger
+reclaim, we will hit the pte restoration path, and the kernel will crash
+with the trace given below.
+
+The BUG happens at:
+
+ BUG_ON(atomic_inc_return(&ptc->anon_map_count) > 1 && rw);
+
+The code path is asking for anonymous page to be mapped writable into the
+pagetable. The BUG_ON() firing implies that such a writable page has been
+mapped into the pagetables of more than one process, which breaks
+anonymous memory/CoW semantics.
+
+[ 21.134473] kernel BUG at mm/page_table_check.c:118!
+[ 21.134497] Internal error: Oops - BUG: 00000000f2000800 [#1] SMP
+[ 21.135917] Modules linked in:
+[ 21.136085] CPU: 1 UID: 0 PID: 1735 Comm: dup-lazyfree Not tainted 7.0.0-rc1-00116-g018018a17770 #1028 PREEMPT
+[ 21.136858] Hardware name: linux,dummy-virt (DT)
+[ 21.137019] pstate: 21400005 (nzCv daif +PAN -UAO -TCO +DIT -SSBS BTYPE=--)
+[ 21.137308] pc : page_table_check_set+0x28c/0x2a8
+[ 21.137607] lr : page_table_check_set+0x134/0x2a8
+[ 21.137885] sp : ffff80008a3b3340
+[ 21.138124] x29: ffff80008a3b3340 x28: fffffdffc3d14400 x27: ffffd1a55e03d000
+[ 21.138623] x26: 0040000000000040 x25: ffffd1a55f7dd000 x24: 0000000000000001
+[ 21.139045] x23: 0000000000000001 x22: 0000000000000001 x21: ffffd1a55f217f30
+[ 21.139629] x20: 0000000000134521 x19: 0000000000134519 x18: 005c43e000040000
+[ 21.140027] x17: 0001400000000000 x16: 0001700000000000 x15: 000000000000ffff
+[ 21.140578] x14: 000000000000000c x13: 005c006000000000 x12: 0000000000000020
+[ 21.140828] x11: 0000000000000000 x10: 005c000000000000 x9 : ffffd1a55c079ee0
+[ 21.141077] x8 : 0000000000000001 x7 : 005c03e000040000 x6 : 000000004000ffff
+[ 21.141490] x5 : ffff00017fffce00 x4 : 0000000000000001 x3 : 0000000000000002
+[ 21.141741] x2 : 0000000000134510 x1 : 0000000000000000 x0 : ffff0000c08228c0
+[ 21.141991] Call trace:
+[ 21.142093] page_table_check_set+0x28c/0x2a8 (P)
+[ 21.142265] __page_table_check_ptes_set+0x144/0x1e8
+[ 21.142441] __set_ptes_anysz.constprop.0+0x160/0x1a8
+[ 21.142766] contpte_set_ptes+0xe8/0x140
+[ 21.142907] try_to_unmap_one+0x10c4/0x10d0
+[ 21.143177] rmap_walk_anon+0x100/0x250
+[ 21.143315] try_to_unmap+0xa0/0xc8
+[ 21.143441] shrink_folio_list+0x59c/0x18a8
+[ 21.143759] shrink_lruvec+0x664/0xbf0
+[ 21.144043] shrink_node+0x218/0x878
+[ 21.144285] __node_reclaim.constprop.0+0x98/0x338
+[ 21.144763] user_proactive_reclaim+0x2a4/0x340
+[ 21.145056] reclaim_store+0x3c/0x60
+[ 21.145216] dev_attr_store+0x20/0x40
+[ 21.145585] sysfs_kf_write+0x84/0xa8
+[ 21.145835] kernfs_fop_write_iter+0x130/0x1c8
+[ 21.145994] vfs_write+0x2b8/0x368
+[ 21.146119] ksys_write+0x70/0x110
+[ 21.146240] __arm64_sys_write+0x24/0x38
+[ 21.146380] invoke_syscall+0x50/0x120
+[ 21.146513] el0_svc_common.constprop.0+0x48/0xf8
+[ 21.146679] do_el0_svc+0x28/0x40
+[ 21.146798] el0_svc+0x34/0x110
+[ 21.146926] el0t_64_sync_handler+0xa0/0xe8
+[ 21.147074] el0t_64_sync+0x198/0x1a0
+[ 21.147225] Code: f9400441 b4fff241 17ffff94 d4210000 (d4210000)
+[ 21.147440] ---[ end trace 0000000000000000 ]---
+
+#define _GNU_SOURCE
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <string.h>
+#include <sys/wait.h>
+#include <sched.h>
+#include <fcntl.h>
+
+void write_to_reclaim() {
+ const char *path = "/sys/devices/system/node/node0/reclaim";
+ const char *value = "409600000000";
+ int fd = open(path, O_WRONLY);
+ if (fd == -1) {
+ perror("open");
+ exit(EXIT_FAILURE);
+ }
+
+ if (write(fd, value, sizeof("409600000000") - 1) == -1) {
+ perror("write");
+ close(fd);
+ exit(EXIT_FAILURE);
+ }
+
+ printf("Successfully wrote %s to %s\n", value, path);
+ close(fd);
+}
+
+int main()
+{
+ char *ptr = mmap((void *)(1UL << 30), 1UL << 16, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ if ((unsigned long)ptr != (1UL << 30)) {
+ perror("mmap");
+ return 1;
+ }
+
+ /* a 64K folio gets faulted in */
+ memset(ptr, 0, 1UL << 16);
+
+ /* 32K half will not be shared into child */
+ if (madvise(ptr, 1UL << 15, MADV_DONTFORK)) {
+ perror("madvise madv dontfork");
+ return 1;
+ }
+
+ pid_t pid = fork();
+
+ if (pid < 0) {
+ perror("fork");
+ return 1;
+ } else if (pid == 0) {
+ sleep(15);
+ } else {
+ /* merge VMAs. now first half of the 16 ptes are writable, the other half not. */
+ if (madvise(ptr, 1UL << 15, MADV_DOFORK)) {
+ perror("madvise madv fork");
+ return 1;
+ }
+ if (madvise(ptr, (1UL << 16), MADV_FREE)) {
+ perror("madvise madv free");
+ return 1;
+ }
+
+ /* dirty the large folio */
+ (*ptr) += 10;
+
+ write_to_reclaim();
+ // sleep(10);
+ waitpid(pid, NULL, 0);
+
+ }
+}
+
+Link: https://lkml.kernel.org/r/20260303061528.2429162-1-dev.jain@arm.com
+Fixes: 354dffd29575 ("mm: support batched unmap for lazyfree large folios during reclamation")
+Signed-off-by: Dev Jain <dev.jain@arm.com>
+Acked-by: David Hildenbrand (Arm) <david@kernel.org>
+Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
+Reviewed-by: Barry Song <baohua@kernel.org>
+Reviewed-by: Wei Yang <richard.weiyang@gmail.com>
+Tested-by: Lance Yang <lance.yang@linux.dev>
+Cc: Anshuman Khandual <anshuman.khandual@arm.com>
+Cc: Harry Yoo <harry.yoo@oracle.com>
+Cc: Jann Horn <jannh@google.com>
+Cc: Liam Howlett <liam.howlett@oracle.com>
+Cc: Rik van Riel <riel@surriel.com>
+Cc: Ryan Roberts <ryan.roberts@arm.com>
+Cc: Vlastimil Babka <vbabka@suse.cz>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/rmap.c | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1843,7 +1843,14 @@ static inline unsigned int folio_unmap_p
+ if (pte_unused(pte))
+ return 1;
+
+- return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
++ /*
++ * If unmap fails, we need to restore the ptes. To avoid accidentally
++ * upgrading write permissions for ptes that were not originally
++ * writable, and to avoid losing the soft-dirty bit, use the
++ * appropriate FPB flags.
++ */
++ return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
++ FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
+ }
+
+ /*
--- /dev/null
+From 8da13e6d63c1a97f7302d342c89c4a56a55c7015 Mon Sep 17 00:00:00 2001
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+Date: Mon, 16 Mar 2026 13:38:24 +0300
+Subject: net: macb: fix use-after-free access to PTP clock
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+commit 8da13e6d63c1a97f7302d342c89c4a56a55c7015 upstream.
+
+PTP clock is registered on every opening of the interface and destroyed on
+every closing. However it may be accessed via get_ts_info ethtool call
+which is possible while the interface is just present in the kernel.
+
+BUG: KASAN: use-after-free in ptp_clock_index+0x47/0x50 drivers/ptp/ptp_clock.c:426
+Read of size 4 at addr ffff8880194345cc by task syz.0.6/948
+
+CPU: 1 PID: 948 Comm: syz.0.6 Not tainted 6.1.164+ #109
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.1-0-g3208b098f51a-prebuilt.qemu.org 04/01/2014
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0x8d/0xba lib/dump_stack.c:106
+ print_address_description mm/kasan/report.c:316 [inline]
+ print_report+0x17f/0x496 mm/kasan/report.c:420
+ kasan_report+0xd9/0x180 mm/kasan/report.c:524
+ ptp_clock_index+0x47/0x50 drivers/ptp/ptp_clock.c:426
+ gem_get_ts_info+0x138/0x1e0 drivers/net/ethernet/cadence/macb_main.c:3349
+ macb_get_ts_info+0x68/0xb0 drivers/net/ethernet/cadence/macb_main.c:3371
+ __ethtool_get_ts_info+0x17c/0x260 net/ethtool/common.c:558
+ ethtool_get_ts_info net/ethtool/ioctl.c:2367 [inline]
+ __dev_ethtool net/ethtool/ioctl.c:3017 [inline]
+ dev_ethtool+0x2b05/0x6290 net/ethtool/ioctl.c:3095
+ dev_ioctl+0x637/0x1070 net/core/dev_ioctl.c:510
+ sock_do_ioctl+0x20d/0x2c0 net/socket.c:1215
+ sock_ioctl+0x577/0x6d0 net/socket.c:1320
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:870 [inline]
+ __se_sys_ioctl fs/ioctl.c:856 [inline]
+ __x64_sys_ioctl+0x18c/0x210 fs/ioctl.c:856
+ do_syscall_x64 arch/x86/entry/common.c:46 [inline]
+ do_syscall_64+0x35/0x80 arch/x86/entry/common.c:76
+ entry_SYSCALL_64_after_hwframe+0x6e/0xd8
+ </TASK>
+
+Allocated by task 457:
+ kmalloc include/linux/slab.h:563 [inline]
+ kzalloc include/linux/slab.h:699 [inline]
+ ptp_clock_register+0x144/0x10e0 drivers/ptp/ptp_clock.c:235
+ gem_ptp_init+0x46f/0x930 drivers/net/ethernet/cadence/macb_ptp.c:375
+ macb_open+0x901/0xd10 drivers/net/ethernet/cadence/macb_main.c:2920
+ __dev_open+0x2ce/0x500 net/core/dev.c:1501
+ __dev_change_flags+0x56a/0x740 net/core/dev.c:8651
+ dev_change_flags+0x92/0x170 net/core/dev.c:8722
+ do_setlink+0xaf8/0x3a80 net/core/rtnetlink.c:2833
+ __rtnl_newlink+0xbf4/0x1940 net/core/rtnetlink.c:3608
+ rtnl_newlink+0x63/0xa0 net/core/rtnetlink.c:3655
+ rtnetlink_rcv_msg+0x3c6/0xed0 net/core/rtnetlink.c:6150
+ netlink_rcv_skb+0x15d/0x430 net/netlink/af_netlink.c:2511
+ netlink_unicast_kernel net/netlink/af_netlink.c:1318 [inline]
+ netlink_unicast+0x6d7/0xa30 net/netlink/af_netlink.c:1344
+ netlink_sendmsg+0x97e/0xeb0 net/netlink/af_netlink.c:1872
+ sock_sendmsg_nosec net/socket.c:718 [inline]
+ __sock_sendmsg+0x14b/0x180 net/socket.c:730
+ __sys_sendto+0x320/0x3b0 net/socket.c:2152
+ __do_sys_sendto net/socket.c:2164 [inline]
+ __se_sys_sendto net/socket.c:2160 [inline]
+ __x64_sys_sendto+0xdc/0x1b0 net/socket.c:2160
+ do_syscall_x64 arch/x86/entry/common.c:46 [inline]
+ do_syscall_64+0x35/0x80 arch/x86/entry/common.c:76
+ entry_SYSCALL_64_after_hwframe+0x6e/0xd8
+
+Freed by task 938:
+ kasan_slab_free include/linux/kasan.h:177 [inline]
+ slab_free_hook mm/slub.c:1729 [inline]
+ slab_free_freelist_hook mm/slub.c:1755 [inline]
+ slab_free mm/slub.c:3687 [inline]
+ __kmem_cache_free+0xbc/0x320 mm/slub.c:3700
+ device_release+0xa0/0x240 drivers/base/core.c:2507
+ kobject_cleanup lib/kobject.c:681 [inline]
+ kobject_release lib/kobject.c:712 [inline]
+ kref_put include/linux/kref.h:65 [inline]
+ kobject_put+0x1cd/0x350 lib/kobject.c:729
+ put_device+0x1b/0x30 drivers/base/core.c:3805
+ ptp_clock_unregister+0x171/0x270 drivers/ptp/ptp_clock.c:391
+ gem_ptp_remove+0x4e/0x1f0 drivers/net/ethernet/cadence/macb_ptp.c:404
+ macb_close+0x1c8/0x270 drivers/net/ethernet/cadence/macb_main.c:2966
+ __dev_close_many+0x1b9/0x310 net/core/dev.c:1585
+ __dev_close net/core/dev.c:1597 [inline]
+ __dev_change_flags+0x2bb/0x740 net/core/dev.c:8649
+ dev_change_flags+0x92/0x170 net/core/dev.c:8722
+ dev_ifsioc+0x151/0xe00 net/core/dev_ioctl.c:326
+ dev_ioctl+0x33e/0x1070 net/core/dev_ioctl.c:572
+ sock_do_ioctl+0x20d/0x2c0 net/socket.c:1215
+ sock_ioctl+0x577/0x6d0 net/socket.c:1320
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:870 [inline]
+ __se_sys_ioctl fs/ioctl.c:856 [inline]
+ __x64_sys_ioctl+0x18c/0x210 fs/ioctl.c:856
+ do_syscall_x64 arch/x86/entry/common.c:46 [inline]
+ do_syscall_64+0x35/0x80 arch/x86/entry/common.c:76
+ entry_SYSCALL_64_after_hwframe+0x6e/0xd8
+
+Set the PTP clock pointer to NULL after unregistering.
+
+Fixes: c2594d804d5c ("macb: Common code to enable ptp support for MACB/GEM")
+Cc: stable@vger.kernel.org
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Link: https://patch.msgid.link/20260316103826.74506-1-pchelkin@ispras.ru
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/ethernet/cadence/macb_ptp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/ethernet/cadence/macb_ptp.c
++++ b/drivers/net/ethernet/cadence/macb_ptp.c
+@@ -357,8 +357,10 @@ void gem_ptp_remove(struct net_device *n
+ {
+ struct macb *bp = netdev_priv(ndev);
+
+- if (bp->ptp_clock)
++ if (bp->ptp_clock) {
+ ptp_clock_unregister(bp->ptp_clock);
++ bp->ptp_clock = NULL;
++ }
+
+ gem_ptp_clear_timer(bp);
+
--- /dev/null
+From 55dc632ab2ac2889b15995a9eef56c753d48ebc7 Mon Sep 17 00:00:00 2001
+From: Ian Ray <ian.ray@gehealthcare.com>
+Date: Tue, 17 Mar 2026 10:53:36 +0200
+Subject: NFC: nxp-nci: allow GPIOs to sleep
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+commit 55dc632ab2ac2889b15995a9eef56c753d48ebc7 upstream.
+
+Allow the firmware and enable GPIOs to sleep.
+
+This fixes a `WARN_ON' and allows the driver to operate GPIOs which are
+connected to I2C GPIO expanders.
+
+-- >8 --
+kernel: WARNING: CPU: 3 PID: 2636 at drivers/gpio/gpiolib.c:3880 gpiod_set_value+0x88/0x98
+-- >8 --
+
+Fixes: 43201767b44c ("NFC: nxp-nci: Convert to use GPIO descriptor")
+Cc: stable@vger.kernel.org
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260317085337.146545-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nfc/nxp-nci/i2c.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/nfc/nxp-nci/i2c.c
++++ b/drivers/nfc/nxp-nci/i2c.c
+@@ -47,8 +47,8 @@ static int nxp_nci_i2c_set_mode(void *ph
+ {
+ struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id;
+
+- gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
+- gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
++ gpiod_set_value_cansleep(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0);
++ gpiod_set_value_cansleep(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0);
+ usleep_range(10000, 15000);
+
+ if (mode == NXP_NCI_MODE_COLD)
--- /dev/null
+From 48db892356d6cb80f6942885545de4a6dd8d2a29 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 19 Feb 2026 16:50:16 -0500
+Subject: NFSD: Defer sub-object cleanup in export put callbacks
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 48db892356d6cb80f6942885545de4a6dd8d2a29 upstream.
+
+svc_export_put() calls path_put() and auth_domain_put() immediately
+when the last reference drops, before the RCU grace period. RCU
+readers in e_show() and c_show() access both ex_path (via
+seq_path/d_path) and ex_client->name (via seq_escape) without
+holding a reference. If cache_clean removes the entry and drops the
+last reference concurrently, the sub-objects are freed while still
+in use, producing a NULL pointer dereference in d_path.
+
+Commit 2530766492ec ("nfsd: fix UAF when access ex_uuid or
+ex_stats") moved kfree of ex_uuid and ex_stats into the
+call_rcu callback, but left path_put() and auth_domain_put() running
+before the grace period because both may sleep and call_rcu
+callbacks execute in softirq context.
+
+Replace call_rcu/kfree_rcu with queue_rcu_work(), which defers the
+callback until after the RCU grace period and executes it in process
+context where sleeping is permitted. This allows path_put() and
+auth_domain_put() to be moved into the deferred callback alongside
+the other resource releases. Apply the same fix to expkey_put(),
+which has the identical pattern with ek_path and ek_client.
+
+A dedicated workqueue scopes the shutdown drain to only NFSD
+export release work items; flushing the shared
+system_unbound_wq would stall on unrelated work from other
+subsystems. nfsd_export_shutdown() uses rcu_barrier() followed
+by flush_workqueue() to ensure all deferred release callbacks
+complete before the export caches are destroyed.
+
+Reported-by: Misbah Anjum N <misanjum@linux.ibm.com>
+Closes: https://lore.kernel.org/linux-nfs/dcd371d3a95815a84ba7de52cef447b8@linux.ibm.com/
+Fixes: c224edca7af0 ("nfsd: no need get cache ref when protected by rcu")
+Fixes: 1b10f0b603c0 ("SUNRPC: no need get cache ref when protected by rcu")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: NeilBrown <neil@brown.name>
+Tested-by: Olga Kornievskaia <okorniev@redhat.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/export.c | 63 +++++++++++++++++++++++++++++++++++++++++++++++--------
+ fs/nfsd/export.h | 7 ++++--
+ fs/nfsd/nfsctl.c | 8 ++++++
+ 3 files changed, 66 insertions(+), 12 deletions(-)
+
+--- a/fs/nfsd/export.c
++++ b/fs/nfsd/export.c
+@@ -36,19 +36,30 @@
+ * second map contains a reference to the entry in the first map.
+ */
+
++static struct workqueue_struct *nfsd_export_wq;
++
+ #define EXPKEY_HASHBITS 8
+ #define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
+ #define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
+
+-static void expkey_put(struct kref *ref)
++static void expkey_release(struct work_struct *work)
+ {
+- struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
++ struct svc_expkey *key = container_of(to_rcu_work(work),
++ struct svc_expkey, ek_rwork);
+
+ if (test_bit(CACHE_VALID, &key->h.flags) &&
+ !test_bit(CACHE_NEGATIVE, &key->h.flags))
+ path_put(&key->ek_path);
+ auth_domain_put(key->ek_client);
+- kfree_rcu(key, ek_rcu);
++ kfree(key);
++}
++
++static void expkey_put(struct kref *ref)
++{
++ struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
++
++ INIT_RCU_WORK(&key->ek_rwork, expkey_release);
++ queue_rcu_work(nfsd_export_wq, &key->ek_rwork);
+ }
+
+ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
+@@ -353,11 +364,13 @@ static void export_stats_destroy(struct
+ EXP_STATS_COUNTERS_NUM);
+ }
+
+-static void svc_export_release(struct rcu_head *rcu_head)
++static void svc_export_release(struct work_struct *work)
+ {
+- struct svc_export *exp = container_of(rcu_head, struct svc_export,
+- ex_rcu);
++ struct svc_export *exp = container_of(to_rcu_work(work),
++ struct svc_export, ex_rwork);
+
++ path_put(&exp->ex_path);
++ auth_domain_put(exp->ex_client);
+ nfsd4_fslocs_free(&exp->ex_fslocs);
+ export_stats_destroy(exp->ex_stats);
+ kfree(exp->ex_stats);
+@@ -369,9 +382,8 @@ static void svc_export_put(struct kref *
+ {
+ struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
+
+- path_put(&exp->ex_path);
+- auth_domain_put(exp->ex_client);
+- call_rcu(&exp->ex_rcu, svc_export_release);
++ INIT_RCU_WORK(&exp->ex_rwork, svc_export_release);
++ queue_rcu_work(nfsd_export_wq, &exp->ex_rwork);
+ }
+
+ static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
+@@ -1480,6 +1492,36 @@ const struct seq_operations nfs_exports_
+ .show = e_show,
+ };
+
++/**
++ * nfsd_export_wq_init - allocate the export release workqueue
++ *
++ * Called once at module load. The workqueue runs deferred svc_export and
++ * svc_expkey release work scheduled by queue_rcu_work() in the cache put
++ * callbacks.
++ *
++ * Return values:
++ * %0: workqueue allocated
++ * %-ENOMEM: allocation failed
++ */
++int nfsd_export_wq_init(void)
++{
++ nfsd_export_wq = alloc_workqueue("nfsd_export", WQ_UNBOUND, 0);
++ if (!nfsd_export_wq)
++ return -ENOMEM;
++ return 0;
++}
++
++/**
++ * nfsd_export_wq_shutdown - drain and free the export release workqueue
++ *
++ * Called once at module unload. Per-namespace teardown in
++ * nfsd_export_shutdown() has already drained all deferred work.
++ */
++void nfsd_export_wq_shutdown(void)
++{
++ destroy_workqueue(nfsd_export_wq);
++}
++
+ /*
+ * Initialize the exports module.
+ */
+@@ -1541,6 +1583,9 @@ nfsd_export_shutdown(struct net *net)
+
+ cache_unregister_net(nn->svc_expkey_cache, net);
+ cache_unregister_net(nn->svc_export_cache, net);
++ /* Drain deferred export and expkey release work. */
++ rcu_barrier();
++ flush_workqueue(nfsd_export_wq);
+ cache_destroy_net(nn->svc_expkey_cache, net);
+ cache_destroy_net(nn->svc_export_cache, net);
+ svcauth_unix_purge(net);
+--- a/fs/nfsd/export.h
++++ b/fs/nfsd/export.h
+@@ -7,6 +7,7 @@
+
+ #include <linux/sunrpc/cache.h>
+ #include <linux/percpu_counter.h>
++#include <linux/workqueue.h>
+ #include <uapi/linux/nfsd/export.h>
+ #include <linux/nfs4.h>
+
+@@ -75,7 +76,7 @@ struct svc_export {
+ u32 ex_layout_types;
+ struct nfsd4_deviceid_map *ex_devid_map;
+ struct cache_detail *cd;
+- struct rcu_head ex_rcu;
++ struct rcu_work ex_rwork;
+ unsigned long ex_xprtsec_modes;
+ struct export_stats *ex_stats;
+ };
+@@ -92,7 +93,7 @@ struct svc_expkey {
+ u32 ek_fsid[6];
+
+ struct path ek_path;
+- struct rcu_head ek_rcu;
++ struct rcu_work ek_rwork;
+ };
+
+ #define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
+@@ -110,6 +111,8 @@ __be32 check_nfsd_access(struct svc_expo
+ /*
+ * Function declarations
+ */
++int nfsd_export_wq_init(void);
++void nfsd_export_wq_shutdown(void);
+ int nfsd_export_init(struct net *);
+ void nfsd_export_shutdown(struct net *);
+ void nfsd_export_flush(struct net *);
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -2252,9 +2252,12 @@ static int __init init_nfsd(void)
+ if (retval)
+ goto out_free_pnfs;
+ nfsd_lockd_init(); /* lockd->nfsd callbacks */
++ retval = nfsd_export_wq_init();
++ if (retval)
++ goto out_free_lockd;
+ retval = register_pernet_subsys(&nfsd_net_ops);
+ if (retval < 0)
+- goto out_free_lockd;
++ goto out_free_export_wq;
+ retval = register_cld_notifier();
+ if (retval)
+ goto out_free_subsys;
+@@ -2283,6 +2286,8 @@ out_free_cld:
+ unregister_cld_notifier();
+ out_free_subsys:
+ unregister_pernet_subsys(&nfsd_net_ops);
++out_free_export_wq:
++ nfsd_export_wq_shutdown();
+ out_free_lockd:
+ nfsd_lockd_shutdown();
+ nfsd_drc_slab_free();
+@@ -2303,6 +2308,7 @@ static void __exit exit_nfsd(void)
+ nfsd4_destroy_laundry_wq();
+ unregister_cld_notifier();
+ unregister_pernet_subsys(&nfsd_net_ops);
++ nfsd_export_wq_shutdown();
+ nfsd_drc_slab_free();
+ nfsd_lockd_shutdown();
+ nfsd4_free_slabs();
--- /dev/null
+From 5133b61aaf437e5f25b1b396b14242a6bb0508e2 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Tue, 24 Feb 2026 11:33:35 -0500
+Subject: nfsd: fix heap overflow in NFSv4.0 LOCK replay cache
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 5133b61aaf437e5f25b1b396b14242a6bb0508e2 upstream.
+
+The NFSv4.0 replay cache uses a fixed 112-byte inline buffer
+(rp_ibuf[NFSD4_REPLAY_ISIZE]) to store encoded operation responses.
+This size was calculated based on OPEN responses and does not account
+for LOCK denied responses, which include the conflicting lock owner as
+a variable-length field up to 1024 bytes (NFS4_OPAQUE_LIMIT).
+
+When a LOCK operation is denied due to a conflict with an existing lock
+that has a large owner, nfsd4_encode_operation() copies the full encoded
+response into the undersized replay buffer via read_bytes_from_xdr_buf()
+with no bounds check. This results in a slab-out-of-bounds write of up
+to 944 bytes past the end of the buffer, corrupting adjacent heap memory.
+
+This can be triggered remotely by an unauthenticated attacker with two
+cooperating NFSv4.0 clients: one sets a lock with a large owner string,
+then the other requests a conflicting lock to provoke the denial.
+
+We could fix this by increasing NFSD4_REPLAY_ISIZE to allow for a full
+opaque, but that would increase the size of every stateowner, when most
+lockowners are not that large.
+
+Instead, fix this by checking the encoded response length against
+NFSD4_REPLAY_ISIZE before copying into the replay buffer. If the
+response is too large, set rp_buflen to 0 to skip caching the replay
+payload. The status is still cached, and the client already received the
+correct response on the original request.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@kernel.org
+Reported-by: Nicholas Carlini <npc@anthropic.com>
+Tested-by: Nicholas Carlini <npc@anthropic.com>
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfs4xdr.c | 9 +++++++--
+ fs/nfsd/state.h | 17 ++++++++++++-----
+ 2 files changed, 19 insertions(+), 7 deletions(-)
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -5946,9 +5946,14 @@ nfsd4_encode_operation(struct nfsd4_comp
+ int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
+
+ so->so_replay.rp_status = op->status;
+- so->so_replay.rp_buflen = len;
+- read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
++ if (len <= NFSD4_REPLAY_ISIZE) {
++ so->so_replay.rp_buflen = len;
++ read_bytes_from_xdr_buf(xdr->buf,
++ op_status_offset + XDR_UNIT,
+ so->so_replay.rp_buf, len);
++ } else {
++ so->so_replay.rp_buflen = 0;
++ }
+ }
+ status:
+ op->status = nfsd4_map_status(op->status,
+--- a/fs/nfsd/state.h
++++ b/fs/nfsd/state.h
+@@ -541,11 +541,18 @@ struct nfs4_client_reclaim {
+ struct xdr_netobj cr_princhash;
+ };
+
+-/* A reasonable value for REPLAY_ISIZE was estimated as follows:
+- * The OPEN response, typically the largest, requires
+- * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
+- * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
+- * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
++/*
++ * REPLAY_ISIZE is sized for an OPEN response with delegation:
++ * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) +
++ * 8(verifier) + 4(deleg. type) + 8(deleg. stateid) +
++ * 4(deleg. recall flag) + 20(deleg. space limit) +
++ * ~32(deleg. ace) = 112 bytes
++ *
++ * Some responses can exceed this. A LOCK denial includes the conflicting
++ * lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses
++ * larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is
++ * saved. Enlarging this constant increases the size of every
++ * nfs4_stateowner.
+ */
+
+ #define NFSD4_REPLAY_ISIZE 112
--- /dev/null
+From e7fcf179b82d3a3730fd8615da01b087cc654d0b Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Thu, 19 Feb 2026 16:50:17 -0500
+Subject: NFSD: Hold net reference for the lifetime of /proc/fs/nfs/exports fd
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit e7fcf179b82d3a3730fd8615da01b087cc654d0b upstream.
+
+The /proc/fs/nfs/exports proc entry is created at module init
+and persists for the module's lifetime. exports_proc_open()
+captures the caller's current network namespace and stores
+its svc_export_cache in seq->private, but takes no reference
+on the namespace. If the namespace is subsequently torn down
+(e.g. container destruction after the opener does setns() to a
+different namespace), nfsd_net_exit() calls nfsd_export_shutdown()
+which frees the cache. Subsequent reads on the still-open fd
+dereference the freed cache_detail, walking a freed hash table.
+
+Hold a reference on the struct net for the lifetime of the open
+file descriptor. This prevents nfsd_net_exit() from running --
+and thus prevents nfsd_export_shutdown() from freeing the cache
+-- while any exports fd is open. cache_detail already stores
+its net pointer (cd->net, set by cache_create_net()), so
+exports_release() can retrieve it without additional per-file
+storage.
+
+Reported-by: Misbah Anjum N <misanjum@linux.ibm.com>
+Closes: https://lore.kernel.org/linux-nfs/dcd371d3a95815a84ba7de52cef447b8@linux.ibm.com/
+Fixes: 96d851c4d28d ("nfsd: use proper net while reading "exports" file")
+Cc: stable@vger.kernel.org
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Reviewed-by: NeilBrown <neil@brown.name>
+Tested-by: Olga Kornievskaia <okorniev@redhat.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nfsd/nfsctl.c | 14 ++++++++++++--
+ 1 file changed, 12 insertions(+), 2 deletions(-)
+
+--- a/fs/nfsd/nfsctl.c
++++ b/fs/nfsd/nfsctl.c
+@@ -149,9 +149,19 @@ static int exports_net_open(struct net *
+
+ seq = file->private_data;
+ seq->private = nn->svc_export_cache;
++ get_net(net);
+ return 0;
+ }
+
++static int exports_release(struct inode *inode, struct file *file)
++{
++ struct seq_file *seq = file->private_data;
++ struct cache_detail *cd = seq->private;
++
++ put_net(cd->net);
++ return seq_release(inode, file);
++}
++
+ static int exports_nfsd_open(struct inode *inode, struct file *file)
+ {
+ return exports_net_open(inode->i_sb->s_fs_info, file);
+@@ -161,7 +171,7 @@ static const struct file_operations expo
+ .open = exports_nfsd_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = seq_release,
++ .release = exports_release,
+ };
+
+ static int export_features_show(struct seq_file *m, void *v)
+@@ -1375,7 +1385,7 @@ static const struct proc_ops exports_pro
+ .proc_open = exports_proc_open,
+ .proc_read = seq_read,
+ .proc_lseek = seq_lseek,
+- .proc_release = seq_release,
++ .proc_release = exports_release,
+ };
+
+ static int create_proc_exports_entry(void)
--- /dev/null
+From a8aec14230322ed8f1e8042b6d656c1631d41163 Mon Sep 17 00:00:00 2001
+From: Ira Weiny <ira.weiny@intel.com>
+Date: Fri, 6 Mar 2026 12:33:05 -0600
+Subject: nvdimm/bus: Fix potential use after free in asynchronous initialization
+
+From: Ira Weiny <ira.weiny@intel.com>
+
+commit a8aec14230322ed8f1e8042b6d656c1631d41163 upstream.
+
+Dingisoul with KASAN reports a use after free if device_add() fails in
+nd_async_device_register().
+
+Commit b6eae0f61db2 ("libnvdimm: Hold reference on parent while
+scheduling async init") correctly added a reference on the parent device
+to be held until asynchronous initialization was complete. However, if
+device_add() results in an allocation failure the ref count of the
+device drops to 0 prior to the parent pointer being accessed. Thus
+resulting in use after free.
+
+The bug bot AI correctly identified the fix. Save a reference to the
+parent pointer to be used to drop the parent reference regardless of the
+outcome of device_add().
+
+Reported-by: Dingisoul <dingiso.kernel@gmail.com>
+Closes: http://lore.kernel.org/8855544b-be9e-4153-aa55-0bc328b13733@gmail.com
+Fixes: b6eae0f61db2 ("libnvdimm: Hold reference on parent while scheduling async init")
+Cc: stable@vger.kernel.org
+Reviewed-by: Dave Jiang <dave.jiang@intel.com>
+Link: https://patch.msgid.link/20260306-fix-uaf-async-init-v1-1-a28fd7526723@intel.com
+Signed-off-by: Ira Weiny <ira.weiny@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/nvdimm/bus.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/drivers/nvdimm/bus.c
++++ b/drivers/nvdimm/bus.c
+@@ -486,14 +486,15 @@ EXPORT_SYMBOL_GPL(nd_synchronize);
+ static void nd_async_device_register(void *d, async_cookie_t cookie)
+ {
+ struct device *dev = d;
++ struct device *parent = dev->parent;
+
+ if (device_add(dev) != 0) {
+ dev_err(dev, "%s: failed\n", __func__);
+ put_device(dev);
+ }
+ put_device(dev);
+- if (dev->parent)
+- put_device(dev->parent);
++ if (parent)
++ put_device(parent);
+ }
+
+ static void nd_async_device_unregister(void *d, async_cookie_t cookie)
--- /dev/null
+From 2c98a8fbd6aa647414c6248dacf254ebe91c79ad Mon Sep 17 00:00:00 2001
+From: Helge Deller <deller@gmx.de>
+Date: Mon, 9 Mar 2026 15:16:37 +0100
+Subject: parisc: Flush correct cache in cacheflush() syscall
+
+From: Helge Deller <deller@gmx.de>
+
+commit 2c98a8fbd6aa647414c6248dacf254ebe91c79ad upstream.
+
+The assembly flush instructions were swapped for I- and D-cache flags:
+
+SYSCALL_DEFINE3(cacheflush, ...)
+{
+ if (cache & DCACHE) {
+ "fic ...\n"
+ }
+ if (cache & ICACHE && error == 0) {
+ "fdc ...\n"
+ }
+
+Fix it by using fdc for DCACHE, and fic for ICACHE flushing.
+
+Reported-by: Felix Lechner <felix.lechner@lease-up.com>
+Fixes: c6d96328fecd ("parisc: Add cacheflush() syscall")
+Cc: <stable@vger.kernel.org> # v6.5+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/kernel/cache.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/parisc/kernel/cache.c
++++ b/arch/parisc/kernel/cache.c
+@@ -953,7 +953,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned lon
+ #else
+ "1: cmpb,<<,n %0,%2,1b\n"
+ #endif
+- " fic,m %3(%4,%0)\n"
++ " fdc,m %3(%4,%0)\n"
+ "2: sync\n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
+ : "+r" (start), "+r" (error)
+@@ -968,7 +968,7 @@ SYSCALL_DEFINE3(cacheflush, unsigned lon
+ #else
+ "1: cmpb,<<,n %0,%2,1b\n"
+ #endif
+- " fdc,m %3(%4,%0)\n"
++ " fic,m %3(%4,%0)\n"
+ "2: sync\n"
+ ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 2b, "%1")
+ : "+r" (start), "+r" (error)
--- /dev/null
+From 5d4c6c132ea9a967d48890dd03e6a786c060e968 Mon Sep 17 00:00:00 2001
+From: Benjamin Tissoires <bentiss@kernel.org>
+Date: Fri, 13 Mar 2026 08:40:24 +0100
+Subject: selftests/hid: fix compilation when bpf_wq and hid_device are not exported
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Benjamin Tissoires <bentiss@kernel.org>
+
+commit 5d4c6c132ea9a967d48890dd03e6a786c060e968 upstream.
+
+This can happen in situations when CONFIG_HID_SUPPORT is set to no, or
+some complex situations where struct bpf_wq is not exported.
+
+So do the usual dance of hiding them before including vmlinux.h, and
+then redefining them and make use of CO-RE to have the correct offsets.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Closes: https://lore.kernel.org/oe-kbuild-all/202603111558.KLCIxsZB-lkp@intel.com/
+Fixes: fe8d561db3e8 ("selftests/hid: add wq test for hid_bpf_input_report()")
+Cc: stable@vger.kernel.org
+Acked-by: Jiri Kosina <jkosina@suse.com>
+Reviewed-by: Thomas Weißschuh <thomas.weissschuh@linutronix.de>
+Signed-off-by: Benjamin Tissoires <bentiss@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/hid/progs/hid_bpf_helpers.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
++++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+@@ -6,8 +6,10 @@
+ #define __HID_BPF_HELPERS_H
+
+ /* "undefine" structs and enums in vmlinux.h, because we "override" them below */
++#define bpf_wq bpf_wq___not_used
+ #define hid_bpf_ctx hid_bpf_ctx___not_used
+ #define hid_bpf_ops hid_bpf_ops___not_used
++#define hid_device hid_device___not_used
+ #define hid_report_type hid_report_type___not_used
+ #define hid_class_request hid_class_request___not_used
+ #define hid_bpf_attach_flags hid_bpf_attach_flags___not_used
+@@ -27,8 +29,10 @@
+
+ #include "vmlinux.h"
+
++#undef bpf_wq
+ #undef hid_bpf_ctx
+ #undef hid_bpf_ops
++#undef hid_device
+ #undef hid_report_type
+ #undef hid_class_request
+ #undef hid_bpf_attach_flags
+@@ -55,6 +59,14 @@ enum hid_report_type {
+ HID_REPORT_TYPES,
+ };
+
++struct hid_device {
++ unsigned int id;
++} __attribute__((preserve_access_index));
++
++struct bpf_wq {
++ __u64 __opaque[2];
++};
++
+ struct hid_bpf_ctx {
+ struct hid_device *hid;
+ __u32 allocated_size;
--- /dev/null
+nfsd-defer-sub-object-cleanup-in-export-put-callbacks.patch
+nfsd-hold-net-reference-for-the-lifetime-of-proc-fs-nfs-exports-fd.patch
+nfsd-fix-heap-overflow-in-nfsv4.0-lock-replay-cache.patch
+selftests-hid-fix-compilation-when-bpf_wq-and-hid_device-are-not-exported.patch
+hid-bpf-prevent-buffer-overflow-in-hid_hw_request.patch
+sunrpc-fix-cache_request-leak-in-cache_release.patch
+nvdimm-bus-fix-potential-use-after-free-in-asynchronous-initialization.patch
+crash_dump-don-t-log-dm-crypt-key-bytes-in-read_key_from_user_keying.patch
+mm-rmap-fix-incorrect-pte-restoration-for-lazyfree-folios.patch
+mm-huge_memory-fix-use-of-null-folio-in-move_pages_huge_pmd.patch
+mm-huge_memory-fix-early-failure-try_to_migrate-when-split-huge-pmd-for-shared-thp.patch
+loongarch-give-more-information-if-kmem-access-failed.patch
+loongarch-no-need-to-flush-icache-if-text-copy-failed.patch
+nfc-nxp-nci-allow-gpios-to-sleep.patch
+net-macb-fix-use-after-free-access-to-ptp-clock.patch
+bnxt_en-fix-oob-access-in-dbg_buf_producer-async-event-handler.patch
+parisc-flush-correct-cache-in-cacheflush-syscall.patch
+batman-adv-avoid-ogm-aggregation-when-skb-tailroom-is-insufficient.patch
+mac80211-fix-crash-in-ieee80211_chan_bw_change-for-ap_vlan-stations.patch
+crypto-padlock-sha-disable-for-zhaoxin-processor.patch
+bluetooth-l2cap-fix-type-confusion-in-l2cap_ecred_reconf_rsp.patch
+bluetooth-l2cap-validate-l2cap_info_rsp-payload-length-before-access.patch
--- /dev/null
+From 17ad31b3a43b72aec3a3d83605891e1397d0d065 Mon Sep 17 00:00:00 2001
+From: Jeff Layton <jlayton@kernel.org>
+Date: Mon, 23 Feb 2026 12:09:58 -0500
+Subject: sunrpc: fix cache_request leak in cache_release
+
+From: Jeff Layton <jlayton@kernel.org>
+
+commit 17ad31b3a43b72aec3a3d83605891e1397d0d065 upstream.
+
+When a reader's file descriptor is closed while in the middle of reading
+a cache_request (rp->offset != 0), cache_release() decrements the
+request's readers count but never checks whether it should free the
+request.
+
+In cache_read(), when readers drops to 0 and CACHE_PENDING is clear, the
+cache_request is removed from the queue and freed along with its buffer
+and cache_head reference. cache_release() lacks this cleanup.
+
+The only other path that frees requests with readers == 0 is
+cache_dequeue(), but it runs only when CACHE_PENDING transitions from
+set to clear. If that transition already happened while readers was
+still non-zero, cache_dequeue() will have skipped the request, and no
+subsequent call will clean it up.
+
+Add the same cleanup logic from cache_read() to cache_release(): after
+decrementing readers, check if it reached 0 with CACHE_PENDING clear,
+and if so, dequeue and free the cache_request.
+
+Reported-by: NeilBrown <neilb@ownmail.net>
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Cc: stable@kernel.org
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/sunrpc/cache.c | 26 +++++++++++++++++++++-----
+ 1 file changed, 21 insertions(+), 5 deletions(-)
+
+--- a/net/sunrpc/cache.c
++++ b/net/sunrpc/cache.c
+@@ -1061,14 +1061,25 @@ static int cache_release(struct inode *i
+ struct cache_reader *rp = filp->private_data;
+
+ if (rp) {
++ struct cache_request *rq = NULL;
++
+ spin_lock(&queue_lock);
+ if (rp->offset) {
+ struct cache_queue *cq;
+- for (cq= &rp->q; &cq->list != &cd->queue;
+- cq = list_entry(cq->list.next, struct cache_queue, list))
++ for (cq = &rp->q; &cq->list != &cd->queue;
++ cq = list_entry(cq->list.next,
++ struct cache_queue, list))
+ if (!cq->reader) {
+- container_of(cq, struct cache_request, q)
+- ->readers--;
++ struct cache_request *cr =
++ container_of(cq,
++ struct cache_request, q);
++ cr->readers--;
++ if (cr->readers == 0 &&
++ !test_bit(CACHE_PENDING,
++ &cr->item->flags)) {
++ list_del(&cr->q.list);
++ rq = cr;
++ }
+ break;
+ }
+ rp->offset = 0;
+@@ -1076,9 +1087,14 @@ static int cache_release(struct inode *i
+ list_del(&rp->q.list);
+ spin_unlock(&queue_lock);
+
++ if (rq) {
++ cache_put(rq->item, cd);
++ kfree(rq->buf);
++ kfree(rq);
++ }
++
+ filp->private_data = NULL;
+ kfree(rp);
+-
+ }
+ if (filp->f_mode & FMODE_WRITE) {
+ atomic_dec(&cd->writers);