--- /dev/null
+From stable+bounces-247657-greg=kroah.com@vger.kernel.org Fri May 15 13:28:18 2026
+From: Sven Eckelmann <sven@narfation.org>
+Date: Fri, 15 May 2026 12:56:49 +0200
+Subject: batman-adv: stop tp_meter sessions during mesh teardown
+To: stable@vger.kernel.org
+Cc: Jiexun Wang <wangjiexun2025@gmail.com>, stable@kernel.org, Yuan Tan <yuantan098@gmail.com>, Yifan Wu <yifanwucs@gmail.com>, Juefei Pu <tomapufckgml@gmail.com>, Xin Liu <bird@lzu.edu.cn>, Luxing Yin <tr0jan@lzu.edu.cn>, Ren Wei <n05ec@lzu.edu.cn>, Sven Eckelmann <sven@narfation.org>
+Message-ID: <20260515105649.180995-1-sven@narfation.org>
+
+From: Jiexun Wang <wangjiexun2025@gmail.com>
+
+commit 3d3cf6a7314aca4df0a6dde28ce784a2a30d0166 upstream.
+
+TP meter sessions remain linked on bat_priv->tp_list after the netlink
+request has already finished. When the mesh interface is removed,
+batadv_mesh_free() currently tears down the mesh without first draining
+these sessions.
+
+A running sender thread or a late incoming tp_meter packet can then keep
+processing against a mesh instance which is already shutting down.
+Synchronize tp_meter with the mesh lifetime by stopping all active
+sessions from batadv_mesh_free() and waiting for sender threads to exit
+before teardown continues.
+
+Fixes: 33a3bb4a3345 ("batman-adv: throughput meter implementation")
+Cc: stable@kernel.org
+Reported-by: Yuan Tan <yuantan098@gmail.com>
+Reported-by: Yifan Wu <yifanwucs@gmail.com>
+Reported-by: Juefei Pu <tomapufckgml@gmail.com>
+Reported-by: Xin Liu <bird@lzu.edu.cn>
+Co-developed-by: Luxing Yin <tr0jan@lzu.edu.cn>
+Signed-off-by: Luxing Yin <tr0jan@lzu.edu.cn>
+Signed-off-by: Jiexun Wang <wangjiexun2025@gmail.com>
+Signed-off-by: Ren Wei <n05ec@lzu.edu.cn>
+[ Context ]
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/main.c | 1
+ net/batman-adv/tp_meter.c | 94 +++++++++++++++++++++++++++++++++++++---------
+ net/batman-adv/tp_meter.h | 1
+ net/batman-adv/types.h | 4 +
+ 4 files changed, 82 insertions(+), 18 deletions(-)
+
+--- a/net/batman-adv/main.c
++++ b/net/batman-adv/main.c
+@@ -262,6 +262,7 @@ void batadv_mesh_free(struct net_device
+ atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);
+
+ batadv_purge_outstanding_packets(bat_priv, NULL);
++ batadv_tp_stop_all(bat_priv);
+
+ batadv_gw_node_free(bat_priv);
+
+--- a/net/batman-adv/tp_meter.c
++++ b/net/batman-adv/tp_meter.c
+@@ -12,6 +12,7 @@
+ #include <linux/byteorder/generic.h>
+ #include <linux/cache.h>
+ #include <linux/compiler.h>
++#include <linux/completion.h>
+ #include <linux/container_of.h>
+ #include <linux/err.h>
+ #include <linux/etherdevice.h>
+@@ -365,23 +366,38 @@ static void batadv_tp_vars_put(struct ba
+ }
+
+ /**
+- * batadv_tp_sender_cleanup() - cleanup sender data and drop and timer
+- * @bat_priv: the bat priv with all the soft interface information
+- * @tp_vars: the private data of the current TP meter session to cleanup
++ * batadv_tp_list_detach() - remove tp session from mesh session list once
++ * @tp_vars: the private data of the current TP meter session
+ */
+-static void batadv_tp_sender_cleanup(struct batadv_priv *bat_priv,
+- struct batadv_tp_vars *tp_vars)
++static void batadv_tp_list_detach(struct batadv_tp_vars *tp_vars)
+ {
+- cancel_delayed_work(&tp_vars->finish_work);
++ bool detached = false;
+
+ spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
+- hlist_del_rcu(&tp_vars->list);
++ if (!hlist_unhashed(&tp_vars->list)) {
++ hlist_del_init_rcu(&tp_vars->list);
++ detached = true;
++ }
+ spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
+
++ if (!detached)
++ return;
++
++ atomic_dec(&tp_vars->bat_priv->tp_num);
++
+ /* drop list reference */
+ batadv_tp_vars_put(tp_vars);
++}
+
+- atomic_dec(&tp_vars->bat_priv->tp_num);
++/**
++ * batadv_tp_sender_cleanup() - cleanup sender data and drop and timer
++ * @tp_vars: the private data of the current TP meter session to cleanup
++ */
++static void batadv_tp_sender_cleanup(struct batadv_tp_vars *tp_vars)
++{
++ cancel_delayed_work_sync(&tp_vars->finish_work);
++
++ batadv_tp_list_detach(tp_vars);
+
+ /* kill the timer and remove its reference */
+ del_timer_sync(&tp_vars->timer);
+@@ -886,7 +902,8 @@ out:
+ batadv_orig_node_put(orig_node);
+
+ batadv_tp_sender_end(bat_priv, tp_vars);
+- batadv_tp_sender_cleanup(bat_priv, tp_vars);
++ batadv_tp_sender_cleanup(tp_vars);
++ complete(&tp_vars->finished);
+
+ batadv_tp_vars_put(tp_vars);
+
+@@ -918,7 +935,8 @@ static void batadv_tp_start_kthread(stru
+ batadv_tp_vars_put(tp_vars);
+
+ /* cleanup of failed tp meter variables */
+- batadv_tp_sender_cleanup(bat_priv, tp_vars);
++ batadv_tp_sender_cleanup(tp_vars);
++ complete(&tp_vars->finished);
+ return;
+ }
+
+@@ -1024,6 +1042,7 @@ void batadv_tp_start(struct batadv_priv
+ tp_vars->start_time = jiffies;
+
+ init_waitqueue_head(&tp_vars->more_bytes);
++ init_completion(&tp_vars->finished);
+
+ spin_lock_init(&tp_vars->unacked_lock);
+ INIT_LIST_HEAD(&tp_vars->unacked_list);
+@@ -1126,14 +1145,7 @@ static void batadv_tp_receiver_shutdown(
+ "Shutting down for inactivity (more than %dms) from %pM\n",
+ BATADV_TP_RECV_TIMEOUT, tp_vars->other_end);
+
+- spin_lock_bh(&tp_vars->bat_priv->tp_list_lock);
+- hlist_del_rcu(&tp_vars->list);
+- spin_unlock_bh(&tp_vars->bat_priv->tp_list_lock);
+-
+- /* drop list reference */
+- batadv_tp_vars_put(tp_vars);
+-
+- atomic_dec(&bat_priv->tp_num);
++ batadv_tp_list_detach(tp_vars);
+
+ spin_lock_bh(&tp_vars->unacked_lock);
+ list_for_each_entry_safe(un, safe, &tp_vars->unacked_list, list) {
+@@ -1497,6 +1509,52 @@ out:
+ }
+
+ /**
++ * batadv_tp_stop_all() - stop all currently running tp meter sessions
++ * @bat_priv: the bat priv with all the mesh interface information
++ */
++void batadv_tp_stop_all(struct batadv_priv *bat_priv)
++{
++ struct batadv_tp_vars *tp_vars[BATADV_TP_MAX_NUM];
++ struct batadv_tp_vars *tp_var;
++ size_t count = 0;
++ size_t i;
++
++ spin_lock_bh(&bat_priv->tp_list_lock);
++ hlist_for_each_entry(tp_var, &bat_priv->tp_list, list) {
++ if (WARN_ON_ONCE(count >= BATADV_TP_MAX_NUM))
++ break;
++
++ if (!kref_get_unless_zero(&tp_var->refcount))
++ continue;
++
++ tp_vars[count++] = tp_var;
++ }
++ spin_unlock_bh(&bat_priv->tp_list_lock);
++
++ for (i = 0; i < count; i++) {
++ tp_var = tp_vars[i];
++
++ switch (tp_var->role) {
++ case BATADV_TP_SENDER:
++ batadv_tp_sender_shutdown(tp_var,
++ BATADV_TP_REASON_CANCEL);
++ wake_up(&tp_var->more_bytes);
++ wait_for_completion(&tp_var->finished);
++ break;
++ case BATADV_TP_RECEIVER:
++ batadv_tp_list_detach(tp_var);
++ if (timer_shutdown_sync(&tp_var->timer))
++ batadv_tp_vars_put(tp_var);
++ break;
++ }
++
++ batadv_tp_vars_put(tp_var);
++ }
++
++ synchronize_net();
++}
++
++/**
+ * batadv_tp_meter_init() - initialize global tp_meter structures
+ */
+ void __init batadv_tp_meter_init(void)
+--- a/net/batman-adv/tp_meter.h
++++ b/net/batman-adv/tp_meter.h
+@@ -17,6 +17,7 @@ void batadv_tp_start(struct batadv_priv
+ u32 test_length, u32 *cookie);
+ void batadv_tp_stop(struct batadv_priv *bat_priv, const u8 *dst,
+ u8 return_value);
++void batadv_tp_stop_all(struct batadv_priv *bat_priv);
+ void batadv_tp_meter_recv(struct batadv_priv *bat_priv, struct sk_buff *skb);
+
+ #endif /* _NET_BATMAN_ADV_TP_METER_H_ */
+--- a/net/batman-adv/types.h
++++ b/net/batman-adv/types.h
+@@ -14,6 +14,7 @@
+ #include <linux/average.h>
+ #include <linux/bitops.h>
+ #include <linux/compiler.h>
++#include <linux/completion.h>
+ #include <linux/if.h>
+ #include <linux/if_ether.h>
+ #include <linux/kref.h>
+@@ -1396,6 +1397,9 @@ struct batadv_tp_vars {
+ /** @finish_work: work item for the finishing procedure */
+ struct delayed_work finish_work;
+
++ /** @finished: completion signaled when a sender thread exits */
++ struct completion finished;
++
+ /** @test_length: test length in milliseconds */
+ u32 test_length;
+
--- /dev/null
+From stable+bounces-247707-greg=kroah.com@vger.kernel.org Fri May 15 14:39:24 2026
+From: Sven Eckelmann <sven@narfation.org>
+Date: Fri, 15 May 2026 13:49:53 +0200
+Subject: batman-adv: tp_meter: fix tp_num leak on kmalloc failure
+To: stable@vger.kernel.org
+Cc: Sven Eckelmann <sven@narfation.org>, stable@kernel.org
+Message-ID: <20260515114953.437624-1-sven@narfation.org>
+
+From: Sven Eckelmann <sven@narfation.org>
+
+commit ce425dd05d0fe7594930a0fb103634f35ac47bb6 upstream.
+
+When batadv_tp_start() or batadv_tp_init_recv() fail to allocate a new
+tp_vars object, the previously incremented bat_priv->tp_num counter is
+never decremented. This causes tp_num to drift upward on each allocation
+failure. Since only BATADV_TP_MAX_NUM sessions can be started and the count
+is never reduced for these failed allocations, the throughput meter
+sessions are eventually exhausted. In the worst case, no new throughput
+meter session can be started until the mesh interface is removed.
+
+The error handling must decrement tp_num before releasing the lock and
+aborting the creation of a throughput meter session.
+
+Cc: stable@kernel.org
+Fixes: 33a3bb4a3345 ("batman-adv: throughput meter implementation")
+[ Context ]
+Signed-off-by: Sven Eckelmann <sven@narfation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/batman-adv/tp_meter.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
+index 7f3dd3c393e0..16da48b23f57 100644
+--- a/net/batman-adv/tp_meter.c
++++ b/net/batman-adv/tp_meter.c
+@@ -969,6 +969,7 @@ void batadv_tp_start(struct batadv_priv *bat_priv, const u8 *dst,
+
+ tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
+ if (!tp_vars) {
++ atomic_dec(&bat_priv->tp_num);
+ spin_unlock_bh(&bat_priv->tp_list_lock);
+ batadv_dbg(BATADV_DBG_TP_METER, bat_priv,
+ "Meter: %s cannot allocate list elements\n",
+@@ -1344,8 +1345,10 @@ batadv_tp_init_recv(struct batadv_priv *bat_priv,
+ }
+
+ tp_vars = kmalloc(sizeof(*tp_vars), GFP_ATOMIC);
+- if (!tp_vars)
++ if (!tp_vars) {
++ atomic_dec(&bat_priv->tp_num);
+ goto out_unlock;
++ }
+
+ ether_addr_copy(tp_vars->other_end, icmp->orig);
+ tp_vars->role = BATADV_TP_RECEIVER;
+--
+2.47.3
+
--- /dev/null
+From 78a88d43dab8d23aeef934ed8ce34d40e6b3d613 Mon Sep 17 00:00:00 2001
+From: Siwei Zhang <oss@fourdim.xyz>
+Date: Wed, 15 Apr 2026 16:53:36 -0400
+Subject: Bluetooth: L2CAP: Fix null-ptr-deref in l2cap_sock_get_sndtimeo_cb()
+
+From: Siwei Zhang <oss@fourdim.xyz>
+
+commit 78a88d43dab8d23aeef934ed8ce34d40e6b3d613 upstream.
+
+Add the same NULL guard already present in
+l2cap_sock_resume_cb() and l2cap_sock_ready_cb().
+
+Fixes: 8d836d71e222 ("Bluetooth: Access sk_sndtimeo indirectly in l2cap_core.c")
+Cc: stable@kernel.org
+Signed-off-by: Siwei Zhang <oss@fourdim.xyz>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/bluetooth/l2cap_sock.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1731,6 +1731,9 @@ static long l2cap_sock_get_sndtimeo_cb(s
+ {
+ struct sock *sk = chan->data;
+
++ if (!sk)
++ return 0;
++
+ return sk->sk_sndtimeo;
+ }
+
--- /dev/null
+From e9acda52fd2ee0cdca332f996da7a95c5fd25294 Mon Sep 17 00:00:00 2001
+From: Nikolay Aleksandrov <razor@blackwall.org>
+Date: Fri, 23 Jan 2026 14:06:59 +0200
+Subject: bonding: fix use-after-free due to enslave fail after slave array update
+
+From: Nikolay Aleksandrov <razor@blackwall.org>
+
+commit e9acda52fd2ee0cdca332f996da7a95c5fd25294 upstream.
+
+Fix a use-after-free which happens due to enslave failure after the new
+slave has been added to the array. Since the new slave can be used for Tx
+immediately, we can use it after it has been freed by the enslave error
+cleanup path which frees the allocated slave memory. Slave update array is
+supposed to be called last when further enslave failures are not expected.
+Move it after xdp setup to avoid any problems.
+
+It is very easy to reproduce the problem with a simple xdp_pass prog:
+ ip l add bond1 type bond mode balance-xor
+ ip l set bond1 up
+ ip l set dev bond1 xdp object xdp_pass.o sec xdp_pass
+ ip l add dumdum type dummy
+
+Then run in parallel:
+ while :; do ip l set dumdum master bond1 1>/dev/null 2>&1; done;
+ mausezahn bond1 -a own -b rand -A rand -B 1.1.1.1 -c 0 -t tcp "dp=1-1023, flags=syn"
+
+The crash happens almost immediately:
+ [ 605.602850] Oops: general protection fault, probably for non-canonical address 0xe0e6fc2460000137: 0000 [#1] SMP KASAN NOPTI
+ [ 605.602916] KASAN: maybe wild-memory-access in range [0x07380123000009b8-0x07380123000009bf]
+ [ 605.602946] CPU: 0 UID: 0 PID: 2445 Comm: mausezahn Kdump: loaded Tainted: G B 6.19.0-rc6+ #21 PREEMPT(voluntary)
+ [ 605.602979] Tainted: [B]=BAD_PAGE
+ [ 605.602998] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+ [ 605.603032] RIP: 0010:netdev_core_pick_tx+0xcd/0x210
+ [ 605.603063] Code: 48 89 fa 48 c1 ea 03 80 3c 02 00 0f 85 3e 01 00 00 48 b8 00 00 00 00 00 fc ff df 4c 8b 6b 08 49 8d 7d 30 48 89 fa 48 c1 ea 03 <80> 3c 02 00 0f 85 25 01 00 00 49 8b 45 30 4c 89 e2 48 89 ee 48 89
+ [ 605.603111] RSP: 0018:ffff88817b9af348 EFLAGS: 00010213
+ [ 605.603145] RAX: dffffc0000000000 RBX: ffff88817d28b420 RCX: 0000000000000000
+ [ 605.603172] RDX: 00e7002460000137 RSI: 0000000000000008 RDI: 07380123000009be
+ [ 605.603199] RBP: ffff88817b541a00 R08: 0000000000000001 R09: fffffbfff3ed8c0c
+ [ 605.603226] R10: ffffffff9f6c6067 R11: 0000000000000001 R12: 0000000000000000
+ [ 605.603253] R13: 073801230000098e R14: ffff88817d28b448 R15: ffff88817b541a84
+ [ 605.603286] FS: 00007f6570ef67c0(0000) GS:ffff888221dfa000(0000) knlGS:0000000000000000
+ [ 605.603319] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [ 605.603343] CR2: 00007f65712fae40 CR3: 000000011371b000 CR4: 0000000000350ef0
+ [ 605.603373] Call Trace:
+ [ 605.603392] <TASK>
+ [ 605.603410] __dev_queue_xmit+0x448/0x32a0
+ [ 605.603434] ? __pfx_vprintk_emit+0x10/0x10
+ [ 605.603461] ? __pfx_vprintk_emit+0x10/0x10
+ [ 605.603484] ? __pfx___dev_queue_xmit+0x10/0x10
+ [ 605.603507] ? bond_start_xmit+0xbfb/0xc20 [bonding]
+ [ 605.603546] ? _printk+0xcb/0x100
+ [ 605.603566] ? __pfx__printk+0x10/0x10
+ [ 605.603589] ? bond_start_xmit+0xbfb/0xc20 [bonding]
+ [ 605.603627] ? add_taint+0x5e/0x70
+ [ 605.603648] ? add_taint+0x2a/0x70
+ [ 605.603670] ? end_report.cold+0x51/0x75
+ [ 605.603693] ? bond_start_xmit+0xbfb/0xc20 [bonding]
+ [ 605.603731] bond_start_xmit+0x623/0xc20 [bonding]
+
+Fixes: 9e2ee5c7e7c3 ("net, bonding: Add XDP support to the bonding driver")
+Signed-off-by: Nikolay Aleksandrov <razor@blackwall.org>
+Reported-by: Chen Zhen <chenzhen126@huawei.com>
+Closes: https://lore.kernel.org/netdev/fae17c21-4940-5605-85b2-1d5e17342358@huawei.com/
+CC: Jussi Maki <joamaki@gmail.com>
+CC: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://patch.msgid.link/20260123120659.571187-1-razor@blackwall.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Tested-by: Yunseong Kim <yunseong.kim@est.tech>
+Signed-off-by: Yunseong Kim <yunseong.kim@est.tech>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/bonding/bond_main.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2309,9 +2309,6 @@ skip_mac_set:
+ unblock_netpoll_tx();
+ }
+
+- if (bond_mode_can_use_xmit_hash(bond))
+- bond_update_slave_arr(bond, NULL);
+-
+ if (!slave_dev->netdev_ops->ndo_bpf ||
+ !slave_dev->netdev_ops->ndo_xdp_xmit) {
+ if (bond->xdp_prog) {
+@@ -2345,6 +2342,9 @@ skip_mac_set:
+ bpf_prog_inc(bond->xdp_prog);
+ }
+
++ if (bond_mode_can_use_xmit_hash(bond))
++ bond_update_slave_arr(bond, NULL);
++
+ bond_xdp_set_features(bond_dev);
+
+ slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n",
--- /dev/null
+From stable+bounces-247728-greg=kroah.com@vger.kernel.org Fri May 15 14:40:14 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 May 2026 08:06:20 -0400
+Subject: btrfs: fix btrfs_ioctl_space_info() slot_count TOCTOU which can lead to info-leak
+To: stable@vger.kernel.org
+Cc: Yochai Eisenrich <yochaie@sweet.security>, Yochai Eisenrich <echelonh@gmail.com>, David Sterba <dsterba@suse.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260515120620.3073457-1-sashal@kernel.org>
+
+From: Yochai Eisenrich <yochaie@sweet.security>
+
+[ Upstream commit 973e57c726c1f8e77259d1c8e519519f1e9aea77 ]
+
+btrfs_ioctl_space_info() has a TOCTOU race between two passes over the
+block group RAID type lists. The first pass counts entries to determine
+the allocation size, then the second pass fills the buffer. The
+groups_sem rwlock is released between passes, allowing concurrent block
+group removal to reduce the entry count.
+
+When the second pass fills fewer entries than the first pass counted,
+copy_to_user() copies the full alloc_size bytes including trailing
+uninitialized kmalloc bytes to userspace.
+
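+A condensed sketch of the racy sequence (the per-space_info iteration is
+elided and count_entries() is only a stand-in name for illustration, not
+a real helper):
+
+    down_read(&groups_sem);
+    slot_count = count_entries();            /* pass 1: count */
+    up_read(&groups_sem);
+
+    alloc_size = slot_count * sizeof(*dest);
+    dest = kmalloc(alloc_size, GFP_KERNEL);  /* not zeroed */
+
+    /* pass 2 re-takes groups_sem, but block groups may have been removed
+     * meanwhile, so fewer than slot_count entries are filled in
+     */
+
+    copy_to_user(user_dest, dest_orig, alloc_size); /* leaks the tail */
+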
+Fix by copying only total_spaces entries (the actually-filled count from
+the second pass) instead of alloc_size bytes, and switch to kzalloc so
+any future copy size mismatch cannot leak heap data.
+
+Fixes: 7fde62bffb57 ("Btrfs: buffer results in the space_info ioctl")
+CC: stable@vger.kernel.org # 3.0
+Signed-off-by: Yochai Eisenrich <echelonh@gmail.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+[ adapted upstream's `return -EFAULT;` to stable's `ret = -EFAULT;` fall-through to existing `out:` cleanup label ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ioctl.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
+index 4723013995f5b..d17d1eff8eff4 100644
+--- a/fs/btrfs/ioctl.c
++++ b/fs/btrfs/ioctl.c
+@@ -3087,7 +3087,7 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+ return -ENOMEM;
+
+ space_args.total_spaces = 0;
+- dest = kmalloc(alloc_size, GFP_KERNEL);
++ dest = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dest)
+ return -ENOMEM;
+ dest_orig = dest;
+@@ -3143,7 +3143,8 @@ static long btrfs_ioctl_space_info(struct btrfs_fs_info *fs_info,
+ user_dest = (struct btrfs_ioctl_space_info __user *)
+ (arg + sizeof(struct btrfs_ioctl_space_args));
+
+- if (copy_to_user(user_dest, dest_orig, alloc_size))
++ if (copy_to_user(user_dest, dest_orig,
++ space_args.total_spaces * sizeof(*dest_orig)))
+ ret = -EFAULT;
+
+ kfree(dest_orig);
+--
+2.53.0
+
--- /dev/null
+From stable+bounces-244957-greg=kroah.com@vger.kernel.org Sat May 9 16:22:24 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 May 2026 10:22:16 -0400
+Subject: ceph: only d_add() negative dentries when they are unhashed
+To: stable@vger.kernel.org
+Cc: Max Kellermann <max.kellermann@ionos.com>, Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>, Ilya Dryomov <idryomov@gmail.com>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260509142216.3462686-1-sashal@kernel.org>
+
+From: Max Kellermann <max.kellermann@ionos.com>
+
+[ Upstream commit 803447f93d75ab6e40c85e6d12b5630d281d70d6 ]
+
+Ceph can call d_add(dentry, NULL) on a negative dentry that is already
+present in the primary dcache hash.
+
+In the current VFS that is not safe. d_add() goes through __d_add()
+to __d_rehash(), which unconditionally reinserts dentry->d_hash into
+the hlist_bl bucket. If the dentry is already hashed, reinserting the
+same node can corrupt the bucket, including creating a self-loop.
+Once that happens, __d_lookup() can spin forever in the hlist_bl walk,
+typically looping only on the d_name.hash mismatch check and
+eventually triggering RCU stall reports like this one:
+
+ rcu: INFO: rcu_sched self-detected stall on CPU
+ rcu: 87-....: (2100 ticks this GP) idle=3a4c/1/0x4000000000000000 softirq=25003319/25003319 fqs=829
+ rcu: (t=2101 jiffies g=79058445 q=698988 ncpus=192)
+ CPU: 87 UID: 2952868916 PID: 3933303 Comm: php-cgi8.3 Not tainted 6.18.17-i1-amd #950 NONE
+ Hardware name: Dell Inc. PowerEdge R7615/0G9DHV, BIOS 1.6.6 09/22/2023
+ RIP: 0010:__d_lookup+0x46/0xb0
+ Code: c1 e8 07 48 8d 04 c2 48 8b 00 49 89 fc 49 89 f5 48 89 c3 48 83 e3 fe 48 83 f8 01 77 0f eb 2d 0f 1f 44 00 00 48 8b 1b 48 85 db <74> 20 39 6b 18 75 f3 48 8d 7b 78 e8 ba 85 d0 00 4c 39 63 10 74 1f
+ RSP: 0018:ff745a70c8253898 EFLAGS: 00000282
+ RAX: ff26e470054cb208 RBX: ff26e470054cb208 RCX: 000000006e958966
+ RDX: ff26e48267340000 RSI: ff745a70c82539b0 RDI: ff26e458f74655c0
+ RBP: 000000006e958966 R08: 0000000000000180 R09: 9cd08d909b919a89
+ R10: ff26e458f74655c0 R11: 0000000000000000 R12: ff26e458f74655c0
+ R13: ff745a70c82539b0 R14: d0d0d0d0d0d0d0d0 R15: 2f2f2f2f2f2f2f2f
+ FS: 00007f5770896980(0000) GS:ff26e482c5d88000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007f5764de50c0 CR3: 000000a72abb5001 CR4: 0000000000771ef0
+ PKRU: 55555554
+ Call Trace:
+ <TASK>
+ lookup_fast+0x9f/0x100
+ walk_component+0x1f/0x150
+ link_path_walk+0x20e/0x3d0
+ path_lookupat+0x68/0x180
+ filename_lookup+0xdc/0x1e0
+ vfs_statx+0x6c/0x140
+ vfs_fstatat+0x67/0xa0
+ __do_sys_newfstatat+0x24/0x60
+ do_syscall_64+0x6a/0x230
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+This is reachable with reused cached negative dentries. A Ceph lookup
+or atomic_open can be handed a negative dentry that is already hashed,
+and fs/ceph/dir.c then hits one of two paths that incorrectly assume
+"negative" also means "unhashed":
+
+ - ceph_finish_lookup():
+ MDS reply is -ENOENT with no trace
+ -> d_add(dentry, NULL)
+
+ - ceph_lookup():
+ local ENOENT fast path for a complete directory with shared caps
+ -> d_add(dentry, NULL)
+
+Both paths can therefore re-add an already-hashed negative dentry.
+
+Ceph already uses the correct pattern elsewhere: ceph_fill_trace() only
+calls d_add(dn, NULL) for a negative null-dentry reply when d_unhashed(dn)
+is true.
+
+Fix both fs/ceph/dir.c sites the same way: only call d_add() for a
+negative dentry when it is actually unhashed. If the negative dentry
+is already hashed, leave it in place and reuse it as-is.
+
+This preserves the existing behavior for unhashed dentries while
+avoiding d_hash list corruption for reused hashed negatives.
+
+Cc: stable@vger.kernel.org
+Fixes: 2817b000b02c ("ceph: directory operations")
+Signed-off-by: Max Kellermann <max.kellermann@ionos.com>
+Reviewed-by: Viacheslav Dubeyko <Slava.Dubeyko@ibm.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+[ kept existing dout() debug call instead of upstream's doutc() form when adding the d_unhashed() guard around d_add() ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/dir.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/fs/ceph/dir.c
++++ b/fs/ceph/dir.c
+@@ -745,7 +745,8 @@ struct dentry *ceph_finish_lookup(struct
+ d_drop(dentry);
+ err = -ENOENT;
+ } else {
+- d_add(dentry, NULL);
++ if (d_unhashed(dentry))
++ d_add(dentry, NULL);
+ }
+ }
+ }
+@@ -813,7 +814,8 @@ static struct dentry *ceph_lookup(struct
+ __ceph_touch_fmode(ci, mdsc, CEPH_FILE_MODE_RD);
+ spin_unlock(&ci->i_ceph_lock);
+ dout(" dir %p complete, -ENOENT\n", dir);
+- d_add(dentry, NULL);
++ if (d_unhashed(dentry))
++ d_add(dentry, NULL);
+ di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
+ return NULL;
+ }
--- /dev/null
+From 235e32320a470fcd3998fb3774f2290a0eb302a1 Mon Sep 17 00:00:00 2001
+From: Namjae Jeon <linkinjeon@kernel.org>
+Date: Sat, 4 Apr 2026 21:09:02 +0900
+Subject: ksmbd: fix use-after-free in __ksmbd_close_fd() via durable scavenger
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+commit 235e32320a470fcd3998fb3774f2290a0eb302a1 upstream.
+
+When a durable file handle survives session disconnect (TCP close without
+SMB2_LOGOFF), session_fd_check() sets fp->conn = NULL to preserve the
+handle for later reconnection. However, it did not clean up the byte-range
+locks on fp->lock_list.
+
+Later, when the durable scavenger thread times out and calls
+__ksmbd_close_fd(NULL, fp), the lock cleanup loop did:
+
+ spin_lock(&fp->conn->llist_lock);
+
+This caused a slab use-after-free because fp->conn was NULL and the
+original connection object had already been freed by
+ksmbd_tcp_disconnect().
+
+The root cause is asymmetric cleanup: lock entries (smb_lock->clist) were
+left dangling on the freed conn->lock_list while fp->conn was nulled out.
+
+To fix this issue properly, we need to handle the lifetime of
+smb_lock->clist across three paths:
+ - Safely skip clist deletion when list is empty and fp->conn is NULL.
+ - Remove the lock from the old connection's lock_list in
+ session_fd_check()
+ - Re-add the lock to the new connection's lock_list in
+ ksmbd_reopen_durable_fd().
+
+Fixes: c8efcc786146 ("ksmbd: add support for durable handles v1/v2")
+Co-developed-by: munan Huang <munanevil@gmail.com>
+Signed-off-by: munan Huang <munanevil@gmail.com>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+[ Minor context conflict resolved. ]
+Signed-off-by: Alva Lan <alvalan9@foxmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/smb/server/vfs_cache.c | 40 ++++++++++++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 10 deletions(-)
+
+--- a/fs/smb/server/vfs_cache.c
++++ b/fs/smb/server/vfs_cache.c
+@@ -356,9 +356,11 @@ static void __ksmbd_close_fd(struct ksmb
+ * there are not accesses to fp->lock_list.
+ */
+ list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
+- spin_lock(&fp->conn->llist_lock);
+- list_del(&smb_lock->clist);
+- spin_unlock(&fp->conn->llist_lock);
++ if (!list_empty(&smb_lock->clist) && fp->conn) {
++ spin_lock(&fp->conn->llist_lock);
++ list_del(&smb_lock->clist);
++ spin_unlock(&fp->conn->llist_lock);
++ }
+
+ list_del(&smb_lock->flist);
+ locks_free_lock(smb_lock->fl);
+@@ -755,6 +757,7 @@ static bool session_fd_check(struct ksmb
+ struct ksmbd_inode *ci;
+ struct oplock_info *op;
+ struct ksmbd_conn *conn;
++ struct ksmbd_lock *smb_lock, *tmp_lock;
+
+ if (!is_reconnectable(fp))
+ return false;
+@@ -771,6 +774,12 @@ static bool session_fd_check(struct ksmb
+ }
+ up_write(&ci->m_lock);
+
++ list_for_each_entry_safe(smb_lock, tmp_lock, &fp->lock_list, flist) {
++ spin_lock(&fp->conn->llist_lock);
++ list_del_init(&smb_lock->clist);
++ spin_unlock(&fp->conn->llist_lock);
++ }
++
+ fp->conn = NULL;
+ fp->tcon = NULL;
+ fp->volatile_id = KSMBD_NO_FID;
+@@ -844,6 +853,9 @@ int ksmbd_reopen_durable_fd(struct ksmbd
+ {
+ struct ksmbd_inode *ci;
+ struct oplock_info *op;
++ struct ksmbd_conn *conn = work->conn;
++ struct ksmbd_lock *smb_lock;
++ unsigned int old_f_state;
+
+ if (!fp->is_durable || fp->conn || fp->tcon) {
+ pr_err("Invalid durable fd [%p:%p]\n", fp->conn, fp->tcon);
+@@ -855,9 +867,23 @@ int ksmbd_reopen_durable_fd(struct ksmbd
+ return -EBADF;
+ }
+
+- fp->conn = work->conn;
++ old_f_state = fp->f_state;
++ fp->f_state = FP_NEW;
++ __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
++ if (!has_file_id(fp->volatile_id)) {
++ fp->f_state = old_f_state;
++ return -EBADF;
++ }
++
++ fp->conn = conn;
+ fp->tcon = work->tcon;
+
++ list_for_each_entry(smb_lock, &fp->lock_list, flist) {
++ spin_lock(&conn->llist_lock);
++ list_add_tail(&smb_lock->clist, &conn->lock_list);
++ spin_unlock(&conn->llist_lock);
++ }
++
+ ci = fp->f_ci;
+ down_write(&ci->m_lock);
+ list_for_each_entry_rcu(op, &ci->m_op_list, op_entry) {
+@@ -868,12 +894,6 @@ int ksmbd_reopen_durable_fd(struct ksmbd
+ }
+ up_write(&ci->m_lock);
+
+- __open_id(&work->sess->file_table, fp, OPEN_ID_TYPE_VOLATILE_ID);
+- if (!has_file_id(fp->volatile_id)) {
+- fp->conn = NULL;
+- fp->tcon = NULL;
+- return -EBADF;
+- }
+ return 0;
+ }
+
--- /dev/null
+From stable+bounces-245815-greg=kroah.com@vger.kernel.org Tue May 12 16:51:01 2026
+From: Marc Zyngier <maz@kernel.org>
+Date: Tue, 12 May 2026 15:50:35 +0100
+Subject: KVM: arm64: Wake-up from WFI when irqchip is in userspace
+To: stable@vger.kernel.org
+Message-ID: <20260512145035.3676967-1-maz@kernel.org>
+
+From: Marc Zyngier <maz@kernel.org>
+
+commit 4ce98bf0865c349e7026ad9c14f48da264920953 upstream
+
+It appears that there is nothing in the wake-up path that
+evaluates whether the in-kernel interrupts are pending unless
+we have a vgic.
+
+This means that the userspace irqchip support has been broken for
+about four years, and nobody noticed. It was also broken before
+as we wouldn't wake-up on a PMU interrupt, but hey, who cares...
+
+It is probably time to remove the feature altogether, because it
+was a terrible idea 10 years ago, and it still is.
+
+Fixes: b57de4ffd7c6d ("KVM: arm64: Simplify kvm_cpu_has_pending_timer()")
+Link: https://patch.msgid.link/20260423163607.486345-1-maz@kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kvm/arm.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/arch/arm64/kvm/arm.c
++++ b/arch/arm64/kvm/arm.c
+@@ -557,6 +557,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(stru
+ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
+ {
+ bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
++
++ irq_lines |= (!irqchip_in_kernel(v->kvm) &&
++ (kvm_timer_should_notify_user(v) ||
++ kvm_pmu_should_notify_user(v)));
++
+ return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
+ && !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
+ }
--- /dev/null
+From 8bbde987c2b84f80da0853f739f0a920386f8b99 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Mon, 6 Apr 2026 17:31:52 -0700
+Subject: mm/damon/core: disallow time-quota setting zero esz
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 8bbde987c2b84f80da0853f739f0a920386f8b99 upstream.
+
+When the throughput of a DAMOS scheme is very slow, DAMOS time quota can
+make the effective size quota smaller than damon_ctx->min_region_sz. In
+the case, damos_apply_scheme() will skip applying the action, because the
+action is tried at region level, which requires >=min_region_sz size.
+That is, the quota is effectively exceeded for the quota charge window.
+
+Because no action will be applied, the total_charged_sz and
+total_charged_ns are also not updated. damos_set_effective_quota() will
+try to update the effective size quota before starting the next charge
+window. However, because the total_charged_sz and total_charged_ns have
+not updated, the throughput and effective size quota are also not changed.
+Since effective size quota can only be decreased, other effective size
+quota update factors including DAMOS quota goals and size quota cannot
+make any change, either.
+
+As a result, the scheme is unexpectedly deactivated until the user notices
+and mitigates the situation. The users can mitigate this situation by
+changing the time quota online or re-installing the scheme. While the
+mitigation is somewhat straightforward, finding the situation would be
+challenging, because DAMON does not provide good observability for that.
+Even if such observability is provided, doing the additional monitoring
+and the mitigation is somewhat cumbersome and not aligned with the intention
+of the time quota. The time quota was intended to help reduce the user's
+administration overhead.
+
+Fix the problem by ensuring that the time quota-modified effective size
+quota is always at least min_region_sz.
+
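+For illustration (hypothetical numbers, assuming a 4 KiB minimum region
+size): if the measured throughput is 2 KiB per millisecond and quota->ms
+is 1, then
+
+    esz = throughput * quota->ms = 2 KiB < 4 KiB
+
+so no region-granularity action can be applied, total_charged_sz and
+total_charged_ns stay unchanged, and esz never recovers. Clamping esz to
+at least the minimum region size breaks this cycle.
+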
+The issue was discovered [1] by sashiko.
+
+Link: https://lore.kernel.org/20260407003153.79589-1-sj@kernel.org
+Link: https://lore.kernel.org/20260405192504.110014-1-sj@kernel.org [1]
+Fixes: 1cd243030059 ("mm/damon/schemes: implement time quota")
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> # 5.16.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -1026,6 +1026,7 @@ static void damos_set_effective_quota(st
+ else
+ throughput = PAGE_SIZE * 1024;
+ esz = throughput * quota->ms;
++ esz = max(DAMON_MIN_REGION, esz);
+
+ if (quota->sz && quota->sz < esz)
+ esz = quota->sz;
--- /dev/null
+From 4262c53236977de3ceaa3bf2aefdf772c9b874dd Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Thu, 15 Jan 2026 07:20:41 -0800
+Subject: mm/damon/core: implement damon_kdamond_pid()
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 4262c53236977de3ceaa3bf2aefdf772c9b874dd upstream.
+
+Patch series "mm/damon: hide kdamond and kdamond_lock from API callers".
+
+The 'kdamond' and 'kdamond_lock' fields were initially exposed to DAMON
+API callers for flexible synchronization and use cases. As the DAMON API
+became somewhat complicated compared to the early days, keeping those
+exposed could only encourage the API callers to invent more creative but
+complicated and difficult-to-debug use cases.
+
+Fortunately DAMON API callers didn't invent that many creative use cases.
+There exist only two use cases of 'kdamond' and 'kdamond_lock': finding
+whether the kdamond is actively running, and getting the pid of the
+kdamond. For the first use case, a dedicated API function, namely
+'damon_is_running()', is provided, and all DAMON API callers are using the
+function for that use case. Hence the second use case is the only one
+where the fields are directly used by DAMON API callers.
+
+To prevent future invention of complicated and erroneous use cases of the
+fields, hide the fields from the API callers. For that, provide new
+dedicated DAMON API functions for the remaining use case, namely
+damon_kdamond_pid(), migrate DAMON API callers to use the new function,
+and mark the fields as private fields.
+
+
+This patch (of 5):
+
+'kdamond' and 'kdamond_lock' are directly being used by DAMON API callers
+for getting the pid of the corresponding kdamond. To discourage invention
+of creative but complicated and erroneous new usages of the fields that
+require careful synchronization, implement a new API function that can
+simply be used without the manual synchronizations.
+
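+For illustration only (not part of this patch), a caller that previously
+open-coded the synchronization can switch to the new helper roughly like
+this:
+
+    /* before: manual locking by the API caller */
+    mutex_lock(&ctx->kdamond_lock);
+    pid = ctx->kdamond ? ctx->kdamond->pid : -1;
+    mutex_unlock(&ctx->kdamond_lock);
+
+    /* after: the helper does the locking internally and returns a
+     * negative error code if the context is not running
+     */
+    pid = damon_kdamond_pid(ctx);
+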
+Link: https://lkml.kernel.org/r/20260115152047.68415-1-sj@kernel.org
+Link: https://lkml.kernel.org/r/20260115152047.68415-2-sj@kernel.org
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/damon.h | 1 +
+ mm/damon/core.c | 17 +++++++++++++++++
+ 2 files changed, 18 insertions(+)
+
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -677,6 +677,7 @@ static inline unsigned int damon_max_nr_
+
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
++int damon_kdamond_pid(struct damon_ctx *ctx);
+
+ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+ unsigned long *start, unsigned long *end);
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -762,6 +762,23 @@ int damon_stop(struct damon_ctx **ctxs,
+ return err;
+ }
+
++/**
++ * damon_kdamond_pid() - Return pid of a given DAMON context's worker thread.
++ * @ctx: The DAMON context of the question.
++ *
++ * Return: pid if @ctx is running, negative error code otherwise.
++ */
++int damon_kdamond_pid(struct damon_ctx *ctx)
++{
++ int pid = -EINVAL;
++
++ mutex_lock(&ctx->kdamond_lock);
++ if (ctx->kdamond)
++ pid = ctx->kdamond->pid;
++ mutex_unlock(&ctx->kdamond_lock);
++ return pid;
++}
++
+ /*
+ * Reset the aggregated monitoring results ('nr_accesses' of each region).
+ */
--- /dev/null
+From b98b7ff6025ae82570d4915e083f0cbd8d48b3cf Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 19 Apr 2026 09:10:01 -0700
+Subject: mm/damon/lru_sort: detect and use fresh enabled and kdamond_pid values
+
+From: SeongJae Park <sj@kernel.org>
+
+commit b98b7ff6025ae82570d4915e083f0cbd8d48b3cf upstream.
+
+DAMON_LRU_SORT updates 'enabled' and 'kdamond_pid' parameter values, which
+represent the running status of its kdamond, when the user explicitly
+requests start/stop of the kdamond. The kdamond can, however, be stopped
+by events other than an explicit user request, namely the following three
+events.
+
+1. ctx->regions_score_histogram allocation failure at beginning of the
+ execution,
+2. damon_commit_ctx() failure due to invalid user input, and
+3. damon_commit_ctx() failure due to its internal allocation failures.
+
+Hence, if the kdamond is stopped by any of the above three events, the
+values of the status parameters can be stale. Users could be shown the
+stale values and be confused. This is already bad, but the real
+consequence is worse.
+DAMON_LRU_SORT avoids unnecessary damon_start() and damon_stop() calls
+based on the 'enabled' parameter value. And the update of 'enabled'
+parameter value depends on the damon_start() and damon_stop() call
+results. Hence, once the kdamond has stopped by the unintentional events,
+the user cannot restart the kdamond before the system reboot. For
+example, the issue can be reproduced via below steps.
+
+ # cd /sys/module/damon_lru_sort/parameters
+ #
+ # # start DAMON_LRU_SORT
+ # echo Y > enabled
+ # ps -ef | grep kdamond
+ root 806 2 0 17:53 ? 00:00:00 [kdamond.0]
+ root 808 803 0 17:53 pts/4 00:00:00 grep kdamond
+ #
+ # # commit wrong input to stop kdamond without explicit stop request
+ # echo 3 > addr_unit
+ # echo Y > commit_inputs
+ bash: echo: write error: Invalid argument
+ #
+ # # confirm kdamond is stopped
+ # ps -ef | grep kdamond
+ root 811 803 0 17:53 pts/4 00:00:00 grep kdamond
+ #
+ # # users can now see stale status
+ # cat enabled
+ Y
+ # cat kdamond_pid
+ 806
+ #
+ # # even after fixing the wrong parameter,
+ # # kdamond cannot be restarted.
+ # echo 1 > addr_unit
+ # echo Y > enabled
+ # ps -ef | grep kdamond
+ root 815 803 0 17:54 pts/4 00:00:00 grep kdamond
+
+The problem will only rarely happen in real and common setups for the
+following reasons. The allocation failures are unlikely in such setups
+since those allocations are arguably too small to fail. Also sane users
+in real production environments may not commit wrong input parameters.
+But once it happens, the consequence is quite bad. And the bug is a bug.
+
+The issue stems from the fact that there are multiple events that can
+change the status, and following all the events is challenging.
+Dynamically detect and use the fresh status for the parameters when those
+are requested.
+
+Link: https://lore.kernel.org/20260419161003.79176-3-sj@kernel.org
+Fixes: 40e983cca927 ("mm/damon: introduce DAMON-based LRU-lists Sorting")
+Co-developed-by: Liew Rui Yan <aethernet65535@gmail.com>
+Signed-off-by: Liew Rui Yan <aethernet65535@gmail.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> # 6.0.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+(port parts of 42b7491af14c ("mm/damon/core: introduce damon_call()")
+and d2b5be741a50 ("mm/damon/sysfs: use DAMON core API
+damon_is_running()") for damon_is_running() dependency)
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/damon.h | 1
+ mm/damon/core.c | 16 +++++++++
+ mm/damon/lru_sort.c | 88 +++++++++++++++++++++++++++++++-------------------
+ 3 files changed, 73 insertions(+), 32 deletions(-)
+
+--- a/include/linux/damon.h
++++ b/include/linux/damon.h
+@@ -677,6 +677,7 @@ static inline unsigned int damon_max_nr_
+
+ int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
+ int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
++bool damon_is_running(struct damon_ctx *ctx);
+ int damon_kdamond_pid(struct damon_ctx *ctx);
+
+ int damon_set_region_biggest_system_ram_default(struct damon_target *t,
+--- a/mm/damon/core.c
++++ b/mm/damon/core.c
+@@ -763,6 +763,22 @@ int damon_stop(struct damon_ctx **ctxs,
+ }
+
+ /**
++ * damon_is_running() - Returns if a given DAMON context is running.
++ * @ctx: The DAMON context to see if running.
++ *
++ * Return: true if @ctx is running, false otherwise.
++ */
++bool damon_is_running(struct damon_ctx *ctx)
++{
++ bool running;
++
++ mutex_lock(&ctx->kdamond_lock);
++ running = ctx->kdamond != NULL;
++ mutex_unlock(&ctx->kdamond_lock);
++ return running;
++}
++
++/**
+ * damon_kdamond_pid() - Return pid of a given DAMON context's worker thread.
+ * @ctx: The DAMON context of the question.
+ *
+--- a/mm/damon/lru_sort.c
++++ b/mm/damon/lru_sort.c
+@@ -111,15 +111,6 @@ module_param(monitor_region_start, ulong
+ static unsigned long monitor_region_end __read_mostly;
+ module_param(monitor_region_end, ulong, 0600);
+
+-/*
+- * PID of the DAMON thread
+- *
+- * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
+- * Else, -1.
+- */
+-static int kdamond_pid __read_mostly = -1;
+-module_param(kdamond_pid, int, 0400);
+-
+ static struct damos_stat damon_lru_sort_hot_stat;
+ DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_lru_sort_hot_stat,
+ lru_sort_tried_hot_regions, lru_sorted_hot_regions,
+@@ -249,60 +240,93 @@ static int damon_lru_sort_turn(bool on)
+ {
+ int err;
+
+- if (!on) {
+- err = damon_stop(&ctx, 1);
+- if (!err)
+- kdamond_pid = -1;
+- return err;
+- }
++ if (!on)
++ return damon_stop(&ctx, 1);
+
+ err = damon_lru_sort_apply_parameters();
+ if (err)
+ return err;
+
+- err = damon_start(&ctx, 1, true);
+- if (err)
+- return err;
+- kdamond_pid = ctx->kdamond->pid;
+- return 0;
++ return damon_start(&ctx, 1, true);
++}
++
++static bool damon_lru_sort_enabled(void)
++{
++ if (!ctx)
++ return false;
++ return damon_is_running(ctx);
+ }
+
+ static int damon_lru_sort_enabled_store(const char *val,
+ const struct kernel_param *kp)
+ {
+- bool is_enabled = enabled;
+- bool enable;
+ int err;
+
+- err = kstrtobool(val, &enable);
++ err = kstrtobool(val, &enabled);
+ if (err)
+ return err;
+
+- if (is_enabled == enable)
++ if (damon_lru_sort_enabled() == enabled)
+ return 0;
+
+ /* Called before init function. The function will handle this. */
+ if (!ctx)
+- goto set_param_out;
++ return 0;
+
+- err = damon_lru_sort_turn(enable);
+- if (err)
+- return err;
++ return damon_lru_sort_turn(enabled);
++}
+
+-set_param_out:
+- enabled = enable;
+- return err;
++static int damon_lru_sort_enabled_load(char *buffer,
++ const struct kernel_param *kp)
++{
++ return sprintf(buffer, "%c\n", damon_lru_sort_enabled() ? 'Y' : 'N');
+ }
+
+ static const struct kernel_param_ops enabled_param_ops = {
+ .set = damon_lru_sort_enabled_store,
+- .get = param_get_bool,
++ .get = damon_lru_sort_enabled_load,
+ };
+
+ module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+ MODULE_PARM_DESC(enabled,
+ "Enable or disable DAMON_LRU_SORT (default: disabled)");
+
++static int damon_lru_sort_kdamond_pid_store(const char *val,
++ const struct kernel_param *kp)
++{
++ /*
++ * kdamond_pid is read-only, but kernel command line could write it.
++ * Do nothing here.
++ */
++ return 0;
++}
++
++static int damon_lru_sort_kdamond_pid_load(char *buffer,
++ const struct kernel_param *kp)
++{
++ int kdamond_pid = -1;
++
++ if (ctx) {
++ kdamond_pid = damon_kdamond_pid(ctx);
++ if (kdamond_pid < 0)
++ kdamond_pid = -1;
++ }
++ return sprintf(buffer, "%d\n", kdamond_pid);
++}
++
++static const struct kernel_param_ops kdamond_pid_param_ops = {
++ .set = damon_lru_sort_kdamond_pid_store,
++ .get = damon_lru_sort_kdamond_pid_load,
++};
++
++/*
++ * PID of the DAMON thread
++ *
++ * If DAMON_LRU_SORT is enabled, this becomes the PID of the worker thread.
++ * Else, -1.
++ */
++module_param_cb(kdamond_pid, &kdamond_pid_param_ops, NULL, 0400);
++
+ static int damon_lru_sort_handle_commit_inputs(void)
+ {
+ int err;
--- /dev/null
+From 64a140afa5ed1c6f5ba6d451512cbdbbab1ba339 Mon Sep 17 00:00:00 2001
+From: SeongJae Park <sj@kernel.org>
+Date: Sun, 19 Apr 2026 09:10:00 -0700
+Subject: mm/damon/reclaim: detect and use fresh enabled and kdamond_pid values
+
+From: SeongJae Park <sj@kernel.org>
+
+commit 64a140afa5ed1c6f5ba6d451512cbdbbab1ba339 upstream.
+
+Patch series "mm/damon/modules: detect and use fresh status", v3.
+
+DAMON modules including DAMON_RECLAIM, DAMON_LRU_SORT and DAMON_STAT
+commonly expose the kdamond running status via their parameters. Under
+certain scenarios including wrong user inputs and memory allocation
+failures, those parameter values can be stale. It can confuse users. For
+DAMON_RECLAIM and DAMON_LRU_SORT, it even makes the kdamond unable to be
+restarted before the system reboot.
+
+The problem comes from the fact that there are multiple events for the
+status changes and it is difficult to follow up all the scenarios. Fix
+the issue by detecting and using the status on demand, instead of using a
+cached status that is difficult to be updated.
+
+Patches 1-3 fix the bugs in DAMON_RECLAIM, DAMON_LRU_SORT and DAMON_STAT
+in the order.
+
+
+This patch (of 3):
+
+DAMON_RECLAIM updates 'enabled' and 'kdamond_pid' parameter values, which
+represent the running status of its kdamond, when the user explicitly
+requests start/stop of the kdamond. The kdamond can, however, be stopped
+by events other than an explicit user request, namely the following three
+events.
+
+1. ctx->regions_score_histogram allocation failure at beginning of the
+ execution,
+2. damon_commit_ctx() failure due to invalid user input, and
+3. damon_commit_ctx() failure due to its internal allocation failures.
+
+Hence, if the kdamond is stopped by any of the above three events, the
+values of the status parameters can be stale. Users could be shown the
+stale values and be confused. This is already bad, but the real
+consequence is worse.
+DAMON_RECLAIM avoids unnecessary damon_start() and damon_stop() calls
+based on the 'enabled' parameter value. And the update of 'enabled'
+parameter value depends on the damon_start() and damon_stop() call
+results. Hence, once the kdamond has stopped by the unintentional events,
+the user cannot restart the kdamond before the system reboot. For
+example, the issue can be reproduced via below steps.
+
+ # cd /sys/module/damon_reclaim/parameters
+ #
+ # # start DAMON_RECLAIM
+ # echo Y > enabled
+ # ps -ef | grep kdamond
+ root 806 2 0 17:53 ? 00:00:00 [kdamond.0]
+ root 808 803 0 17:53 pts/4 00:00:00 grep kdamond
+ #
+ # # commit wrong input to stop kdamond without explicit stop request
+ # echo 3 > addr_unit
+ # echo Y > commit_inputs
+ bash: echo: write error: Invalid argument
+ #
+ # # confirm kdamond is stopped
+ # ps -ef | grep kdamond
+ root 811 803 0 17:53 pts/4 00:00:00 grep kdamond
+ #
+ # # users can now see stale status
+ # cat enabled
+ Y
+ # cat kdamond_pid
+ 806
+ #
+ # # even after fixing the wrong parameter,
+ # # kdamond cannot be restarted.
+ # echo 1 > addr_unit
+ # echo Y > enabled
+ # ps -ef | grep kdamond
+ root 815 803 0 17:54 pts/4 00:00:00 grep kdamond
+
+The problem will only rarely happen in real and common setups for the
+following reasons. The allocation failures are unlikely in such setups
+since those allocations are arguably too small to fail. Also sane users
+in real production environments may not commit wrong input parameters.
+But once it happens, the consequence is quite bad. And the bug is a bug.
+
+The issue stems from the fact that there are multiple events that can
+change the status, and following all the events is challenging.
+Dynamically detect and use the fresh status for the parameters when those
+are requested.
+
+Link: https://lore.kernel.org/20260419161003.79176-1-sj@kernel.org
+Link: https://lore.kernel.org/20260419161003.79176-2-sj@kernel.org
+Fixes: e035c280f6df ("mm/damon/reclaim: support online inputs update")
+Co-developed-by: Liew Rui Yan <aethernet65535@gmail.com>
+Signed-off-by: Liew Rui Yan <aethernet65535@gmail.com>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Cc: <stable@vger.kernel.org> # 5.19.x
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: SeongJae Park <sj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/damon/reclaim.c | 88 +++++++++++++++++++++++++++++++++--------------------
+ 1 file changed, 56 insertions(+), 32 deletions(-)
+
+--- a/mm/damon/reclaim.c
++++ b/mm/damon/reclaim.c
+@@ -107,15 +107,6 @@ module_param(monitor_region_end, ulong,
+ static bool skip_anon __read_mostly;
+ module_param(skip_anon, bool, 0600);
+
+-/*
+- * PID of the DAMON thread
+- *
+- * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
+- * Else, -1.
+- */
+-static int kdamond_pid __read_mostly = -1;
+-module_param(kdamond_pid, int, 0400);
+-
+ static struct damos_stat damon_reclaim_stat;
+ DEFINE_DAMON_MODULES_DAMOS_STATS_PARAMS(damon_reclaim_stat,
+ reclaim_tried_regions, reclaimed_regions, quota_exceeds);
+@@ -203,60 +194,93 @@ static int damon_reclaim_turn(bool on)
+ {
+ int err;
+
+- if (!on) {
+- err = damon_stop(&ctx, 1);
+- if (!err)
+- kdamond_pid = -1;
+- return err;
+- }
++ if (!on)
++ return damon_stop(&ctx, 1);
+
+ err = damon_reclaim_apply_parameters();
+ if (err)
+ return err;
+
+- err = damon_start(&ctx, 1, true);
+- if (err)
+- return err;
+- kdamond_pid = ctx->kdamond->pid;
+- return 0;
++ return damon_start(&ctx, 1, true);
++}
++
++static bool damon_reclaim_enabled(void)
++{
++ if (!ctx)
++ return false;
++ return damon_is_running(ctx);
+ }
+
+ static int damon_reclaim_enabled_store(const char *val,
+ const struct kernel_param *kp)
+ {
+- bool is_enabled = enabled;
+- bool enable;
+ int err;
+
+- err = kstrtobool(val, &enable);
++ err = kstrtobool(val, &enabled);
+ if (err)
+ return err;
+
+- if (is_enabled == enable)
++ if (damon_reclaim_enabled() == enabled)
+ return 0;
+
+ /* Called before init function. The function will handle this. */
+ if (!ctx)
+- goto set_param_out;
++ return 0;
+
+- err = damon_reclaim_turn(enable);
+- if (err)
+- return err;
++ return damon_reclaim_turn(enabled);
++}
+
+-set_param_out:
+- enabled = enable;
+- return err;
++static int damon_reclaim_enabled_load(char *buffer,
++ const struct kernel_param *kp)
++{
++ return sprintf(buffer, "%c\n", damon_reclaim_enabled() ? 'Y' : 'N');
+ }
+
+ static const struct kernel_param_ops enabled_param_ops = {
+ .set = damon_reclaim_enabled_store,
+- .get = param_get_bool,
++ .get = damon_reclaim_enabled_load,
+ };
+
+ module_param_cb(enabled, &enabled_param_ops, &enabled, 0600);
+ MODULE_PARM_DESC(enabled,
+ "Enable or disable DAMON_RECLAIM (default: disabled)");
+
++static int damon_reclaim_kdamond_pid_store(const char *val,
++ const struct kernel_param *kp)
++{
++ /*
++ * kdamond_pid is read-only, but kernel command line could write it.
++ * Do nothing here.
++ */
++ return 0;
++}
++
++static int damon_reclaim_kdamond_pid_load(char *buffer,
++ const struct kernel_param *kp)
++{
++ int kdamond_pid = -1;
++
++ if (ctx) {
++ kdamond_pid = damon_kdamond_pid(ctx);
++ if (kdamond_pid < 0)
++ kdamond_pid = -1;
++ }
++ return sprintf(buffer, "%d\n", kdamond_pid);
++}
++
++static const struct kernel_param_ops kdamond_pid_param_ops = {
++ .set = damon_reclaim_kdamond_pid_store,
++ .get = damon_reclaim_kdamond_pid_load,
++};
++
++/*
++ * PID of the DAMON thread
++ *
++ * If DAMON_RECLAIM is enabled, this becomes the PID of the worker thread.
++ * Else, -1.
++ */
++module_param_cb(kdamond_pid, &kdamond_pid_param_ops, NULL, 0400);
++
+ static int damon_reclaim_handle_commit_inputs(void)
+ {
+ int err;
--- /dev/null
+From 18bcb4aa54eab75dce41e5c176a1c2bff94f0f79 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Cs=C3=B3k=C3=A1s=2C=20Bence?= <csokas.bence@prolan.hu>
+Date: Wed, 10 Jul 2024 11:14:01 +0200
+Subject: mtd: spi-nor: sst: Factor out common write operation to `sst_nor_write_data()`
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Bence Csókás <csokas.bence@prolan.hu>
+
+commit 18bcb4aa54eab75dce41e5c176a1c2bff94f0f79 upstream.
+
+Writing to the Flash in `sst_nor_write()` is a 3-step process:
+first an optional one-byte write to get 2-byte-aligned, then the
+bulk of the data is written out in vendor-specific 2-byte writes.
+Finally, if there's a byte left over, another one-byte write.
+This was implemented 3 times in the body of `sst_nor_write()`.
+To reduce code duplication, factor out these sub-steps to their
+own function.
+
+Signed-off-by: Csókás, Bence <csokas.bence@prolan.hu>
+Reviewed-by: Pratyush Yadav <pratyush@kernel.org>
+[pratyush@kernel.org: fixup whitespace, use %zu instead of %i in WARN()]
+Signed-off-by: Pratyush Yadav <pratyush@kernel.org>
+Link: https://lore.kernel.org/r/20240710091401.1282824-1-csokas.bence@prolan.hu
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/sst.c | 39 +++++++++++++++++++--------------------
+ 1 file changed, 19 insertions(+), 20 deletions(-)
+
+--- a/drivers/mtd/spi-nor/sst.c
++++ b/drivers/mtd/spi-nor/sst.c
+@@ -123,6 +123,21 @@ static const struct flash_info sst_nor_p
+ .fixups = &sst26vf_nor_fixups },
+ };
+
++static int sst_nor_write_data(struct spi_nor *nor, loff_t to, size_t len,
++ const u_char *buf)
++{
++ u8 op = (len == 1) ? SPINOR_OP_BP : SPINOR_OP_AAI_WP;
++ int ret;
++
++ nor->program_opcode = op;
++ ret = spi_nor_write_data(nor, to, len, buf);
++ if (ret < 0)
++ return ret;
++ WARN(ret != len, "While writing %zu bytes written %i bytes\n", len, ret);
++
++ return spi_nor_wait_till_ready(nor);
++}
++
+ static int sst_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
+ size_t *retlen, const u_char *buf)
+ {
+@@ -144,16 +159,10 @@ static int sst_nor_write(struct mtd_info
+
+ /* Start write from odd address. */
+ if (to % 2) {
+- nor->program_opcode = SPINOR_OP_BP;
+-
+ /* write one byte. */
+- ret = spi_nor_write_data(nor, to, 1, buf);
++ ret = sst_nor_write_data(nor, to, 1, buf);
+ if (ret < 0)
+ goto out;
+- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+- ret = spi_nor_wait_till_ready(nor);
+- if (ret)
+- goto out;
+
+ to++;
+ actual++;
+@@ -161,16 +170,11 @@ static int sst_nor_write(struct mtd_info
+
+ /* Write out most of the data here. */
+ for (; actual < len - 1; actual += 2) {
+- nor->program_opcode = SPINOR_OP_AAI_WP;
+-
+ /* write two bytes. */
+- ret = spi_nor_write_data(nor, to, 2, buf + actual);
++ ret = sst_nor_write_data(nor, to, 2, buf + actual);
+ if (ret < 0)
+ goto out;
+- WARN(ret != 2, "While writing 2 bytes written %i bytes\n", ret);
+- ret = spi_nor_wait_till_ready(nor);
+- if (ret)
+- goto out;
++
+ to += 2;
+ nor->sst_write_second = true;
+ }
+@@ -190,14 +194,9 @@ static int sst_nor_write(struct mtd_info
+ if (ret)
+ goto out;
+
+- nor->program_opcode = SPINOR_OP_BP;
+- ret = spi_nor_write_data(nor, to, 1, buf + actual);
++ ret = sst_nor_write_data(nor, to, 1, buf + actual);
+ if (ret < 0)
+ goto out;
+- WARN(ret != 1, "While writing 1 byte written %i bytes\n", ret);
+- ret = spi_nor_wait_till_ready(nor);
+- if (ret)
+- goto out;
+
+ actual += 1;
+
--- /dev/null
+From a0f64241d3566a49c0a9b33ba7ae458ae22003a9 Mon Sep 17 00:00:00 2001
+From: Sanjaikumar V S <sanjaikumar.vs@dicortech.com>
+Date: Wed, 11 Mar 2026 10:30:56 +0000
+Subject: mtd: spi-nor: sst: Fix write enable before AAI sequence
+
+From: Sanjaikumar V S <sanjaikumar.vs@dicortech.com>
+
+commit a0f64241d3566a49c0a9b33ba7ae458ae22003a9 upstream.
+
+When writing to SST flash starting at an odd address, a single byte is
+first programmed using the byte program (BP) command. After this
+operation completes, the flash hardware automatically clears the Write
+Enable Latch (WEL) bit.
+
+If an AAI (Auto Address Increment) word program sequence follows, it
+requires WEL to be set. Without re-enabling writes, the AAI sequence
+fails.
+
+Add spi_nor_write_enable() after the odd-address byte program when more
+data needs to be written. Use a local boolean for clarity.
+
+Fixes: b199489d37b2 ("mtd: spi-nor: add the framework for SPI NOR")
+Cc: stable@vger.kernel.org
+Signed-off-by: Sanjaikumar V S <sanjaikumar.vs@dicortech.com>
+Tested-by: Hendrik Donner <hd@os-cillation.de>
+Reviewed-by: Hendrik Donner <hd@os-cillation.de>
+Signed-off-by: Pratyush Yadav (Google) <pratyush@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mtd/spi-nor/sst.c | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+--- a/drivers/mtd/spi-nor/sst.c
++++ b/drivers/mtd/spi-nor/sst.c
+@@ -159,6 +159,8 @@ static int sst_nor_write(struct mtd_info
+
+ /* Start write from odd address. */
+ if (to % 2) {
++ bool needs_write_enable = (len > 1);
++
+ /* write one byte. */
+ ret = sst_nor_write_data(nor, to, 1, buf);
+ if (ret < 0)
+@@ -166,6 +168,17 @@ static int sst_nor_write(struct mtd_info
+
+ to++;
+ actual++;
++
++ /*
++ * Byte program clears the write enable latch. If more
++ * data needs to be written using the AAI sequence,
++ * re-enable writes.
++ */
++ if (needs_write_enable) {
++ ret = spi_nor_write_enable(nor);
++ if (ret)
++ goto out;
++ }
+ }
+
+ /* Write out most of the data here. */
--- /dev/null
+From 3962c24f2d14e8a7f8a23f56b7ce320523947342 Mon Sep 17 00:00:00 2001
+From: "Viorel Suman (OSS)" <viorel.suman@oss.nxp.com>
+Date: Wed, 11 Mar 2026 14:33:09 +0200
+Subject: pwm: imx-tpm: Count the number of enabled channels in probe
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Viorel Suman (OSS) <viorel.suman@oss.nxp.com>
+
+commit 3962c24f2d14e8a7f8a23f56b7ce320523947342 upstream.
+
+On a soft reset the TPM PWM IP may preserve its internal state from a
+previous runtime, so on a subsequent OS boot and driver probe the
+"enable_count" value and the "enabled" state of the TPM PWM IP's
+internal channels may get out of sync. As a consequence, on a
+suspend/resume cycle the call "if (--tpm->enable_count == 0)" may make
+"enable_count" overflow, leaving the system blocked from entering
+suspend due to:
+
+ if (tpm->enable_count > 0)
+ return -EBUSY;
+
+Fix the problem by counting the enabled channels in the probe function.
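+
+For illustration, with an unsigned counter the stale hardware state
+plays out roughly as follows (a sketch of the failure mode, not the
+driver's exact code):
+
+  /* bootloader left a channel enabled, but probe starts from zero */
+  tpm->enable_count = 0;
+  ...
+  if (--tpm->enable_count == 0)   /* wraps: 0 - 1 == UINT_MAX */
+          ...
+  /* suspend then sees enable_count > 0 and keeps returning -EBUSY */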
+
+Signed-off-by: Viorel Suman (OSS) <viorel.suman@oss.nxp.com>
+Fixes: 738a1cfec2ed ("pwm: Add i.MX TPM PWM driver support")
+Link: https://patch.msgid.link/20260311123309.348904-1-viorel.suman@oss.nxp.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Uwe Kleine-König <ukleinek@kernel.org>
+[ukleinek: backport to linux-6.6.y]
+Signed-off-by: Uwe Kleine-König <ukleinek@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pwm/pwm-imx-tpm.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/pwm/pwm-imx-tpm.c
++++ b/drivers/pwm/pwm-imx-tpm.c
+@@ -350,6 +350,7 @@ static int pwm_imx_tpm_probe(struct plat
+ {
+ struct imx_tpm_pwm_chip *tpm;
+ int ret;
++ unsigned int i;
+ u32 val;
+
+ tpm = devm_kzalloc(&pdev->dev, sizeof(*tpm), GFP_KERNEL);
+@@ -383,6 +384,13 @@ static int pwm_imx_tpm_probe(struct plat
+
+ mutex_init(&tpm->lock);
+
++ /* count the enabled channels */
++ for (i = 0; i < tpm->chip.npwm; ++i) {
++ val = readl(tpm->base + PWM_IMX_TPM_CnSC(i));
++ if (FIELD_GET(PWM_IMX_TPM_CnSC_ELS, val))
++ ++tpm->enable_count;
++ }
++
+ ret = pwmchip_add(&tpm->chip);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to add PWM chip: %d\n", ret);
--- /dev/null
+From aa54b1d27fe0c2b78e664a34fd0fdf7cd1960d71 Mon Sep 17 00:00:00 2001
+From: Hyunwoo Kim <imv4bel@gmail.com>
+Date: Fri, 8 May 2026 17:53:09 +0900
+Subject: rxrpc: Also unshare DATA/RESPONSE packets when paged frags are present
+
+From: Hyunwoo Kim <imv4bel@gmail.com>
+
+commit aa54b1d27fe0c2b78e664a34fd0fdf7cd1960d71 upstream.
+
+The DATA-packet handler in rxrpc_input_call_event() and the RESPONSE
+handler in rxrpc_verify_response() copy the skb to a linear one before
+calling into the security ops only when skb_cloned() is true. An skb
+that is not cloned but still carries externally-owned paged fragments
+(e.g. SKBFL_SHARED_FRAG set by splice() into a UDP socket via
+__ip_append_data, or a chained skb_has_frag_list()) falls through to
+the in-place decryption path, which binds the frag pages directly into
+the AEAD/skcipher SGL via skb_to_sgvec().
+
+Extend the gate to also unshare when skb_has_frag_list() or
+skb_has_shared_frag() is true. This catches the splice-loopback vector
+and other externally-shared frag sources while preserving the
+zero-copy fast path for skbs whose frags are kernel-private (e.g. NIC
+page_pool RX, GRO). The OOM/trace handling already in place is reused.
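+
+The widened gate amounts to the following predicate (an illustrative
+helper name only; the change open-codes the test in the two call sites
+shown below):
+
+  static bool rxrpc_skb_needs_unshare(const struct sk_buff *skb)
+  {
+          return skb_cloned(skb) || skb_has_frag_list(skb) ||
+                 skb_has_shared_frag(skb);
+  }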
+
+Fixes: d0d5c0cd1e71 ("rxrpc: Use skb_unshare() rather than skb_cow_data()")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hyunwoo Kim <imv4bel@gmail.com>
+Reviewed-by: Jiayuan Chen <jiayuan.chen@linux.dev>
+Acked-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/rxrpc/call_event.c | 4 +++-
+ net/rxrpc/conn_event.c | 3 ++-
+ 2 files changed, 5 insertions(+), 2 deletions(-)
+
+--- a/net/rxrpc/call_event.c
++++ b/net/rxrpc/call_event.c
+@@ -461,7 +461,9 @@ bool rxrpc_input_call_event(struct rxrpc
+
+ if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
+ sp->hdr.securityIndex != 0 &&
+- skb_cloned(skb)) {
++ (skb_cloned(skb) ||
++ skb_has_frag_list(skb) ||
++ skb_has_shared_frag(skb))) {
+ /* Unshare the packet so that it can be modified by
+ * in-place decryption.
+ */
+--- a/net/rxrpc/conn_event.c
++++ b/net/rxrpc/conn_event.c
+@@ -231,7 +231,8 @@ static int rxrpc_verify_response(struct
+ {
+ int ret;
+
+- if (skb_cloned(skb)) {
++ if (skb_cloned(skb) || skb_has_frag_list(skb) ||
++ skb_has_shared_frag(skb)) {
+ /* Copy the packet if shared so that we can do in-place
+ * decryption.
+ */
spi-microchip-core-qspi-fix-controller-deregistration.patch
fbcon-avoid-oob-font-access-if-console-rotation-fails.patch
rxrpc-fix-conn-level-packet-handling-to-unshare-response-packets.patch
+bluetooth-l2cap-fix-null-ptr-deref-in-l2cap_sock_get_sndtimeo_cb.patch
+bonding-fix-use-after-free-due-to-enslave-fail-after-slave-array-update.patch
+mm-damon-core-disallow-time-quota-setting-zero-esz.patch
+rxrpc-also-unshare-data-response-packets-when-paged-frags-are-present.patch
+mm-damon-core-implement-damon_kdamond_pid.patch
+mm-damon-lru_sort-detect-and-use-fresh-enabled-and-kdamond_pid-values.patch
+usb-typec-tcpm-reset-internal-port-states-on-soft-reset-ams.patch
+mm-damon-reclaim-detect-and-use-fresh-enabled-and-kdamond_pid-values.patch
+ksmbd-fix-use-after-free-in-__ksmbd_close_fd-via-durable-scavenger.patch
+mtd-spi-nor-sst-factor-out-common-write-operation-to-sst_nor_write_data.patch
+mtd-spi-nor-sst-fix-write-enable-before-aai-sequence.patch
+pwm-imx-tpm-count-the-number-of-enabled-channels-in-probe.patch
+batman-adv-stop-tp_meter-sessions-during-mesh-teardown.patch
+batman-adv-tp_meter-fix-tp_num-leak-on-kmalloc-failure.patch
+btrfs-fix-btrfs_ioctl_space_info-slot_count-toctou-which-can-lead-to-info-leak.patch
+tracing-probes-limit-size-of-event-probe-to-3k.patch
+usb-dwc3-move-guid-programming-after-phy-initialization.patch
+ceph-only-d_add-negative-dentries-when-they-are-unhashed.patch
+kvm-arm64-wake-up-from-wfi-when-iqrchip-is-in-userspace.patch
--- /dev/null
+From stable+bounces-247730-greg=kroah.com@vger.kernel.org Fri May 15 14:40:38 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 May 2026 08:06:51 -0400
+Subject: tracing/probes: Limit size of event probe to 3K
+To: stable@vger.kernel.org
+Cc: Steven Rostedt <rostedt@goodmis.org>, Mathieu Desnoyers <mathieu.desnoyers@efficios.com>, "Masami Hiramatsu (Google)" <mhiramat@kernel.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260515120651.3074726-1-sashal@kernel.org>
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+[ Upstream commit b2aa3b4d64e460ac606f386c24e7d8a873ce6f1a ]
+
+There currently isn't a maximum limit on how big an event probe can be.
+One could make an event greater than PAGE_SIZE, which makes the event
+useless: if it is bigger than the largest event that can be recorded
+into the ring buffer, it will never be recorded.
+
+An event probe should never need to be greater than 3K, so make that the
+max size. As long as the max is less than the largest event the ring
+buffer can record, it should be fine.
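+
+For scale, assuming the common 4 KiB page size:
+
+  PAGE_SIZE - MAX_PROBE_EVENT_SIZE = 4096 - 3072 = 1024
+
+so the cap sits comfortably below the largest event the ring buffer can
+hold once its own per-event headers are subtracted from a page.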
+
+Cc: stable@vger.kernel.org
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Acked-by: Masami Hiramatsu (Google) <mhiramat@kernel.org>
+Fixes: 93ccae7a22274 ("tracing/kprobes: Support basic types on dynamic events")
+Link: https://patch.msgid.link/20260428122302.706610ba@gandalf.local.home
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+[ adjusted context to place MAX_PROBE_EVENT_SIZE near MAX_STRING_SIZE and appended EVENT_TOO_BIG after NEED_STRING_TYPE ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/trace/trace_probe.c | 6 ++++++
+ kernel/trace/trace_probe.h | 4 +++-
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
+index d46a1033ba5b3..dee9494ed189a 100644
+--- a/kernel/trace/trace_probe.c
++++ b/kernel/trace/trace_probe.c
+@@ -1366,6 +1366,12 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size,
+ parg->offset = *size;
+ *size += parg->type->size * (parg->count ?: 1);
+
++ if (*size > MAX_PROBE_EVENT_SIZE) {
++ ret = -E2BIG;
++ trace_probe_log_err(ctx->offset, EVENT_TOO_BIG);
++ goto fail;
++ }
++
+ if (parg->count) {
+ len = strlen(parg->type->fmttype) + 6;
+ parg->fmt = kmalloc(len, GFP_KERNEL);
+diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
+index c71fa9c2f3815..ce5a0935cd45c 100644
+--- a/kernel/trace/trace_probe.h
++++ b/kernel/trace/trace_probe.h
+@@ -35,6 +35,7 @@
+ #define MAX_ARG_NAME_LEN 32
+ #define MAX_BTF_ARGS_LEN 128
+ #define MAX_STRING_SIZE PATH_MAX
++#define MAX_PROBE_EVENT_SIZE 3072
+
+ /* Reserved field names */
+ #define FIELD_STRING_IP "__probe_ip"
+@@ -546,7 +547,8 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call,
+ C(NO_BTF_FIELD, "This field is not found."), \
+ C(BAD_BTF_TID, "Failed to get BTF type info."),\
+ C(BAD_TYPE4STR, "This type does not fit for string."),\
+- C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),
++ C(NEED_STRING_TYPE, "$comm and immediate-string only accepts string type"),\
++ C(EVENT_TOO_BIG, "Event too big (too many fields?)"),
+
+ #undef C
+ #define C(a, b) TP_ERR_##a
+--
+2.53.0
+
--- /dev/null
+From sashal@kernel.org Wed May 13 15:11:20 2026
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2026 09:11:17 -0400
+Subject: usb: dwc3: Move GUID programming after PHY initialization
+To: stable@vger.kernel.org
+Cc: Selvarasu Ganesan <selvarasu.g@samsung.com>, stable <stable@kernel.org>, Pritam Manohar Sutar <pritam.sutar@samsung.com>, Thinh Nguyen <Thinh.Nguyen@synopsys.com>, Greg Kroah-Hartman <gregkh@linuxfoundation.org>, Sasha Levin <sashal@kernel.org>
+Message-ID: <20260513131117.3723171-1-sashal@kernel.org>
+
+From: Selvarasu Ganesan <selvarasu.g@samsung.com>
+
+[ Upstream commit aad35f9c926ec220b0742af1ada45666ae667956 ]
+
+The Linux Version Code is currently written to the GUID register before
+PHY initialization. Certain PHY implementations (such as Synopsys eUSB
+PHY performing link_sw_reset) clear the GUID register to its default
+value during initialization, causing the kernel version information to
+be lost.
+
+Move the GUID register programming to occur after PHY initialization
+completes to ensure the Linux version information persists.
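+
+The resulting ordering in dwc3_core_init() is roughly (a sketch of the
+flow only; the exact hunks are below):
+
+  ret = dwc3_phy_setup(dwc);
+  if (ret)
+          return ret;
+  ...                     /* core soft reset and PHY initialization */
+  /* only now is it safe to persist the kernel version code */
+  dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);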
+
+Fixes: fa0ea13e9f1c ("usb: dwc3: core: write LINUX_VERSION_CODE to our GUID register")
+Cc: stable <stable@kernel.org>
+Reported-by: Pritam Manohar Sutar <pritam.sutar@samsung.com>
+Signed-off-by: Selvarasu Ganesan <selvarasu.g@samsung.com>
+Acked-by: Thinh Nguyen <Thinh.Nguyen@synopsys.com>
+Link: https://patch.msgid.link/20260417063314.2359-1-selvarasu.g@samsung.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+[ adapted dwc3_writel(dwc, ...) to dwc3_writel(dwc->regs, ...) ]
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/dwc3/core.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/usb/dwc3/core.c
++++ b/drivers/usb/dwc3/core.c
+@@ -1240,12 +1240,6 @@ static int dwc3_core_init(struct dwc3 *d
+
+ hw_mode = DWC3_GHWPARAMS0_MODE(dwc->hwparams.hwparams0);
+
+- /*
+- * Write Linux Version Code to our GUID register so it's easy to figure
+- * out which kernel version a bug was found.
+- */
+- dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
+-
+ ret = dwc3_phy_setup(dwc);
+ if (ret)
+ return ret;
+@@ -1277,6 +1271,12 @@ static int dwc3_core_init(struct dwc3 *d
+ if (ret)
+ goto err_exit_phy;
+
++ /*
++ * Write Linux Version Code to our GUID register so it's easy to figure
++ * out which kernel version a bug was found.
++ */
++ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE);
++
+ dwc3_core_setup_global_control(dwc);
+ dwc3_core_num_eps(dwc);
+
--- /dev/null
+From 2909f0d4994fb4306bf116df5ccee797791fce2c Mon Sep 17 00:00:00 2001
+From: Amit Sunil Dhamne <amitsd@google.com>
+Date: Tue, 14 Apr 2026 00:58:32 +0000
+Subject: usb: typec: tcpm: reset internal port states on soft reset AMS
+
+From: Amit Sunil Dhamne <amitsd@google.com>
+
+commit 2909f0d4994fb4306bf116df5ccee797791fce2c upstream.
+
+Reset internal port states (such as vdm_sm_running and
+explicit_contract) on soft reset AMS as the port needs to negotiate a
+new contract. The consequences of leaving the states as-is are as
+follows:
+ * port is in SRC power role and an explicit contract is negotiated
+ with the port partner (in sink role)
+ * port partner sends a Soft Reset AMS while VDM State Machine is
+ running
+ * port accepts the Soft Reset request and the port advertises src caps
+ * port partner sends a Request message but since the explicit_contract
+ and vdm_sm_running are true from previous negotiation, the port ends
+ up sending Soft Reset instead of an Accept message.
+
+Stub Log:
+[ 203.653942] AMS DISCOVER_IDENTITY start
+[ 203.653947] PD TX, header: 0x176f
+[ 203.655901] PD TX complete, status: 0
+[ 203.657470] PD RX, header: 0x124f [1]
+[ 203.657477] Rx VDM cmd 0xff008081 type 2 cmd 1 len 1
+[ 203.657482] AMS DISCOVER_IDENTITY finished
+[ 203.657484] cc:=4
+[ 204.155698] PD RX, header: 0x144f [1]
+[ 204.155718] Rx VDM cmd 0xeeee8001 type 0 cmd 1 len 1
+[ 204.155741] PD TX, header: 0x196f
+[ 204.157622] PD TX complete, status: 0
+[ 204.160060] PD RX, header: 0x4d [1]
+[ 204.160066] state change SRC_READY -> SOFT_RESET [rev2 SOFT_RESET_AMS]
+[ 204.160076] PD TX, header: 0x163
+[ 204.162486] PD TX complete, status: 0
+[ 204.162832] AMS SOFT_RESET_AMS finished
+[ 204.162840] cc:=4
+[ 204.162891] AMS POWER_NEGOTIATION start
+[ 204.162896] state change SOFT_RESET -> AMS_START [rev2 POWER_NEGOTIATION]
+[ 204.162908] state change AMS_START -> SRC_SEND_CAPABILITIES [rev2 POWER_NEGOTIATION]
+[ 204.162913] PD TX, header: 0x1361
+[ 204.165529] PD TX complete, status: 0
+[ 204.165571] pending state change SRC_SEND_CAPABILITIES -> SRC_SEND_CAPABILITIES_TIMEOUT @ 60 ms [rev2 POWER_NEGOTIATION]
+[ 204.166996] PD RX, header: 0x1242 [1]
+[ 204.167009] state change SRC_SEND_CAPABILITIES -> SRC_SOFT_RESET_WAIT_SNK_TX [rev2 POWER_NEGOTIATION]
+[ 204.167019] AMS POWER_NEGOTIATION finished
+[ 204.167020] cc:=4
+[ 204.167083] AMS SOFT_RESET_AMS start
+[ 204.167086] state change SRC_SOFT_RESET_WAIT_SNK_TX -> SOFT_RESET_SEND [rev2 SOFT_RESET_AMS]
+[ 204.167092] PD TX, header: 0x16d
+[ 204.168824] PD TX complete, status: 0
+[ 204.168854] pending state change SOFT_RESET_SEND -> HARD_RESET_SEND @ 60 ms [rev2 SOFT_RESET_AMS]
+[ 204.171876] PD RX, header: 0x43 [1]
+[ 204.171879] AMS SOFT_RESET_AMS finished
+
+This causes COMMON.PROC.PD.11.2 check failure for
+TEST.PD.VDM.SRC.2_Rev2Src test on the PD compliance tester.
+
+Signed-off-by: Amit Sunil Dhamne <amitsd@google.com>
+Fixes: 8d3a0578ad1a ("usb: typec: tcpm: Respond Wait if VDM state machine is running")
+Fixes: f0690a25a140 ("staging: typec: USB Type-C Port Manager (tcpm)")
+Cc: stable <stable@kernel.org>
+Reviewed-by: Badhri Jagan Sridharan <badhri@google.com>
+Acked-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
+Link: https://patch.msgid.link/20260414-fix-soft-reset-v1-1-01d7cb9764e2@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/usb/typec/tcpm/tcpm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/usb/typec/tcpm/tcpm.c
++++ b/drivers/usb/typec/tcpm/tcpm.c
+@@ -4610,6 +4610,8 @@ static void run_state_machine(struct tcp
+ usb_power_delivery_unregister_capabilities(port->partner_source_caps);
+ port->partner_source_caps = NULL;
+ tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
++ port->vdm_sm_running = false;
++ port->explicit_contract = false;
+ tcpm_ams_finish(port);
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = SRC_SEND_CAPABILITIES;