--- /dev/null
+From 9b1dbd69ba6f8f8c69bc7b77c2ce3b9c6ed05ba6 Mon Sep 17 00:00:00 2001
+From: Mehul Rao <mehulrao@gmail.com>
+Date: Thu, 5 Mar 2026 14:35:07 -0500
+Subject: ALSA: pcm: fix use-after-free on linked stream runtime in snd_pcm_drain()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Mehul Rao <mehulrao@gmail.com>
+
+commit 9b1dbd69ba6f8f8c69bc7b77c2ce3b9c6ed05ba6 upstream.
+
+In the drain loop, the local variable 'runtime' is reassigned to a
+linked stream's runtime (runtime = s->runtime at line 2157). After
+releasing the stream lock at line 2169, the code accesses
+runtime->no_period_wakeup, runtime->rate, and runtime->buffer_size
+(lines 2170-2178) — all referencing the linked stream's runtime without
+any lock or refcount protecting its lifetime.
+
+A concurrent close() on the linked stream's fd triggers
+snd_pcm_release_substream() → snd_pcm_drop() → pcm_release_private()
+→ snd_pcm_unlink() → snd_pcm_detach_substream() → kfree(runtime).
+No synchronization prevents kfree(runtime) from completing while the
+drain path dereferences the stale pointer.
+
+Fix by caching the needed runtime fields (no_period_wakeup, rate,
+buffer_size) into local variables while still holding the stream lock,
+and using the cached values after the lock is released.
+
+Fixes: f2b3614cefb6 ("ALSA: PCM - Don't check DMA time-out too shortly")
+Cc: stable@vger.kernel.org
+Signed-off-by: Mehul Rao <mehulrao@gmail.com>
+Link: https://patch.msgid.link/20260305193508.311096-1-mehulrao@gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/core/pcm_native.c | 19 ++++++++++++++++---
+ 1 file changed, 16 insertions(+), 3 deletions(-)
+
+--- a/sound/core/pcm_native.c
++++ b/sound/core/pcm_native.c
+@@ -2144,6 +2144,10 @@ static int snd_pcm_drain(struct snd_pcm_
+ for (;;) {
+ long tout;
+ struct snd_pcm_runtime *to_check;
++ unsigned int drain_rate;
++ snd_pcm_uframes_t drain_bufsz;
++ bool drain_no_period_wakeup;
++
+ if (signal_pending(current)) {
+ result = -ERESTARTSYS;
+ break;
+@@ -2163,16 +2167,25 @@ static int snd_pcm_drain(struct snd_pcm_
+ snd_pcm_group_unref(group, substream);
+ if (!to_check)
+ break; /* all drained */
++ /*
++ * Cache the runtime fields needed after unlock.
++ * A concurrent close() on the linked stream may free
++ * its runtime via snd_pcm_detach_substream() once we
++ * release the stream lock below.
++ */
++ drain_no_period_wakeup = to_check->no_period_wakeup;
++ drain_rate = to_check->rate;
++ drain_bufsz = to_check->buffer_size;
+ init_waitqueue_entry(&wait, current);
+ set_current_state(TASK_INTERRUPTIBLE);
+ add_wait_queue(&to_check->sleep, &wait);
+ snd_pcm_stream_unlock_irq(substream);
+- if (runtime->no_period_wakeup)
++ if (drain_no_period_wakeup)
+ tout = MAX_SCHEDULE_TIMEOUT;
+ else {
+ tout = 100;
+- if (runtime->rate) {
+- long t = runtime->buffer_size * 1100 / runtime->rate;
++ if (drain_rate) {
++ long t = drain_bufsz * 1100 / drain_rate;
+ tout = max(t, tout);
+ }
+ tout = msecs_to_jiffies(tout);
--- /dev/null
+From df1d8abf36ca3681c21a6809eaa9a1e01ef897a6 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Mon, 9 Mar 2026 11:46:27 +0100
+Subject: ALSA: usb-audio: Check endpoint numbers at parsing Scarlett2 mixer interfaces
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit df1d8abf36ca3681c21a6809eaa9a1e01ef897a6 upstream.
+
+The Scarlett2 mixer quirk in USB-audio driver may hit a NULL
+dereference when a malformed USB descriptor is passed, since it
+assumes the presence of an endpoint in the parsed interface in
+scarlett2_find_fc_interface(), as reported by fuzzer.
+
+For avoiding the NULL dereference, just add the sanity check of
+bNumEndpoints and skip the invalid interface.
+
+Reported-by: syzbot+8f29539ef9a1c8334f42@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/69acbbe1.050a0220.310d8.0001.GAE@google.com
+Reported-by: syzbot+ae893a8901067fde2741@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/69acf72a.050a0220.310d8.0004.GAE@google.com
+Cc: <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20260309104632.141895-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/usb/mixer_scarlett2.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/sound/usb/mixer_scarlett2.c
++++ b/sound/usb/mixer_scarlett2.c
+@@ -8251,6 +8251,8 @@ static int scarlett2_find_fc_interface(s
+
+ if (desc->bInterfaceClass != 255)
+ continue;
++ if (desc->bNumEndpoints < 1)
++ continue;
+
+ epd = get_endpoint(intf->altsetting, 0);
+ private->bInterfaceNumber = desc->bInterfaceNumber;
--- /dev/null
+From 2df6162785f31f1bbb598cfc3b08e4efc88f80b6 Mon Sep 17 00:00:00 2001
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+Date: Thu, 19 Feb 2026 13:57:34 +0100
+Subject: can: gs_usb: gs_can_open(): always configure bitrates before starting device
+
+From: Marc Kleine-Budde <mkl@pengutronix.de>
+
+commit 2df6162785f31f1bbb598cfc3b08e4efc88f80b6 upstream.
+
+So far the driver populated the struct can_priv::do_set_bittiming() and
+struct can_priv::fd::do_set_data_bittiming() callbacks.
+
+Before bringing up the interface, user space has to configure the bitrates.
+With these callbacks the configuration is directly forwarded into the CAN
+hardware. Then the interface can be brought up.
+
+An ifdown-ifup cycle (without changing the bit rates) doesn't re-configure
+the bitrates in the CAN hardware. This leads to a problem with the
+CANable-2.5 [1] firmware, which resets the configured bit rates during
+ifdown.
+
+To fix the problem remove both bit timing callbacks and always configure
+the bitrates in the struct net_device_ops::ndo_open() callback.
+
+[1] https://github.com/Elmue/CANable-2.5-firmware-Slcan-and-Candlelight
+
+Cc: stable@vger.kernel.org
+Fixes: d08e973a77d1 ("can: gs_usb: Added support for the GS_USB CAN devices")
+Link: https://patch.msgid.link/20260219-gs_usb-always-configure-bitrates-v2-1-671f8ba5b0a5@pengutronix.de
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/can/usb/gs_usb.c | 22 ++++++++++++++++------
+ 1 file changed, 16 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/can/usb/gs_usb.c
++++ b/drivers/net/can/usb/gs_usb.c
+@@ -772,9 +772,8 @@ device_detach:
+ }
+ }
+
+-static int gs_usb_set_bittiming(struct net_device *netdev)
++static int gs_usb_set_bittiming(struct gs_can *dev)
+ {
+- struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.bittiming;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+@@ -791,9 +790,8 @@ static int gs_usb_set_bittiming(struct n
+ GFP_KERNEL);
+ }
+
+-static int gs_usb_set_data_bittiming(struct net_device *netdev)
++static int gs_usb_set_data_bittiming(struct gs_can *dev)
+ {
+- struct gs_can *dev = netdev_priv(netdev);
+ struct can_bittiming *bt = &dev->can.fd.data_bittiming;
+ struct gs_device_bittiming dbt = {
+ .prop_seg = cpu_to_le32(bt->prop_seg),
+@@ -1057,6 +1055,20 @@ static int gs_can_open(struct net_device
+ if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
+ flags |= GS_CAN_MODE_HW_TIMESTAMP;
+
++ rc = gs_usb_set_bittiming(dev);
++ if (rc) {
++ netdev_err(netdev, "failed to set bittiming: %pe\n", ERR_PTR(rc));
++ goto out_usb_kill_anchored_urbs;
++ }
++
++ if (ctrlmode & CAN_CTRLMODE_FD) {
++ rc = gs_usb_set_data_bittiming(dev);
++ if (rc) {
++ netdev_err(netdev, "failed to set data bittiming: %pe\n", ERR_PTR(rc));
++ goto out_usb_kill_anchored_urbs;
++ }
++ }
++
+ /* finally start device */
+ dev->can.state = CAN_STATE_ERROR_ACTIVE;
+ dm.flags = cpu_to_le32(flags);
+@@ -1357,7 +1369,6 @@ static struct gs_can *gs_make_candev(uns
+ dev->can.state = CAN_STATE_STOPPED;
+ dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
+ dev->can.bittiming_const = &dev->bt_const;
+- dev->can.do_set_bittiming = gs_usb_set_bittiming;
+
+ dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;
+
+@@ -1381,7 +1392,6 @@ static struct gs_can *gs_make_candev(uns
+ * GS_CAN_FEATURE_BT_CONST_EXT is set.
+ */
+ dev->can.fd.data_bittiming_const = &dev->bt_const;
+- dev->can.fd.do_set_data_bittiming = gs_usb_set_data_bittiming;
+ }
+
+ if (feature & GS_CAN_FEATURE_TERMINATION) {
--- /dev/null
+From 5ee01f1a7343d6a3547b6802ca2d4cdce0edacb1 Mon Sep 17 00:00:00 2001
+From: Qingye Zhao <zhaoqingye@honor.com>
+Date: Wed, 11 Feb 2026 09:24:04 +0000
+Subject: cgroup: fix race between task migration and iteration
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Qingye Zhao <zhaoqingye@honor.com>
+
+commit 5ee01f1a7343d6a3547b6802ca2d4cdce0edacb1 upstream.
+
+When a task is migrated out of a css_set, cgroup_migrate_add_task()
+first moves it from cset->tasks to cset->mg_tasks via:
+
+ list_move_tail(&task->cg_list, &cset->mg_tasks);
+
+If a css_task_iter currently has it->task_pos pointing to this task,
+css_set_move_task() calls css_task_iter_skip() to keep the iterator
+valid. However, since the task has already been moved to ->mg_tasks,
+the iterator is advanced relative to the mg_tasks list instead of the
+original tasks list. As a result, remaining tasks on cset->tasks, as
+well as tasks queued on cset->mg_tasks, can be skipped by iteration.
+
+Fix this by calling css_set_skip_task_iters() before unlinking
+task->cg_list from cset->tasks. This advances all active iterators to
+the next task on cset->tasks, so iteration continues correctly even
+when a task is concurrently being migrated.
+
+This race is hard to hit in practice without instrumentation, but it
+can be reproduced by artificially slowing down cgroup_procs_show().
+For example, on an Android device a temporary
+/sys/kernel/cgroup/cgroup_test knob can be added to inject a delay
+into cgroup_procs_show(), and then:
+
+ 1) Spawn three long-running tasks (PIDs 101, 102, 103).
+ 2) Create a test cgroup and move the tasks into it.
+ 3) Enable a large delay via /sys/kernel/cgroup/cgroup_test.
+ 4) In one shell, read cgroup.procs from the test cgroup.
+ 5) Within the delay window, in another shell migrate PID 102 by
+ writing it to a different cgroup.procs file.
+
+Under this setup, cgroup.procs can intermittently show only PID 101
+while skipping PID 103. Once the migration completes, reading the
+file again shows all tasks as expected.
+
+Note that this change does not allow removing the existing
+css_set_skip_task_iters() call in css_set_move_task(). The new call
+in cgroup_migrate_add_task() only handles iterators that are racing
+with migration while the task is still on cset->tasks. Iterators may
+also start after the task has been moved to cset->mg_tasks. If we
+dropped css_set_skip_task_iters() from css_set_move_task(), such
+iterators could keep task_pos pointing to a migrating task, causing
+css_task_iter_advance() to malfunction on the destination css_set,
+up to and including crashes or infinite loops.
+
+The race window between migration and iteration is very small, and
+css_task_iter is not on a hot path. In the worst case, when an
+iterator is positioned on the first thread of the migrating process,
+cgroup_migrate_add_task() may have to skip multiple tasks via
+css_set_skip_task_iters(). However, this only happens when migration
+and iteration actually race, so the performance impact is negligible
+compared to the correctness fix provided here.
+
+Fixes: b636fd38dc40 ("cgroup: Implement css_task_iter_skip()")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Qingye Zhao <zhaoqingye@honor.com>
+Reviewed-by: Michal Koutný <mkoutny@suse.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/cgroup/cgroup.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -2611,6 +2611,7 @@ static void cgroup_migrate_add_task(stru
+
+ mgctx->tset.nr_tasks++;
+
++ css_set_skip_task_iters(cset, task);
+ list_move_tail(&task->cg_list, &cset->mg_tasks);
+ if (list_empty(&cset->mg_node))
+ list_add_tail(&cset->mg_node,
--- /dev/null
+From e4f774a0cc955ce762aec91c66915a6e15087ab7 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Thu, 5 Mar 2026 15:34:26 +0100
+Subject: net: usb: lan78xx: fix silent drop of packets with checksum errors
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit e4f774a0cc955ce762aec91c66915a6e15087ab7 upstream.
+
+Do not drop packets with checksum errors at the USB driver level;
+pass them to the network stack.
+
+Previously, the driver dropped all packets where the 'Receive Error
+Detected' (RED) bit was set, regardless of the specific error type. This
+caused packets with only IP or TCP/UDP checksum errors to be dropped
+before reaching the kernel, preventing the network stack from accounting
+for them or performing software fallback.
+
+Add a mask for hard hardware errors to safely drop genuinely corrupt
+frames, while allowing checksum-errored frames to pass with their
+ip_summed field explicitly set to CHECKSUM_NONE.
+
+Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet device driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/20260305143429.530909-2-o.rempel@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/lan78xx.c | 4 +++-
+ drivers/net/usb/lan78xx.h | 3 +++
+ 2 files changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3829,6 +3829,7 @@ static void lan78xx_rx_csum_offload(stru
+ */
+ if (!(dev->net->features & NETIF_F_RXCSUM) ||
+ unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
++ unlikely(rx_cmd_a & RX_CMD_A_CSE_MASK_) ||
+ ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+ !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
+ skb->ip_summed = CHECKSUM_NONE;
+@@ -3901,7 +3902,8 @@ static int lan78xx_rx(struct lan78xx_net
+ return 0;
+ }
+
+- if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
++ if (unlikely(rx_cmd_a & RX_CMD_A_RED_) &&
++ (rx_cmd_a & RX_CMD_A_RX_HARD_ERRS_MASK_)) {
+ netif_dbg(dev, rx_err, dev->net,
+ "Error rx_cmd_a=0x%08x", rx_cmd_a);
+ } else {
+--- a/drivers/net/usb/lan78xx.h
++++ b/drivers/net/usb/lan78xx.h
+@@ -74,6 +74,9 @@
+ #define RX_CMD_A_ICSM_ (0x00004000)
+ #define RX_CMD_A_LEN_MASK_ (0x00003FFF)
+
++#define RX_CMD_A_RX_HARD_ERRS_MASK_ \
++ (RX_CMD_A_RX_ERRS_MASK_ & ~RX_CMD_A_CSE_MASK_)
++
+ /* Rx Command B */
+ #define RX_CMD_B_CSUM_SHIFT_ (16)
+ #define RX_CMD_B_CSUM_MASK_ (0xFFFF0000)
--- /dev/null
+From 50988747c30df47b73b787f234f746027cb7ec6c Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Thu, 5 Mar 2026 15:34:27 +0100
+Subject: net: usb: lan78xx: fix TX byte statistics for small packets
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit 50988747c30df47b73b787f234f746027cb7ec6c upstream.
+
+Account for hardware auto-padding in TX byte counters to reflect actual
+wire traffic.
+
+The LAN7850 hardware automatically pads undersized frames to the minimum
+Ethernet frame length (ETH_ZLEN, 60 bytes). However, the driver tracks
+the network statistics based on the unpadded socket buffer length. This
+results in the tx_bytes counter under-reporting the actual physical
+bytes placed on the Ethernet wire for small packets (like short ARP or
+ICMP requests).
+
+Use max_t() to ensure the transmission statistics accurately account for
+the hardware-generated padding.
+
+Fixes: d383216a7efe ("lan78xx: Introduce Tx URB processing improvements")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/20260305143429.530909-3-o.rempel@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/lan78xx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -4178,7 +4178,7 @@ static struct skb_data *lan78xx_tx_buf_f
+ }
+
+ tx_data += len;
+- entry->length += len;
++ entry->length += max_t(unsigned int, len, ETH_ZLEN);
+ entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;
+
+ dev_kfree_skb_any(skb);
--- /dev/null
+From 312c816c6bc30342bc30dca0d6db617ab4d3ae4e Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Thu, 5 Mar 2026 15:34:29 +0100
+Subject: net: usb: lan78xx: fix WARN in __netif_napi_del_locked on disconnect
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit 312c816c6bc30342bc30dca0d6db617ab4d3ae4e upstream.
+
+Remove redundant netif_napi_del() call from disconnect path.
+
+A WARN may be triggered in __netif_napi_del_locked() during USB device
+disconnect:
+
+ WARNING: CPU: 0 PID: 11 at net/core/dev.c:7417 __netif_napi_del_locked+0x2b4/0x350
+
+This happens because netif_napi_del() is called in the disconnect path while
+NAPI is still enabled. However, it is not necessary to call netif_napi_del()
+explicitly, since unregister_netdev() will handle NAPI teardown automatically
+and safely. Removing the redundant call avoids triggering the warning.
+
+Full trace:
+ lan78xx 1-1:1.0 enu1: Failed to read register index 0x000000c4. ret = -ENODEV
+ lan78xx 1-1:1.0 enu1: Failed to set MAC down with error -ENODEV
+ lan78xx 1-1:1.0 enu1: Link is Down
+ lan78xx 1-1:1.0 enu1: Failed to read register index 0x00000120. ret = -ENODEV
+ ------------[ cut here ]------------
+ WARNING: CPU: 0 PID: 11 at net/core/dev.c:7417 __netif_napi_del_locked+0x2b4/0x350
+ Modules linked in: flexcan can_dev fuse
+ CPU: 0 UID: 0 PID: 11 Comm: kworker/0:1 Not tainted 6.16.0-rc2-00624-ge926949dab03 #9 PREEMPT
+ Hardware name: SKOV IMX8MP CPU revC - bd500 (DT)
+ Workqueue: usb_hub_wq hub_event
+ pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--)
+ pc : __netif_napi_del_locked+0x2b4/0x350
+ lr : __netif_napi_del_locked+0x7c/0x350
+ sp : ffffffc085b673c0
+ x29: ffffffc085b673c0 x28: ffffff800b7f2000 x27: ffffff800b7f20d8
+ x26: ffffff80110bcf58 x25: ffffff80110bd978 x24: 1ffffff0022179eb
+ x23: ffffff80110bc000 x22: ffffff800b7f5000 x21: ffffff80110bc000
+ x20: ffffff80110bcf38 x19: ffffff80110bcf28 x18: dfffffc000000000
+ x17: ffffffc081578940 x16: ffffffc08284cee0 x15: 0000000000000028
+ x14: 0000000000000006 x13: 0000000000040000 x12: ffffffb0022179e8
+ x11: 1ffffff0022179e7 x10: ffffffb0022179e7 x9 : dfffffc000000000
+ x8 : 0000004ffdde8619 x7 : ffffff80110bcf3f x6 : 0000000000000001
+ x5 : ffffff80110bcf38 x4 : ffffff80110bcf38 x3 : 0000000000000000
+ x2 : 0000000000000000 x1 : 1ffffff0022179e7 x0 : 0000000000000000
+ Call trace:
+ __netif_napi_del_locked+0x2b4/0x350 (P)
+ lan78xx_disconnect+0xf4/0x360
+ usb_unbind_interface+0x158/0x718
+ device_remove+0x100/0x150
+ device_release_driver_internal+0x308/0x478
+ device_release_driver+0x1c/0x30
+ bus_remove_device+0x1a8/0x368
+ device_del+0x2e0/0x7b0
+ usb_disable_device+0x244/0x540
+ usb_disconnect+0x220/0x758
+ hub_event+0x105c/0x35e0
+ process_one_work+0x760/0x17b0
+ worker_thread+0x768/0xce8
+ kthread+0x3bc/0x690
+ ret_from_fork+0x10/0x20
+ irq event stamp: 211604
+ hardirqs last enabled at (211603): [<ffffffc0828cc9ec>] _raw_spin_unlock_irqrestore+0x84/0x98
+ hardirqs last disabled at (211604): [<ffffffc0828a9a84>] el1_dbg+0x24/0x80
+ softirqs last enabled at (211296): [<ffffffc080095f10>] handle_softirqs+0x820/0xbc8
+ softirqs last disabled at (210993): [<ffffffc080010288>] __do_softirq+0x18/0x20
+ ---[ end trace 0000000000000000 ]---
+ lan78xx 1-1:1.0 enu1: failed to kill vid 0081/0
+
+Fixes: e110bc825897 ("net: usb: lan78xx: Convert to PHYLINK for improved PHY and MAC management")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/20260305143429.530909-5-o.rempel@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/lan78xx.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -4548,8 +4548,6 @@ static void lan78xx_disconnect(struct us
+ phylink_disconnect_phy(dev->phylink);
+ rtnl_unlock();
+
+- netif_napi_del(&dev->napi);
+-
+ unregister_netdev(net);
+
+ timer_shutdown_sync(&dev->stat_monitor);
--- /dev/null
+From d9cc0e440f0664f6f3e2c26e39ab9dd5f3badba7 Mon Sep 17 00:00:00 2001
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+Date: Thu, 5 Mar 2026 15:34:28 +0100
+Subject: net: usb: lan78xx: skip LTM configuration for LAN7850
+
+From: Oleksij Rempel <o.rempel@pengutronix.de>
+
+commit d9cc0e440f0664f6f3e2c26e39ab9dd5f3badba7 upstream.
+
+Do not configure Latency Tolerance Messaging (LTM) on USB 2.0 hardware.
+
+The LAN7850 is a High-Speed (USB 2.0) only device and does not support
+SuperSpeed features like LTM. Currently, the driver unconditionally
+attempts to configure LTM registers during initialization. On the
+LAN7850, these registers do not exist, resulting in writes to invalid
+or undocumented memory space.
+
+This issue was identified during a port to the regmap API with strict
+register validation enabled. While no functional issues or crashes have
+been observed from these invalid writes, bypassing LTM initialization
+on the LAN7850 ensures the driver strictly adheres to the hardware's
+valid register map.
+
+Fixes: 55d7de9de6c3 ("Microchip's LAN7800 family USB 2/3 to 10/100/1000 Ethernet device driver")
+Cc: stable@vger.kernel.org
+Signed-off-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/20260305143429.530909-4-o.rempel@pengutronix.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/usb/lan78xx.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/net/usb/lan78xx.c
++++ b/drivers/net/usb/lan78xx.c
+@@ -3119,6 +3119,10 @@ static int lan78xx_init_ltm(struct lan78
+ int ret;
+ u32 buf;
+
++ /* LAN7850 is USB 2.0 and does not support LTM */
++ if (dev->chipid == ID_REV_CHIP_ID_7850_)
++ return 0;
++
+ ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
+ if (ret < 0)
+ goto init_ltm_failed;
--- /dev/null
+From 592c61f3bfceaa29f8275696bd67c3dfad7ef72e Mon Sep 17 00:00:00 2001
+From: Miguel Ojeda <ojeda@kernel.org>
+Date: Thu, 12 Mar 2026 12:10:14 +0100
+Subject: rust: kbuild: allow `unused_features`
+
+From: Miguel Ojeda <ojeda@kernel.org>
+
+commit 592c61f3bfceaa29f8275696bd67c3dfad7ef72e upstream.
+
+Starting with the upcoming Rust 1.96.0 (to be released 2026-05-28),
+`rustc` introduces the new lint `unused_features` [1], which warns [2]:
+
+ warning: feature `used_with_arg` is declared but not used
+ --> <crate attribute>:1:93
+ |
+ 1 | #![feature(asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,used_with_arg)]
+ | ^^^^^^^^^^^^^
+ |
+ = note: `#[warn(unused_features)]` (part of `#[warn(unused)]`) on by default
+
+The original goal of using `-Zcrate-attr` automatically was that there
+is a consistent set of features enabled and managed globally for all
+Rust kernel code (modulo exceptions like the `rust/` crates).
+
+While we could require crates to enable features manually (even if we
+still keep the `-Zallow-features=` list, i.e. removing the `-Zcrate-attr`
+list), it is not really worth making all developers worry about it just
+for a new lint.
+
+The features are expected to eventually become stable anyway (most already
+did), and thus having to remove features in every file that may use them
+is not worth it either.
+
+Thus just allow the new lint globally.
+
+The lint actually existed for a long time, which is why `rustc` does
+not complain about an unknown lint in the stable versions we support,
+but it was "disabled" years ago [3], and now it was made to work again.
+
+For extra context, the new implementation of the lint has already been
+improved to avoid linting about features that became stable thanks to
+Benno's report and the ensuing discussion [4] [5], but while that helps,
+it is still the case that we may have features enabled that are not used
+for one reason or another in a particular crate.
+
+Cc: stable@vger.kernel.org # Needed in 6.12.y and later (Rust is pinned in older LTSs).
+Link: https://github.com/rust-lang/rust/pull/152164 [1]
+Link: https://github.com/Rust-for-Linux/pin-init/pull/114 [2]
+Link: https://github.com/rust-lang/rust/issues/44232 [3]
+Link: https://github.com/rust-lang/rust/issues/153523 [4]
+Link: https://github.com/rust-lang/rust/pull/153610 [5]
+Reviewed-by: Benno Lossin <lossin@kernel.org>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Link: https://patch.msgid.link/20260312111014.74198-1-ojeda@kernel.org
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ Makefile | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/Makefile
++++ b/Makefile
+@@ -473,6 +473,7 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
+ export rust_common_flags := --edition=2021 \
+ -Zbinary_dep_depinfo=y \
+ -Astable_features \
++ -Aunused_features \
+ -Dnon_ascii_idents \
+ -Dunsafe_op_in_unsafe_fn \
+ -Wmissing_docs \
--- /dev/null
+From 3ac88a9948792b092a4b11323e2abd1ecbe0cc68 Mon Sep 17 00:00:00 2001
+From: Alexandre Courbot <acourbot@nvidia.com>
+Date: Tue, 24 Feb 2026 11:25:34 +0900
+Subject: rust: str: make NullTerminatedFormatter public
+
+From: Alexandre Courbot <acourbot@nvidia.com>
+
+commit 3ac88a9948792b092a4b11323e2abd1ecbe0cc68 upstream.
+
+If `CONFIG_BLOCK` is disabled, the following warnings are displayed
+during build:
+
+ warning: struct `NullTerminatedFormatter` is never constructed
+ --> ../rust/kernel/str.rs:667:19
+ |
+ 667 | pub(crate) struct NullTerminatedFormatter<'a> {
+ | ^^^^^^^^^^^^^^^^^^^^^^^
+ |
+ = note: `#[warn(dead_code)]` (part of `#[warn(unused)]`) on by default
+
+ warning: associated function `new` is never used
+ --> ../rust/kernel/str.rs:673:19
+ |
+ 671 | impl<'a> NullTerminatedFormatter<'a> {
+ | ------------------------------------ associated function in this implementation
+ 672 | /// Create a new [`Self`] instance.
+ 673 | pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
+
+Fix them by making `NullTerminatedFormatter` public, as it could be
+useful for drivers anyway.
+
+Fixes: cdde7a1951ff ("rust: str: introduce `NullTerminatedFormatter`")
+Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: Andreas Hindborg <a.hindborg@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://patch.msgid.link/20260224-nullterminatedformatter-v1-1-5bef7b9b3d4c@nvidia.com
+Signed-off-by: Miguel Ojeda <ojeda@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ rust/kernel/str.rs | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/rust/kernel/str.rs
++++ b/rust/kernel/str.rs
+@@ -880,13 +880,13 @@ impl fmt::Write for Formatter<'_> {
+ ///
+ /// * The first byte of `buffer` is always zero.
+ /// * The length of `buffer` is at least 1.
+-pub(crate) struct NullTerminatedFormatter<'a> {
++pub struct NullTerminatedFormatter<'a> {
+ buffer: &'a mut [u8],
+ }
+
+ impl<'a> NullTerminatedFormatter<'a> {
+ /// Create a new [`Self`] instance.
+- pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
++ pub fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
+ *(buffer.first_mut()?) = 0;
+
+ // INVARIANT:
--- /dev/null
+From 4cb9e13fec0de7c942f5f927469beb8e48ddd20f Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Wed, 18 Feb 2026 11:53:27 +0000
+Subject: rust_binder: avoid reading the written value in offsets array
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit 4cb9e13fec0de7c942f5f927469beb8e48ddd20f upstream.
+
+When sending a transaction, its offsets array is first copied into the
+target proc's vma, and then the values are read back from there. This is
+normally fine because the vma is a read-only mapping, so the target
+process cannot change the value under us.
+
+However, if the target process somehow gains the ability to write to its
+own vma, it could change the offset before it's read back, causing the
+kernel to misinterpret what the sender meant. If the sender happens to
+send a payload with a specific shape, this could in the worst case lead
+to the receiver being able to privilege escalate into the sender.
+
+The intent is that gaining the ability to change the read-only vma of
+your own process should not be exploitable, so remove this TOCTOU read
+even though it's unexploitable without another Binder bug.
+
+Cc: stable <stable@kernel.org>
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Reported-by: Jann Horn <jannh@google.com>
+Reviewed-by: Jann Horn <jannh@google.com>
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Link: https://patch.msgid.link/20260218-binder-vma-check-v2-2-60f9d695a990@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/thread.rs | 17 ++++++-----------
+ 1 file changed, 6 insertions(+), 11 deletions(-)
+
+--- a/drivers/android/binder/thread.rs
++++ b/drivers/android/binder/thread.rs
+@@ -1018,12 +1018,9 @@ impl Thread {
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+- {
+- let mut reader =
+- UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+- .reader();
+- alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+- }
++ let mut offsets_reader =
++ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
++ .reader();
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + offsets_size;
+@@ -1044,11 +1041,9 @@ impl Thread {
+ .step_by(size_of::<u64>())
+ .enumerate()
+ {
+- let offset: usize = view
+- .alloc
+- .read::<u64>(index_offset)?
+- .try_into()
+- .map_err(|_| EINVAL)?;
++ let offset = offsets_reader.read::<u64>()?;
++ view.alloc.write(index_offset, &offset)?;
++ let offset: usize = offset.try_into().map_err(|_| EINVAL)?;
+
+ if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
+ pr_warn!("Got transaction with invalid offset.");
--- /dev/null
+From 2e303f0febb65a434040774b793ba8356698802b Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Tue, 24 Feb 2026 18:16:39 +0000
+Subject: rust_binder: call set_notification_done() without proc lock
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit 2e303f0febb65a434040774b793ba8356698802b upstream.
+
+Consider the following sequence of events on a death listener:
+1. The remote process dies and sends a BR_DEAD_BINDER message.
+2. The local process invokes the BC_CLEAR_DEATH_NOTIFICATION command.
+3. The local process then invokes the BC_DEAD_BINDER_DONE.
+Then, the kernel will reply to the BC_DEAD_BINDER_DONE command with a
+BR_CLEAR_DEATH_NOTIFICATION_DONE reply using push_work_if_looper().
+
+However, this can result in a deadlock if the current thread is not a
+looper. This is because dead_binder_done() still holds the proc lock
+during set_notification_done(), which called push_work_if_looper().
+Normally, push_work_if_looper() takes the thread lock, which is fine to
+take under the proc lock. But if the current thread is not a looper,
+then it falls back to delivering the reply to the process work queue,
+which involves taking the proc lock. Since the proc lock is already
+held, this is a deadlock.
+
+Fix this by releasing the proc lock during set_notification_done(). It
+was not intentional that it was held during that function to begin with.
+
+I don't think this ever happens in Android because BC_DEAD_BINDER_DONE
+is only invoked in response to BR_DEAD_BINDER messages, and the kernel
+always delivers BR_DEAD_BINDER to a looper. So there's no scenario where
+Android userspace will call BC_DEAD_BINDER_DONE on a non-looper thread.
+
+Cc: stable <stable@kernel.org>
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Reported-by: syzbot+c8287e65a57a89e7fb72@syzkaller.appspotmail.com
+Tested-by: syzbot+c8287e65a57a89e7fb72@syzkaller.appspotmail.com
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: Gary Guo <gary@garyguo.net>
+Reviewed-by: Andreas Hindborg <a.hindborg@kernel.org>
+Link: https://patch.msgid.link/20260224-binder-dead-binder-done-proc-lock-v1-1-bbe1b8a6e74a@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/process.rs | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/android/binder/process.rs
++++ b/drivers/android/binder/process.rs
+@@ -1259,7 +1259,8 @@ impl Process {
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+- if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
++ let death = self.inner.lock().pull_delivered_death(cookie);
++ if let Some(death) = death {
+ death.set_notification_done(thread);
+ }
+ }
--- /dev/null
+From 8ef2c15aeae07647f530d30f6daaf79eb801bcd1 Mon Sep 17 00:00:00 2001
+From: Alice Ryhl <aliceryhl@google.com>
+Date: Wed, 18 Feb 2026 11:53:26 +0000
+Subject: rust_binder: check ownership before using vma
+
+From: Alice Ryhl <aliceryhl@google.com>
+
+commit 8ef2c15aeae07647f530d30f6daaf79eb801bcd1 upstream.
+
+When installing missing pages (or zapping them), Rust Binder will look
+up the vma in the mm by address, and then call vm_insert_page (or
+zap_page_range_single). However, if the vma is closed and replaced with
+a different vma at the same address, this can lead to Rust Binder
+installing pages into the wrong vma.
+
+By installing the page into a writable vma, it becomes possible to write
+to your own binder pages, which are normally read-only. Although you're
+not supposed to be able to write to those pages, the intent behind the
+design of Rust Binder is that even if you get that ability, it should not
+lead to anything bad. Unfortunately, due to another bug, that is not the
+case.
+
+To fix this, store a pointer in vm_private_data and check that the vma
+returned by vma_lookup() has the right vm_ops and vm_private_data before
+trying to use the vma. This should ensure that Rust Binder will refuse
+to interact with any other VMA. The plan is to introduce more vma
+abstractions to avoid this unsafe access to vm_ops and vm_private_data,
+but for now let's start with the simplest possible fix.
+
+C Binder performs the same check in a slightly different way: it
+provides a vm_ops->close that sets a boolean to true, then checks that
+boolean after calling vma_lookup(), but this is more fragile
+than the solution in this patch. (We probably still want to do both, but
+the vm_ops->close callback will be added later as part of the follow-up
+vma API changes.)
+
+It's still possible to remap the vma so that pages appear in the right
+vma, but at the wrong offset, but this is a separate issue and will be
+fixed when Rust Binder gets a vm_ops->close callback.
+
+Cc: stable <stable@kernel.org>
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Reported-by: Jann Horn <jannh@google.com>
+Reviewed-by: Jann Horn <jannh@google.com>
+Signed-off-by: Alice Ryhl <aliceryhl@google.com>
+Acked-by: Danilo Krummrich <dakr@kernel.org>
+Acked-by: Liam R. Howlett <Liam.Howlett@oracle.com>
+Link: https://patch.msgid.link/20260218-binder-vma-check-v2-1-60f9d695a990@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/page_range.rs | 83 ++++++++++++++++++++++++++---------
+ 1 file changed, 63 insertions(+), 20 deletions(-)
+
+--- a/drivers/android/binder/page_range.rs
++++ b/drivers/android/binder/page_range.rs
+@@ -142,6 +142,30 @@ pub(crate) struct ShrinkablePageRange {
+ _pin: PhantomPinned,
+ }
+
++// We do not define any ops. For now, used only to check identity of vmas.
++static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
++
++// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
++// check its vm_ops and private data before using it.
++fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
++ // SAFETY: Just reading the vm_ops pointer of any active vma is safe.
++ let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
++ if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
++ return None;
++ }
++
++ // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
++ let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
++ // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once
++ // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any
++ // VMA associated with it, so there can't be any false positives due to pointer reuse here.
++ if !ptr::eq(vm_private_data, owner.cast()) {
++ return None;
++ }
++
++ vma.as_mixedmap_vma()
++}
++
+ struct Inner {
+ /// Array of pages.
+ ///
+@@ -308,6 +332,18 @@ impl ShrinkablePageRange {
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
++ // This pointer is only used for comparison - it's not dereferenced.
++ //
++ // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
++ // `vm_private_data`.
++ unsafe {
++ (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>()
++ };
++
++ // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
++ // `vm_ops`.
++ unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
++
+ Ok(num_pages)
+ }
+
+@@ -399,22 +435,24 @@ impl ShrinkablePageRange {
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+- MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+- .mmap_read_lock()
+- .vma_lookup(vma_addr)
+- .ok_or(ESRCH)?
+- .as_mixedmap_vma()
+- .ok_or(ESRCH)?
+- .vm_insert_page(user_page_addr, &new_page)
+- .inspect_err(|err| {
+- pr_warn!(
+- "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+- user_page_addr,
+- vma_addr,
+- i,
+- err
+- )
+- })?;
++ check_vma(
++ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
++ .mmap_read_lock()
++ .vma_lookup(vma_addr)
++ .ok_or(ESRCH)?,
++ self,
++ )
++ .ok_or(ESRCH)?
++ .vm_insert_page(user_page_addr, &new_page)
++ .inspect_err(|err| {
++ pr_warn!(
++ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
++ user_page_addr,
++ vma_addr,
++ i,
++ err
++ )
++ })?;
+
+ let inner = self.lock.lock();
+
+@@ -667,12 +705,15 @@ unsafe extern "C" fn rust_shrink_free_pa
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
++ let range_ptr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+- let range = unsafe { &*((*info).range) };
++ range_ptr = unsafe { (*info).range };
++ // SAFETY: The `range` outlives its `PageInfo` values.
++ let range = unsafe { &*range_ptr };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+@@ -717,9 +758,11 @@ unsafe extern "C" fn rust_shrink_free_pa
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+- if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+- let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+- vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
++ if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
++ if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
++ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
++ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
++ }
+ }
+
+ drop(mmap_read);
--- /dev/null
+From 4fc87c240b8f30e22b7ebaae29d57105589e1c0b Mon Sep 17 00:00:00 2001
+From: Carlos Llamas <cmllamas@google.com>
+Date: Tue, 10 Feb 2026 23:28:20 +0000
+Subject: rust_binder: fix oneway spam detection
+
+From: Carlos Llamas <cmllamas@google.com>
+
+commit 4fc87c240b8f30e22b7ebaae29d57105589e1c0b upstream.
+
+The spam detection logic in TreeRange was executed before the current
+request was inserted into the tree. So the new request was not being
+factored into the spam calculation. Fix this by moving the logic after
+the new range has been inserted.
+
+Also, the detection logic for ArrayRange was missing altogether, which
+meant large spamming transactions could get away without being detected.
+Fix this by implementing an equivalent low_oneway_space() in ArrayRange.
+
+Note that I looked into centralizing this logic in RangeAllocator but
+iterating through 'state' and 'size' got a bit too complicated (for me)
+and I abandoned this effort.
+
+Cc: stable <stable@kernel.org>
+Cc: Alice Ryhl <aliceryhl@google.com>
+Fixes: eafedbc7c050 ("rust_binder: add Rust Binder driver")
+Signed-off-by: Carlos Llamas <cmllamas@google.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Link: https://patch.msgid.link/20260210232949.3770644-1-cmllamas@google.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/android/binder/range_alloc/array.rs | 35 ++++++++++++++++++++++++++--
+ drivers/android/binder/range_alloc/mod.rs | 4 +--
+ drivers/android/binder/range_alloc/tree.rs | 18 +++++++-------
+ 3 files changed, 44 insertions(+), 13 deletions(-)
+
+--- a/drivers/android/binder/range_alloc/array.rs
++++ b/drivers/android/binder/range_alloc/array.rs
+@@ -118,7 +118,7 @@ impl<T> ArrayRangeAllocator<T> {
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+- ) -> Result<usize> {
++ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+@@ -146,7 +146,38 @@ impl<T> ArrayRangeAllocator<T> {
+ .ok()
+ .unwrap();
+
+- Ok(insert_at_offset)
++ // Start detecting spammers once we have less than 20%
++ // of async space left (which is less than 10% of total
++ // buffer size).
++ //
++ // (This will short-circuit, so `low_oneway_space` is
++ // only called when necessary.)
++ let oneway_spam_detected =
++ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
++
++ Ok((insert_at_offset, oneway_spam_detected))
++ }
++
++ /// Find the amount and size of buffers allocated by the current caller.
++ ///
++ /// The idea is that once we cross the threshold, whoever is responsible
++ /// for the low async space is likely to try to send another async transaction,
++ /// and at some point we'll catch them in the act. This is more efficient
++ /// than keeping a map per pid.
++ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
++ let mut total_alloc_size = 0;
++ let mut num_buffers = 0;
++
++ // Warn if this pid has more than 50 transactions, or more than 50% of
++ // async space (which is 25% of total buffer size). Oneway spam is only
++ // detected when the threshold is exceeded.
++ for range in &self.ranges {
++ if range.state.is_oneway() && range.state.pid() == calling_pid {
++ total_alloc_size += range.size;
++ num_buffers += 1;
++ }
++ }
++ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+--- a/drivers/android/binder/range_alloc/mod.rs
++++ b/drivers/android/binder/range_alloc/mod.rs
+@@ -188,11 +188,11 @@ impl<T> RangeAllocator<T> {
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+- let offset =
++ let (offset, oneway_spam_detected) =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+- oneway_spam_detected: false,
++ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+--- a/drivers/android/binder/range_alloc/tree.rs
++++ b/drivers/android/binder/range_alloc/tree.rs
+@@ -164,15 +164,6 @@ impl<T> TreeRangeAllocator<T> {
+ self.free_oneway_space
+ };
+
+- // Start detecting spammers once we have less than 20%
+- // of async space left (which is less than 10% of total
+- // buffer size).
+- //
+- // (This will short-circut, so `low_oneway_space` is
+- // only called when necessary.)
+- let oneway_spam_detected =
+- is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+-
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+@@ -203,6 +194,15 @@ impl<T> TreeRangeAllocator<T> {
+ self.free_tree.insert(free_tree_node);
+ }
+
++ // Start detecting spammers once we have less than 20%
++ // of async space left (which is less than 10% of total
++ // buffer size).
++ //
++ // (This will short-circuit, so `low_oneway_space` is
++ // only called when necessary.)
++ let oneway_spam_detected =
++ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
++
+ Ok((found_off, oneway_spam_detected))
+ }
+
--- /dev/null
+From 1336b579f6079fb8520be03624fcd9ba443c930b Mon Sep 17 00:00:00 2001
+From: Cheng-Yang Chou <yphbchou0911@gmail.com>
+Date: Tue, 3 Mar 2026 22:35:30 +0800
+Subject: sched_ext: Remove redundant css_put() in scx_cgroup_init()
+
+From: Cheng-Yang Chou <yphbchou0911@gmail.com>
+
+commit 1336b579f6079fb8520be03624fcd9ba443c930b upstream.
+
+The iterator css_for_each_descendant_pre() walks the cgroup hierarchy
+under cgroup_lock(). It does not increment the reference counts on
+yielded css structs.
+
+According to the cgroup documentation, css_put() should only be used
+to release a reference obtained via css_get() or css_tryget_online().
+Since the iterator does not use either of these to acquire a reference,
+calling css_put() in the error path of scx_cgroup_init() causes a
+refcount underflow.
+
+Remove the unbalanced css_put() to prevent a potential Use-After-Free
+(UAF) vulnerability.
+
+Fixes: 819513666966 ("sched_ext: Add cgroup support")
+Cc: stable@vger.kernel.org # v6.12+
+Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
+Reviewed-by: Andrea Righi <arighi@nvidia.com>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/sched/ext.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/kernel/sched/ext.c
++++ b/kernel/sched/ext.c
+@@ -3450,7 +3450,6 @@ static int scx_cgroup_init(struct scx_sc
+ ret = SCX_CALL_OP_RET(sch, SCX_KF_UNLOCKED, cgroup_init, NULL,
+ css->cgroup, &args);
+ if (ret) {
+- css_put(css);
+ scx_error(sch, "ops.cgroup_init() failed (%d)", ret);
+ return ret;
+ }
iio-imu-inv-mpu9150-fix-irq-ack-preventing-irq-storm.patch
usb-gadget-f_mass_storage-fix-potential-integer-over.patch
drm-amdgpu-ensure-no_hw_access-is-visible-before-mmio.patch
+cgroup-fix-race-between-task-migration-and-iteration.patch
+sched_ext-remove-redundant-css_put-in-scx_cgroup_init.patch
+alsa-pcm-fix-use-after-free-on-linked-stream-runtime-in-snd_pcm_drain.patch
+alsa-usb-audio-check-endpoint-numbers-at-parsing-scarlett2-mixer-interfaces.patch
+can-gs_usb-gs_can_open-always-configure-bitrates-before-starting-device.patch
+net-usb-lan78xx-fix-silent-drop-of-packets-with-checksum-errors.patch
+net-usb-lan78xx-fix-tx-byte-statistics-for-small-packets.patch
+net-usb-lan78xx-fix-warn-in-__netif_napi_del_locked-on-disconnect.patch
+net-usb-lan78xx-skip-ltm-configuration-for-lan7850.patch
+rust_binder-fix-oneway-spam-detection.patch
+rust_binder-check-ownership-before-using-vma.patch
+rust_binder-avoid-reading-the-written-value-in-offsets-array.patch
+rust_binder-call-set_notification_done-without-proc-lock.patch
+rust-kbuild-allow-unused_features.patch
+rust-str-make-nullterminatedformatter-public.patch