--- /dev/null
+From 0001554d7d4f419e149c711c4c03cbb14374ab8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary as these
+functions are called from suspend/resume paths which are already serialized
+by the PM core.
+
+Fix this by removing the spinlock. Since the only code that takes this
+lock is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 3de7674a84675..00f2df29ed617 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1181,7 +1181,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1192,8 +1191,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1209,8 +1206,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1220,7 +1215,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1231,8 +1225,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1247,8 +1239,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index a218dc6f2edd5..dfd1add6dbaac 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -185,7 +185,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 61f22462197ae..7a755c1fd5ef2 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1050,9 +1050,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From 586af2c5a84ef3fef8de89a7af0aff53976a6dcf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index 73078306504c0..768df9d7cd676 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1262,24 +1262,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 7289dede7c598455dbd0547412752ff2a7f1a25c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added a locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 200b66e85c1c6..414a2bb17397b 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1123,6 +1123,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From e414d2c08234561717916274942a9b684cb9b831 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function calls free_irq() in its error path with the
+mcp_lock mutex held. But if an interrupt already occurred, the
+interrupt handler will be waiting for the mcp_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensures that the IRQ
+handler will exit right away once it acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 88d065718e990..b06b15debafac 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1207,6 +1207,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1252,12 +1253,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c102dbee3ed9b8b6a79d0961c99c5c9217ec3b12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index cfcfef37b2f1a..6afdd080c7c1b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From db713fb569c7bd59a018b6b1ca41ed187c0165a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index b47f89600c2f8..ac24f8b624e34 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1013,7 +1013,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From e21aa9ca825149aa8432394d3b18017a99e7c123 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 84e37108c6b5e..2c59e3f918ca2 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -70,7 +70,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index f9d4b86e3186d..4d7e99a547784 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -124,7 +124,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From fde77afad2ecf021401b14c94c8b8d553306d160 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 07510e068742e..2dc3e5be1d717 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -305,7 +305,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index bec6a68a973c4..eb4262017d235 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -420,14 +420,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From 82d41f8dbd17a8ab9a45fe6cf4df023bd59fc0a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index ac24f8b624e34..27736b5847378 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3408,7 +3408,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ {
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3449,11 +3448,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 06d283832824ddfcd3ba11fb2015c79e497247b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 3514686eb53f9..188677c322f4c 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1460,10 +1460,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From dbd88cfc6ecd3952ee6bf7474e3b81e6f10041d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 7973d4070ee3b..c24535dc051c0 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2258,6 +2258,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++ /* check if nd_tbl is not initiliazed due to
++ * ipv6.disable=1 set during boot
++ */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From b42364d14cd7524b1510c6cdc1c99315eb70b2b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index b4548d8874899..4f06a2903ae75 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ del_timer_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From 05c0023a485800ca7fe60070a8e140088d945e28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 188677c322f4c..873c9073e4111 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1014,18 +1014,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From fb509a392ade2767b555550f35b0491c39897ea0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 5f1d438a0a23f..0e59706e4e8ab 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -66,6 +66,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From b2a63ac58545221dd681b9376f433a2b1bdcdd81 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index d18b6ddba9829..1dbf19fe85599 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9412,14 +9412,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
squashfs-check-metadata-block-offset-is-within-range.patch
drbd-fix-logic-bug-in-drbd_al_begin_io_nonblock.patch
selftests-mptcp-more-stable-simult_flows-tests.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
--- /dev/null
+From 3bbf53933b54e7753cea3fbceb19c7c00ee8bf9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index a20ab577a3644..212b6f2af8de4 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 757f2c4f77be3d2295d2548213969a784e6d1d76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 109c51e497926..30430b4a4188f 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1813,6 +1813,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1829,8 +1831,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From 99a0fcd1abdfaab21b6fba1c0064d96030f61c28 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index ce8ffb595a468..cb2affe11b02f 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -378,11 +378,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From 6977ba6edbce814bcb4cd18afae652bec3e337dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary as these
+functions are called from suspend/resume paths which are already serialized
+by the PM core
+
+Fix this by removing the spinlock. Since only code that takes this lock
+is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 00312543f2267..046f38d4bac61 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1181,7 +1181,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1192,8 +1191,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1209,8 +1206,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1220,7 +1215,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1231,8 +1225,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1247,8 +1239,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index a218dc6f2edd5..dfd1add6dbaac 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -185,7 +185,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 27fc9fb00cd73..998e56e1a7702 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1050,9 +1050,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From 2f9a712695eb0f6e98d389d3a9604710586f993c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index 73078306504c0..768df9d7cd676 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1262,24 +1262,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From d37b03b4ddbffb8cc4672df340ec0cdb19793b1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added a locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index e2325f5ba7e54..c77d8bafde653 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1123,6 +1123,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From 45f661043b0f323720b520785243b0743026481d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function call free_irq() in its error path with the
+mpc_lock mutex held. But if an interrupt already occurred the
+interrupt handler will be waiting for the mpc_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensure that the IRQ
+handler will exit right away once it acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 653566c570df8..e71edca7afbb2 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1207,6 +1207,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1252,12 +1253,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From fdb23a435762cd36ed7f677fb57bd7287e01676c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 13:59:30 +0200
+Subject: dpaa2-switch: do not clear any interrupts automatically
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit f6da276479c63ca29774bc331a537b92f0550c45 ]
+
+The DPSW object has multiple event sources multiplexed over the same
+IRQ. The driver has the capability to configure only some of these
+events to trigger the IRQ.
+
+The dpsw_get_irq_status() can clear events automatically based on the
+value stored in the 'status' variable passed to it. We don't want that
+to happen because we could get into a situation when we are clearing
+more events than we actually handled.
+
+Just resort to manually clearing the events that we handled. Also, since
+status is not used on the out path we remove its initialization to zero.
+
+This change does not have a user-visible effect because the dpaa2-switch
+driver enables and handles all the DPSW events which exist at the
+moment.
+
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 92500e55ab931..9713d04238138 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1518,9 +1518,9 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct ethsw_port_priv *port_priv;
+- u32 status = ~0;
+ int err, if_id;
+ bool had_mac;
++ u32 status;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+@@ -1555,12 +1555,12 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ rtnl_unlock();
+ }
+
+-out:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+
++out:
+ return IRQ_HANDLED;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7871eded31ae8595c666e730795bfa59d8b77890 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:58:12 -0800
+Subject: dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ
+ handler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 74badb9c20b1a9c02a95c735c6d3cd6121679c93 ]
+
+Commit 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ
+handler") introduces a range check for if_id to avoid an out-of-bounds
+access. If an out-of-bounds if_id is detected, the interrupt status is
+not cleared. This may result in an interrupt storm.
+
+Clear the interrupt status after detecting an out-of-bounds if_id to avoid
+the problem.
+
+Found by an experimental AI code review agent at Google.
+
+Fixes: 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ handler")
+Cc: Junrui Luo <moonafterrain@outlook.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20260227055812.1777915-1-linux@roeck-us.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 9713d04238138..3cc844a61cb88 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1532,7 +1532,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+- goto out;
++ goto out_clear;
+ }
+ port_priv = ethsw->ports[if_id];
+
+@@ -1555,6 +1555,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ rtnl_unlock();
+ }
+
++out_clear:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From ad1da66b9cac361234dd1714a6e13c7635dd0473 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index c1c76a70a6ce9..227cee5e2a98b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From a4a8ed327633bc5c70519cf557170acfcbd21de9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index f30a5b7d93f4d..05e2ea8b269df 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1018,7 +1018,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From 789f5be73c826b17d10bf00a4fd5c79176cd4832 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index b2fa4ca281021..4886be8970a88 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -70,7 +70,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 14423132a3df5..a66df464f8562 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -130,7 +130,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From 12e1221e2780268706973f419864e2414157f202 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:11 +0200
+Subject: net: dpaa2: replace dpaa2_mac_is_type_fixed() with
+ dpaa2_mac_is_type_phy()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 320fefa9e2edc67011e235ea1d50f0d00ddfe004 ]
+
+dpaa2_mac_is_type_fixed() is a header with no implementation and no
+callers, which is referenced from the documentation though. It can be
+deleted.
+
+On the other hand, it would be useful to reuse the code between
+dpaa2_eth_is_type_phy() and dpaa2_switch_port_is_type_phy(). That common
+code should be called dpaa2_mac_is_type_phy(), so let's create that.
+
+The removal and the addition are merged into the same patch because,
+in fact, is_type_phy() is the logical opposite of is_type_fixed().
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/freescale/dpaa2/mac-phy-support.rst | 9 ++++++---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7 +------
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 10 ++++++++--
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 7 +------
+ 4 files changed, 16 insertions(+), 17 deletions(-)
+
+diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
+index 51e6624fb7741..1d2f55feca242 100644
+--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
++++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
+@@ -181,10 +181,13 @@ when necessary using the below listed API::
+ - int dpaa2_mac_connect(struct dpaa2_mac *mac);
+ - void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+-A phylink integration is necessary only when the partner DPMAC is not of TYPE_FIXED.
+-One can check for this condition using the below API::
++A phylink integration is necessary only when the partner DPMAC is not of
++``TYPE_FIXED``. This means it is either of ``TYPE_PHY``, or of
++``TYPE_BACKPLANE`` (the difference being the two that in the ``TYPE_BACKPLANE``
++mode, the MC firmware does not access the PCS registers). One can check for
++this condition using the following helper::
+
+- - bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,struct fsl_mc_io *mc_io);
++ - static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac);
+
+ Before connection to a MAC, the caller must allocate and populate the
+ dpaa2_mac structure with the associated net_device, a pointer to the MC portal
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index 805e5619e1e63..f388acc434987 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -711,12 +711,7 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+
+ static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+ {
+- if (priv->mac &&
+- (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+- priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+- return true;
+-
+- return false;
++ return dpaa2_mac_is_type_phy(priv->mac);
+ }
+
+ static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+index 7842cbb2207ab..0b2fc22f11909 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+@@ -27,8 +27,14 @@ struct dpaa2_mac {
+ struct fwnode_handle *fw_node;
+ };
+
+-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+- struct fsl_mc_io *mc_io);
++static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
++{
++ if (!mac)
++ return false;
++
++ return mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
++ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE;
++}
+
+ int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+index 0002dca4d4177..9898073abe012 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+@@ -230,12 +230,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
+ static inline bool
+ dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
+ {
+- if (port_priv->mac &&
+- (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+- port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+- return true;
+-
+- return false;
++ return dpaa2_mac_is_type_phy(port_priv->mac);
+ }
+
+ static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
+--
+2.51.0
+
--- /dev/null
+From b13ad2b18fef3ce1d9d21ecbcca5a956ed05fc5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:15 +0200
+Subject: net: dpaa2-switch: assign port_priv->mac after dpaa2_mac_connect()
+ call
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 88d64367cea019fa6197d0d97a85ac90279919b7 ]
+
+The dpaa2-switch has the exact same locking requirements when connected
+to a DPMAC, so it needs port_priv->mac to always point either to NULL,
+or to a DPMAC with a fully initialized phylink instance.
+
+Make the same preparatory change in the dpaa2-switch driver as in the
+dpaa2-eth one.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/freescale/dpaa2/dpaa2-switch.c | 21 +++++++++++--------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 147e53c0552f8..a2812229511c3 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1456,9 +1456,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+- port_priv->mac = mac;
+
+- if (dpaa2_switch_port_is_type_phy(port_priv)) {
++ if (dpaa2_mac_is_type_phy(mac)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(port_priv->netdev,
+@@ -1468,11 +1467,12 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ }
+ }
+
++ port_priv->mac = mac;
++
+ return 0;
+
+ err_close_mac:
+ dpaa2_mac_close(mac);
+- port_priv->mac = NULL;
+ err_free_mac:
+ kfree(mac);
+ out_put_device:
+@@ -1482,15 +1482,18 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+
+ static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+ {
+- if (dpaa2_switch_port_is_type_phy(port_priv))
+- dpaa2_mac_disconnect(port_priv->mac);
++ struct dpaa2_mac *mac = port_priv->mac;
+
+- if (!dpaa2_switch_port_has_mac(port_priv))
++ port_priv->mac = NULL;
++
++ if (!mac)
+ return;
+
+- dpaa2_mac_close(port_priv->mac);
+- kfree(port_priv->mac);
+- port_priv->mac = NULL;
++ if (dpaa2_mac_is_type_phy(mac))
++ dpaa2_mac_disconnect(mac);
++
++ dpaa2_mac_close(mac);
++ kfree(mac);
+ }
+
+ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+--
+2.51.0
+
--- /dev/null
+From 79873fe119be47be52dae29c00572a26629b8796 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:17 +0200
+Subject: net: dpaa2-switch replace direct MAC access with
+ dpaa2_switch_port_has_mac()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit bc230671bfb25c2d3c225f674fe6c03cea88d22e ]
+
+The helper function will gain a lockdep annotation in a future patch.
+Make sure to benefit from it.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+index 720c9230cab57..0b41a945e0fff 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+@@ -196,7 +196,7 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+
+- if (port_priv->mac)
++ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 5c1e00d0a8a87029c3f3d72e5a2ff304626dd986 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:20 +0200
+Subject: net: dpaa2-switch: serialize changes to priv->mac with a mutex
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 3c7f44fa9c4c8a9154935ca49e4cf45c14240335 ]
+
+The dpaa2-switch driver uses a DPMAC in the same way as the dpaa2-eth
+driver, so we need to duplicate the locking solution established by the
+previous change to the switch driver as well.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../freescale/dpaa2/dpaa2-switch-ethtool.c | 32 +++++++++++++++----
+ .../ethernet/freescale/dpaa2/dpaa2-switch.c | 31 ++++++++++++++++--
+ .../ethernet/freescale/dpaa2/dpaa2-switch.h | 2 ++
+ 3 files changed, 55 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+index 0b41a945e0fff..dc9f4ad8a061d 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+@@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev,
+ {
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state = {0};
+- int err = 0;
++ int err;
++
++ mutex_lock(&port_priv->mac_lock);
+
+- if (dpaa2_switch_port_is_type_phy(port_priv))
+- return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+- link_ksettings);
++ if (dpaa2_switch_port_is_type_phy(port_priv)) {
++ err = phylink_ethtool_ksettings_get(port_priv->mac->phylink,
++ link_ksettings);
++ mutex_unlock(&port_priv->mac_lock);
++ return err;
++ }
++
++ mutex_unlock(&port_priv->mac_lock);
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+@@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
+ bool if_running;
+ int err = 0, ret;
+
+- if (dpaa2_switch_port_is_type_phy(port_priv))
+- return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+- link_ksettings);
++ mutex_lock(&port_priv->mac_lock);
++
++ if (dpaa2_switch_port_is_type_phy(port_priv)) {
++ err = phylink_ethtool_ksettings_set(port_priv->mac->phylink,
++ link_ksettings);
++ mutex_unlock(&port_priv->mac_lock);
++ return err;
++ }
++
++ mutex_unlock(&port_priv->mac_lock);
+
+ /* Interface needs to be down to change link settings */
+ if_running = netif_running(netdev);
+@@ -196,8 +210,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+
++ mutex_lock(&port_priv->mac_lock);
++
+ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
++
++ mutex_unlock(&port_priv->mac_lock);
+ }
+
+ const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index a2812229511c3..92500e55ab931 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -602,8 +602,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
+
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
++ * We can avoid locking because we are called from the "link changed"
++ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
++ * (the writer to port_priv->mac), so we cannot race with it.
+ */
+- if (dpaa2_switch_port_is_type_phy(port_priv))
++ if (dpaa2_mac_is_type_phy(port_priv->mac))
+ return 0;
+
+ /* Interrupts are received even though no one issued an 'ifconfig up'
+@@ -683,6 +686,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
++ mutex_lock(&port_priv->mac_lock);
++
+ if (!dpaa2_switch_port_is_type_phy(port_priv)) {
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+@@ -696,6 +701,7 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
++ mutex_unlock(&port_priv->mac_lock);
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+@@ -705,6 +711,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
+ if (dpaa2_switch_port_is_type_phy(port_priv))
+ phylink_start(port_priv->mac->phylink);
+
++ mutex_unlock(&port_priv->mac_lock);
++
+ return 0;
+ }
+
+@@ -714,6 +722,8 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
++ mutex_lock(&port_priv->mac_lock);
++
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ phylink_stop(port_priv->mac->phylink);
+ } else {
+@@ -721,6 +731,8 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
+ netif_carrier_off(netdev);
+ }
+
++ mutex_unlock(&port_priv->mac_lock);
++
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+@@ -1467,7 +1479,9 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ }
+ }
+
++ mutex_lock(&port_priv->mac_lock);
+ port_priv->mac = mac;
++ mutex_unlock(&port_priv->mac_lock);
+
+ return 0;
+
+@@ -1482,9 +1496,12 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+
+ static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+ {
+- struct dpaa2_mac *mac = port_priv->mac;
++ struct dpaa2_mac *mac;
+
++ mutex_lock(&port_priv->mac_lock);
++ mac = port_priv->mac;
+ port_priv->mac = NULL;
++ mutex_unlock(&port_priv->mac_lock);
+
+ if (!mac)
+ return;
+@@ -1503,6 +1520,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ struct ethsw_port_priv *port_priv;
+ u32 status = ~0;
+ int err, if_id;
++ bool had_mac;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+@@ -1525,7 +1543,12 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+
+ if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ rtnl_lock();
+- if (dpaa2_switch_port_has_mac(port_priv))
++ /* We can avoid locking because the "endpoint changed" IRQ
++ * handler is the only one who changes priv->mac at runtime,
++ * so we are not racing with anyone.
++ */
++ had_mac = !!port_priv->mac;
++ if (had_mac)
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ else
+ dpaa2_switch_port_connect_mac(port_priv);
+@@ -3281,6 +3304,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
++ mutex_init(&port_priv->mac_lock);
++
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+index 9898073abe012..42b3ca73f55d5 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+@@ -161,6 +161,8 @@ struct ethsw_port_priv {
+
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpaa2_mac *mac;
++ /* Protects against changes to port_priv->mac */
++ struct mutex mac_lock;
+ };
+
+ /* Switch data */
+--
+2.51.0
+
--- /dev/null
+From fadcb14f713492dd3eb8fee779b2b2cfd2bd6bec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index c2700fcfc10e2..7c2202225475e 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -320,7 +320,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 348a05454fcaa..e9e8253ecea5a 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -420,14 +420,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From f460ab24b6bf0ba6bf69fe1ca3d5c849382ec943 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 05e2ea8b269df..52e8e77df69a1 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3519,7 +3519,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ {
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3560,11 +3559,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 8d28ddd0a03f2e3c0cc6c01421290a7c89e97672 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index c26914ca40aff..4f1f56e264730 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1474,10 +1474,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ BUILD_BUG_ON(NCI_CTRL_HDR_SIZE != NCI_DATA_HDR_SIZE);
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 778240926dddf6fa452cc1dbd98e27901a2f4164 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Feb 2026 23:53:07 +0900
+Subject: net: sched: avoid qdisc_reset_all_tx_gt() vs dequeue race for
+ lockless qdiscs
+
+From: Koichiro Den <den@valinux.co.jp>
+
+[ Upstream commit 7f083faf59d14c04e01ec05a7507f036c965acf8 ]
+
+When shrinking the number of real tx queues,
+netif_set_real_num_tx_queues() calls qdisc_reset_all_tx_gt() to flush
+qdiscs for queues which will no longer be used.
+
+qdisc_reset_all_tx_gt() currently serializes qdisc_reset() with
+qdisc_lock(). However, for lockless qdiscs, the dequeue path is
+serialized by qdisc_run_begin/end() using qdisc->seqlock instead, so
+qdisc_reset() can run concurrently with __qdisc_run() and free skbs
+while they are still being dequeued, leading to UAF.
+
+This can easily be reproduced on e.g. virtio-net by imposing heavy
+traffic while frequently changing the number of queue pairs:
+
+ iperf3 -ub0 -c $peer -t 0 &
+ while :; do
+ ethtool -L eth0 combined 1
+ ethtool -L eth0 combined 2
+ done
+
+With KASAN enabled, this leads to reports like:
+
+ BUG: KASAN: slab-use-after-free in __qdisc_run+0x133f/0x1760
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ __qdisc_run+0x133f/0x1760
+ __dev_queue_xmit+0x248f/0x3550
+ ip_finish_output2+0xa42/0x2110
+ ip_output+0x1a7/0x410
+ ip_send_skb+0x2e6/0x480
+ udp_send_skb+0xb0a/0x1590
+ udp_sendmsg+0x13c9/0x1fc0
+ ...
+ </TASK>
+
+ Allocated by task 1270 on cpu 5 at 44.558414s:
+ ...
+ alloc_skb_with_frags+0x84/0x7c0
+ sock_alloc_send_pskb+0x69a/0x830
+ __ip_append_data+0x1b86/0x48c0
+ ip_make_skb+0x1e8/0x2b0
+ udp_sendmsg+0x13a6/0x1fc0
+ ...
+
+ Freed by task 1306 on cpu 3 at 44.558445s:
+ ...
+ kmem_cache_free+0x117/0x5e0
+ pfifo_fast_reset+0x14d/0x580
+ qdisc_reset+0x9e/0x5f0
+ netif_set_real_num_tx_queues+0x303/0x840
+ virtnet_set_channels+0x1bf/0x260 [virtio_net]
+ ethnl_set_channels+0x684/0xae0
+ ethnl_default_set_doit+0x31a/0x890
+ ...
+
+Serialize qdisc_reset_all_tx_gt() against the lockless dequeue path by
+taking qdisc->seqlock for TCQ_F_NOLOCK qdiscs, matching the
+serialization model already used by dev_reset_queue().
+
+Additionally clear QDISC_STATE_NON_EMPTY after reset so the qdisc state
+reflects an empty queue, avoiding needless re-scheduling.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Signed-off-by: Koichiro Den <den@valinux.co.jp>
+Link: https://patch.msgid.link/20260228145307.3955532-1-den@valinux.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 55127305478df..dd6203f3f0a5e 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -742,13 +742,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+ struct Qdisc *qdisc;
++ bool nolock;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
++ nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++ if (nolock)
++ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
++ if (nolock) {
++ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
++ spin_unlock_bh(&qdisc->seqlock);
++ }
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 80635668b54813ccb133844c41a1cc538d79b316 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:25 +0000
+Subject: net: stmmac: Fix error handling in VLAN add and delete paths
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 35dfedce442c4060cfe5b98368bc9643fb995716 ]
+
+stmmac_vlan_rx_add_vid() updates active_vlans and the VLAN hash
+register before writing the HW filter entry. If the filter write
+fails, it leaves a stale VID in active_vlans and the hash register.
+
+stmmac_vlan_rx_kill_vid() has the reverse problem: it clears
+active_vlans before removing the HW filter. On failure, the VID is
+gone from active_vlans but still present in the HW filter table.
+
+To fix this, reorder the operations to update the hash table first,
+then attempt the HW filter operation. If the HW filter fails, roll
+back both the active_vlans bitmap and the hash table by calling
+stmmac_vlan_update() again.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-2-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index e056b512c1277..70e941650b425 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6281,9 +6281,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ clear_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto err_pm_put;
++ }
+ }
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6306,15 +6310,21 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
++ ret = stmmac_vlan_update(priv, is_double);
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ goto del_vlan_error;
++ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto del_vlan_error;
++ }
+ }
+
+- ret = stmmac_vlan_update(priv, is_double);
+-
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+--
+2.51.0
+
--- /dev/null
+From 007f9756a3d7fbf7195e0824fa230576d45b961c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 91122d4d404b7..934a2f02a6c6a 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2259,6 +2259,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++	/* check if nd_tbl is not initialized due to
++ * ipv6.disable=1 set during boot
++ */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From cff1a88742bc6f120eab0c8653af0d571718a7b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 3d36ea5701f02..7a3fb2a397a1e 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ del_timer_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From 9ac06577533e062193f3fd00c0abaeaaabaacd70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 4f1f56e264730..ca9543be400c4 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1027,18 +1027,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From 20ede8c1a9ddbdecea141e1c6c5b8f2edc81b5d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 0ca214ab5aeff..23d52b8d6363e 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -66,6 +66,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From 5bf9ee1ae9db3d886a76f3e24e47a98f1e2f75db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 89a8e074c16d0..43a4851d2de61 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9460,14 +9460,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
drbd-fix-logic-bug-in-drbd_al_begin_io_nonblock.patch
scsi-core-fix-refcount-leak-for-tagset_refcnt.patch
selftests-mptcp-more-stable-simult_flows-tests.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+net-dpaa2-replace-dpaa2_mac_is_type_fixed-with-dpaa2.patch
+net-dpaa2-switch-assign-port_priv-mac-after-dpaa2_ma.patch
+net-dpaa2-switch-replace-direct-mac-access-with-dpaa.patch
+net-dpaa2-switch-serialize-changes-to-priv-mac-with-.patch
+dpaa2-switch-do-not-clear-any-interrupts-automatical.patch
+dpaa2-switch-fix-interrupt-storm-after-receiving-bad.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+net-sched-avoid-qdisc_reset_all_tx_gt-vs-dequeue-rac.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-stmmac-fix-error-handling-in-vlan-add-and-delete.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
--- /dev/null
+From 6f3d390b3e6d38824ba9ff190714766af6d2050b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index a20ab577a3644..212b6f2af8de4 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 5c22e79007dcf60a6b182bf0b99401aea751eab2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 7d664380c4771..5842fe6bfb855 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1813,6 +1813,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1829,8 +1831,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From e43b466fc1e72ed71bb3d1cba6baeb0ca9076799 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index df7cab870be5a..aa22c2f52bae7 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -379,11 +379,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From f4f68a3e0a24e8738ee716a647e6f99e57eb5320 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary as these
+functions are called from suspend/resume paths which are already serialized
+by the PM core.
+
+Fix this by removing the spinlock. Since the only code that takes this
+lock is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 3d6f8f3a83366..256969ac2cb9e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1181,7 +1181,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1192,8 +1191,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1209,8 +1206,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1220,7 +1215,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1231,8 +1225,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1247,8 +1239,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 0e8698928e4d7..6e8fafb2acbaa 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -185,7 +185,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index f3ba76530b67b..92c40142c4576 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1077,9 +1077,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From 6195c4c7f2989ee6949435ae9609b67e9e33fd27 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index b7fa48a9b7205..0d4b8e5936dcf 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1260,24 +1260,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From eb6a69acfd7858dec73acee1e4e47555a722d724 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 23:59:42 -0700
+Subject: bpf: export bpf_link_inc_not_zero.
+
+From: Kui-Feng Lee <thinker.li@gmail.com>
+
+[ Upstream commit 67c3e8353f45c27800eecc46e00e8272f063f7d1 ]
+
+bpf_link_inc_not_zero() will be used by kernel modules. We will use it in
+bpf_testmod.c later.
+
+Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
+Link: https://lore.kernel.org/r/20240530065946.979330-5-thinker.li@gmail.com
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Stable-dep-of: 56145d237385 ("bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 6 ++++++
+ kernel/bpf/syscall.c | 3 ++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 142a21f019ff8..3045de8e3f660 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -1907,6 +1907,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
+ int bpf_link_settle(struct bpf_link_primer *primer);
+ void bpf_link_cleanup(struct bpf_link_primer *primer);
+ void bpf_link_inc(struct bpf_link *link);
++struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
+ void bpf_link_put(struct bpf_link *link);
+ int bpf_link_new_fd(struct bpf_link *link);
+ struct file *bpf_link_new_file(struct bpf_link *link, int *reserved_fd);
+@@ -2254,6 +2255,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
+ {
+ }
+
++static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
++{
++ return NULL;
++}
++
+ static inline void bpf_link_put(struct bpf_link *link)
+ {
+ }
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index b559d99e5959a..ed8f55bdc1370 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -4763,10 +4763,11 @@ static int link_detach(union bpf_attr *attr)
+ return ret;
+ }
+
+-static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
++struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+ {
+ return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
+ }
++EXPORT_SYMBOL(bpf_link_inc_not_zero);
+
+ struct bpf_link *bpf_link_by_id(u32 id)
+ {
+--
+2.51.0
+
--- /dev/null
+From 2ace4c93362eef3934cf4348755abe7a7cbb4ba6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 17:52:17 +0800
+Subject: bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
+
+From: Lang Xu <xulang@uniontech.com>
+
+[ Upstream commit 56145d237385ca0e7ca9ff7b226aaf2eb8ef368b ]
+
+The root cause of this bug is that when 'bpf_link_put' reduces the
+refcount of 'shim_link->link.link' to zero, the resource is considered
+released but may still be referenced via 'tr->progs_hlist' in
+'cgroup_shim_find'. The actual cleanup of 'tr->progs_hlist' in
+'bpf_shim_tramp_link_release' is deferred. During this window, another
+process can cause a use-after-free via 'bpf_trampoline_link_cgroup_shim'.
+
+Based on Martin KaFai Lau's suggestions, I have created a simple patch.
+
+To fix this:
+ Add an atomic non-zero check in 'bpf_trampoline_link_cgroup_shim'.
+ Only increment the refcount if it is not already zero.
+
+Testing:
+ I verified the fix by adding a delay in
+ 'bpf_shim_tramp_link_release' to make the bug easier to trigger:
+
+static void bpf_shim_tramp_link_release(struct bpf_link *link)
+{
+ /* ... */
+ if (!shim_link->trampoline)
+ return;
+
++ msleep(100);
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link,
+ shim_link->trampoline, NULL));
+ bpf_trampoline_put(shim_link->trampoline);
+}
+
+Before the patch, running a PoC easily reproduced the crash(almost 100%)
+with a call trace similar to KaiyanM's report.
+After the patch, the bug no longer occurs even after millions of
+iterations.
+
+Fixes: 69fd337a975c ("bpf: per-cgroup lsm flavor")
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Closes: https://lore.kernel.org/bpf/3c4ebb0b.46ff8.19abab8abe2.Coremail.kaiyanm@hust.edu.cn/
+Signed-off-by: Lang Xu <xulang@uniontech.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/279EEE1BA1DDB49D+20260303095217.34436-1-xulang@uniontech.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/trampoline.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 4c7c6129db90e..17763af54179b 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -732,10 +732,8 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ mutex_lock(&tr->mutex);
+
+ shim_link = cgroup_shim_find(tr, bpf_func);
+- if (shim_link) {
++ if (shim_link && !IS_ERR(bpf_link_inc_not_zero(&shim_link->link.link))) {
+ /* Reusing existing shim attached by the other program. */
+- bpf_link_inc(&shim_link->link.link);
+-
+ mutex_unlock(&tr->mutex);
+ bpf_trampoline_put(tr); /* bpf_trampoline_get above */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 5798d197b33ba6f9bb0ba7ebd723517e00008f65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added a locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 4fb5cfaf74f3f..050c755ff5fbd 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1128,6 +1128,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From 9dcc57b817c5114564620cd923ac26da09a6c91a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function call free_irq() in its error path with the
+mpc_lock mutex held. But if an interrupt already occurred the
+interrupt handler will be waiting for the mpc_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensure that the IRQ
+handler will exit right away once it acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 8c56f85e87c1a..72ae17b2313ec 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1202,6 +1202,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1245,12 +1246,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 36ed9dba97713b26fdead918cb9ae92dbeac130e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 13:59:30 +0200
+Subject: dpaa2-switch: do not clear any interrupts automatically
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit f6da276479c63ca29774bc331a537b92f0550c45 ]
+
+The DPSW object has multiple event sources multiplexed over the same
+IRQ. The driver has the capability to configure only some of these
+events to trigger the IRQ.
+
+The dpsw_get_irq_status() can clear events automatically based on the
+value stored in the 'status' variable passed to it. We don't want that
+to happen because we could get into a situation when we are clearing
+more events than we actually handled.
+
+Just resort to manually clearing the events that we handled. Also, since
+status is not used on the out path we remove its initialization to zero.
+
+This change does not have a user-visible effect because the dpaa2-switch
+driver enables and handles all the DPSW events which exist at the
+moment.
+
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 68378d694c5d3..b29f49ec64049 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1522,9 +1522,9 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct ethsw_port_priv *port_priv;
+- u32 status = ~0;
+ int err, if_id;
+ bool had_mac;
++ u32 status;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+@@ -1559,12 +1559,12 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ rtnl_unlock();
+ }
+
+-out:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+
++out:
+ return IRQ_HANDLED;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f33a1969b54f8b31483ab04db850f99249e0c5fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:58:12 -0800
+Subject: dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ
+ handler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 74badb9c20b1a9c02a95c735c6d3cd6121679c93 ]
+
+Commit 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ
+handler") introduces a range check for if_id to avoid an out-of-bounds
+access. If an out-of-bounds if_id is detected, the interrupt status is
+not cleared. This may result in an interrupt storm.
+
+Clear the interrupt status after detecting an out-of-bounds if_id to avoid
+the problem.
+
+Found by an experimental AI code review agent at Google.
+
+Fixes: 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ handler")
+Cc: Junrui Luo <moonafterrain@outlook.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20260227055812.1777915-1-linux@roeck-us.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index b29f49ec64049..510a018978d9a 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1536,7 +1536,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+- goto out;
++ goto out_clear;
+ }
+ port_priv = ethsw->ports[if_id];
+
+@@ -1559,6 +1559,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ rtnl_unlock();
+ }
+
++out_clear:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From 90f25a6a481586aca51a8f527844c070aade5929 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index c1c76a70a6ce9..227cee5e2a98b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From 67c4a4c21fa2746e0583333bb67301afca6716db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 07e3d59c24059..5aa5390da1095 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1058,7 +1058,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From 5f535824e5da444ef7e542f5f24c7de79026714a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 22 Jan 2023 02:27:17 +0500
+Subject: kunit: kunit.py extract handlers
+
+From: Alexander Pantyukhin <apantykhin@gmail.com>
+
+[ Upstream commit 2dc9d6ca52a47fd00822e818c2a5e48fc5fbbd53 ]
+
+The main function contains a wide if-elif block that handles different
+subcommands. It's possible to make code refactoring to extract
+subcommands handlers.
+
+Fixed commit summary line.
+Shuah Khan <skhan@linuxfoundation.org>
+
+Signed-off-by: Alexander Pantyukhin <apantykhin@gmail.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit.py | 167 ++++++++++++++++++++---------------
+ 1 file changed, 96 insertions(+), 71 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
+index 43fbe96318fe1..8cd8188675047 100755
+--- a/tools/testing/kunit/kunit.py
++++ b/tools/testing/kunit/kunit.py
+@@ -395,6 +395,95 @@ def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree
+ extra_qemu_args=qemu_args)
+
+
++def run_handler(cli_args):
++ if not os.path.exists(cli_args.build_dir):
++ os.mkdir(cli_args.build_dir)
++
++ linux = tree_from_args(cli_args)
++ request = KunitRequest(build_dir=cli_args.build_dir,
++ make_options=cli_args.make_options,
++ jobs=cli_args.jobs,
++ raw_output=cli_args.raw_output,
++ json=cli_args.json,
++ timeout=cli_args.timeout,
++ filter_glob=cli_args.filter_glob,
++ kernel_args=cli_args.kernel_args,
++ run_isolated=cli_args.run_isolated)
++ result = run_tests(linux, request)
++ if result.status != KunitStatus.SUCCESS:
++ sys.exit(1)
++
++
++def config_handler(cli_args):
++ if cli_args.build_dir and (
++ not os.path.exists(cli_args.build_dir)):
++ os.mkdir(cli_args.build_dir)
++
++ linux = tree_from_args(cli_args)
++ request = KunitConfigRequest(build_dir=cli_args.build_dir,
++ make_options=cli_args.make_options)
++ result = config_tests(linux, request)
++ stdout.print_with_timestamp((
++ 'Elapsed time: %.3fs\n') % (
++ result.elapsed_time))
++ if result.status != KunitStatus.SUCCESS:
++ sys.exit(1)
++
++
++def build_handler(cli_args):
++ linux = tree_from_args(cli_args)
++ request = KunitBuildRequest(build_dir=cli_args.build_dir,
++ make_options=cli_args.make_options,
++ jobs=cli_args.jobs)
++ result = config_and_build_tests(linux, request)
++ stdout.print_with_timestamp((
++ 'Elapsed time: %.3fs\n') % (
++ result.elapsed_time))
++ if result.status != KunitStatus.SUCCESS:
++ sys.exit(1)
++
++
++def exec_handler(cli_args):
++ linux = tree_from_args(cli_args)
++ exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
++ build_dir=cli_args.build_dir,
++ json=cli_args.json,
++ timeout=cli_args.timeout,
++ filter_glob=cli_args.filter_glob,
++ kernel_args=cli_args.kernel_args,
++ run_isolated=cli_args.run_isolated)
++ result = exec_tests(linux, exec_request)
++ stdout.print_with_timestamp((
++ 'Elapsed time: %.3fs\n') % (result.elapsed_time))
++ if result.status != KunitStatus.SUCCESS:
++ sys.exit(1)
++
++
++def parse_handler(cli_args):
++ if cli_args.file is None:
++ sys.stdin.reconfigure(errors='backslashreplace') # pytype: disable=attribute-error
++ kunit_output = sys.stdin
++ else:
++ with open(cli_args.file, 'r', errors='backslashreplace') as f:
++ kunit_output = f.read().splitlines()
++ # We know nothing about how the result was created!
++ metadata = kunit_json.Metadata()
++ request = KunitParseRequest(raw_output=cli_args.raw_output,
++ json=cli_args.json)
++ result, _ = parse_tests(request, metadata, kunit_output)
++ if result.status != KunitStatus.SUCCESS:
++ sys.exit(1)
++
++
++subcommand_handlers_map = {
++ 'run': run_handler,
++ 'config': config_handler,
++ 'build': build_handler,
++ 'exec': exec_handler,
++ 'parse': parse_handler
++}
++
++
+ def main(argv):
+ parser = argparse.ArgumentParser(
+ description='Helps writing and running KUnit tests.')
+@@ -438,78 +527,14 @@ def main(argv):
+ if get_kernel_root_path():
+ os.chdir(get_kernel_root_path())
+
+- if cli_args.subcommand == 'run':
+- if not os.path.exists(cli_args.build_dir):
+- os.mkdir(cli_args.build_dir)
+-
+- linux = tree_from_args(cli_args)
+- request = KunitRequest(build_dir=cli_args.build_dir,
+- make_options=cli_args.make_options,
+- jobs=cli_args.jobs,
+- raw_output=cli_args.raw_output,
+- json=cli_args.json,
+- timeout=cli_args.timeout,
+- filter_glob=cli_args.filter_glob,
+- kernel_args=cli_args.kernel_args,
+- run_isolated=cli_args.run_isolated)
+- result = run_tests(linux, request)
+- if result.status != KunitStatus.SUCCESS:
+- sys.exit(1)
+- elif cli_args.subcommand == 'config':
+- if cli_args.build_dir and (
+- not os.path.exists(cli_args.build_dir)):
+- os.mkdir(cli_args.build_dir)
+-
+- linux = tree_from_args(cli_args)
+- request = KunitConfigRequest(build_dir=cli_args.build_dir,
+- make_options=cli_args.make_options)
+- result = config_tests(linux, request)
+- stdout.print_with_timestamp((
+- 'Elapsed time: %.3fs\n') % (
+- result.elapsed_time))
+- if result.status != KunitStatus.SUCCESS:
+- sys.exit(1)
+- elif cli_args.subcommand == 'build':
+- linux = tree_from_args(cli_args)
+- request = KunitBuildRequest(build_dir=cli_args.build_dir,
+- make_options=cli_args.make_options,
+- jobs=cli_args.jobs)
+- result = config_and_build_tests(linux, request)
+- stdout.print_with_timestamp((
+- 'Elapsed time: %.3fs\n') % (
+- result.elapsed_time))
+- if result.status != KunitStatus.SUCCESS:
+- sys.exit(1)
+- elif cli_args.subcommand == 'exec':
+- linux = tree_from_args(cli_args)
+- exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
+- build_dir=cli_args.build_dir,
+- json=cli_args.json,
+- timeout=cli_args.timeout,
+- filter_glob=cli_args.filter_glob,
+- kernel_args=cli_args.kernel_args,
+- run_isolated=cli_args.run_isolated)
+- result = exec_tests(linux, exec_request)
+- stdout.print_with_timestamp((
+- 'Elapsed time: %.3fs\n') % (result.elapsed_time))
+- if result.status != KunitStatus.SUCCESS:
+- sys.exit(1)
+- elif cli_args.subcommand == 'parse':
+- if cli_args.file is None:
+- sys.stdin.reconfigure(errors='backslashreplace') # pytype: disable=attribute-error
+- kunit_output = sys.stdin
+- else:
+- with open(cli_args.file, 'r', errors='backslashreplace') as f:
+- kunit_output = f.read().splitlines()
+- # We know nothing about how the result was created!
+- metadata = kunit_json.Metadata()
+- request = KunitParseRequest(raw_output=cli_args.raw_output,
+- json=cli_args.json)
+- result, _ = parse_tests(request, metadata, kunit_output)
+- if result.status != KunitStatus.SUCCESS:
+- sys.exit(1)
+- else:
++ subcomand_handler = subcommand_handlers_map.get(cli_args.subcommand, None)
++
++ if subcomand_handler is None:
+ parser.print_help()
++ return
++
++ subcomand_handler(cli_args)
++
+
+ if __name__ == '__main__':
+ main(sys.argv[1:])
+--
+2.51.0
+
--- /dev/null
+From 83bd9da47e0e5e114731be154f289e0de2d3206c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Jul 2023 21:25:16 +0000
+Subject: kunit: tool: Add command line interface to filter and report
+ attributes
+
+From: Rae Moar <rmoar@google.com>
+
+[ Upstream commit 723c8258c8fe167191b53e274dea435c4522e4d7 ]
+
+Add ability to kunit.py to filter attributes and report a list of tests
+including attributes without running tests.
+
+Add flag "--filter" to input filters on test attributes. Tests will be
+filtered out if they do not match all inputted filters.
+
+Example: --filter speed=slow (This filter would run only the tests that are
+marked as slow)
+
+Filters have operations: <, >, <=, >=, !=, and =. But note that the
+characters < and > are often interpreted by the shell, so they may need to
+be quoted or escaped.
+
+Example: --filter "speed>slow" or --filter speed\>slow (This filter would
+run only the tests that have the speed faster than slow.)
+
+Additionally, multiple filters can be used.
+
+Example: --filter "speed=slow, module!=example" (This filter would run
+only the tests that have the speed slow and are not in the "example"
+module)
+
+Note if the user wants to skip filtered tests instead of not
+running/showing them use the "--filter_action=skip" flag instead.
+
+Expose the output of kunit.action=list option with flag "--list_tests" to
+output a list of tests. Additionally, add flag "--list_tests_attr" to
+output a list of tests and their attributes. These flags are useful to see
+tests and test attributes without needing to run tests.
+
+Example of the output of "--list_tests_attr":
+ example
+ example.test_1
+ example.test_2
+ # example.test_2.speed: slow
+
+This output includes a suite, example, with two test cases, test_1 and
+test_2. And in this instance test_2 has been marked as slow.
+
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Rae Moar <rmoar@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit.py | 70 ++++++++++++++++++++++++--
+ tools/testing/kunit/kunit_kernel.py | 8 ++-
+ tools/testing/kunit/kunit_parser.py | 11 +++-
+ tools/testing/kunit/kunit_tool_test.py | 39 +++++++-------
+ 4 files changed, 99 insertions(+), 29 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
+index 1ed7f0f86dee3..23f84f405b4a0 100755
+--- a/tools/testing/kunit/kunit.py
++++ b/tools/testing/kunit/kunit.py
+@@ -55,8 +55,12 @@ class KunitExecRequest(KunitParseRequest):
+ build_dir: str
+ timeout: int
+ filter_glob: str
++ filter: str
++ filter_action: Optional[str]
+ kernel_args: Optional[List[str]]
+ run_isolated: Optional[str]
++ list_tests: bool
++ list_tests_attr: bool
+
+ @dataclass
+ class KunitRequest(KunitExecRequest, KunitBuildRequest):
+@@ -111,19 +115,41 @@ def config_and_build_tests(linux: kunit_kernel.LinuxSourceTree,
+
+ def _list_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> List[str]:
+ args = ['kunit.action=list']
++
++ if request.kernel_args:
++ args.extend(request.kernel_args)
++
++ output = linux.run_kernel(args=args,
++ timeout=request.timeout,
++ filter_glob=request.filter_glob,
++ filter=request.filter,
++ filter_action=request.filter_action,
++ build_dir=request.build_dir)
++ lines = kunit_parser.extract_tap_lines(output)
++ # Hack! Drop the dummy TAP version header that the executor prints out.
++ lines.pop()
++
++ # Filter out any extraneous non-test output that might have gotten mixed in.
++ return [l for l in output if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
++
++def _list_tests_attr(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> Iterable[str]:
++ args = ['kunit.action=list_attr']
++
+ if request.kernel_args:
+ args.extend(request.kernel_args)
+
+ output = linux.run_kernel(args=args,
+ timeout=request.timeout,
+ filter_glob=request.filter_glob,
++ filter=request.filter,
++ filter_action=request.filter_action,
+ build_dir=request.build_dir)
+ lines = kunit_parser.extract_tap_lines(output)
+ # Hack! Drop the dummy TAP version header that the executor prints out.
+ lines.pop()
+
+ # Filter out any extraneous non-test output that might have gotten mixed in.
+- return [l for l in lines if re.match(r'^[^\s.]+\.[^\s.]+$', l)]
++ return lines
+
+ def _suites_from_test_list(tests: List[str]) -> List[str]:
+ """Extracts all the suites from an ordered list of tests."""
+@@ -137,10 +163,18 @@ def _suites_from_test_list(tests: List[str]) -> List[str]:
+ suites.append(suite)
+ return suites
+
+-
+-
+ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -> KunitResult:
+ filter_globs = [request.filter_glob]
++ if request.list_tests:
++ output = _list_tests(linux, request)
++ for line in output:
++ print(line.rstrip())
++ return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
++ if request.list_tests_attr:
++ attr_output = _list_tests_attr(linux, request)
++ for line in attr_output:
++ print(line.rstrip())
++ return KunitResult(status=KunitStatus.SUCCESS, elapsed_time=0.0)
+ if request.run_isolated:
+ tests = _list_tests(linux, request)
+ if request.run_isolated == 'test':
+@@ -164,6 +198,8 @@ def exec_tests(linux: kunit_kernel.LinuxSourceTree, request: KunitExecRequest) -
+ args=request.kernel_args,
+ timeout=request.timeout,
+ filter_glob=filter_glob,
++ filter=request.filter,
++ filter_action=request.filter_action,
+ build_dir=request.build_dir)
+
+ _, test_result = parse_tests(request, metadata, run_result)
+@@ -350,6 +386,16 @@ def add_exec_opts(parser: argparse.ArgumentParser) -> None:
+ nargs='?',
+ default='',
+ metavar='filter_glob')
++ parser.add_argument('--filter',
++ help='Filter KUnit tests with attributes, '
++ 'e.g. module=example or speed>slow',
++ type=str,
++ default='')
++ parser.add_argument('--filter_action',
++ help='If set to skip, filtered tests will be skipped, '
++ 'e.g. --filter_action=skip. Otherwise they will not run.',
++ type=str,
++ choices=['skip'])
+ parser.add_argument('--kernel_args',
+ help='Kernel command-line parameters. Maybe be repeated',
+ action='append', metavar='')
+@@ -359,6 +405,12 @@ def add_exec_opts(parser: argparse.ArgumentParser) -> None:
+ 'what ran before it.',
+ type=str,
+ choices=['suite', 'test'])
++ parser.add_argument('--list_tests', help='If set, list all tests that will be '
++ 'run.',
++ action='store_true')
++ parser.add_argument('--list_tests_attr', help='If set, list all tests and test '
++ 'attributes.',
++ action='store_true')
+
+ def add_parse_opts(parser: argparse.ArgumentParser) -> None:
+ parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
+@@ -407,8 +459,12 @@ def run_handler(cli_args: argparse.Namespace) -> None:
+ json=cli_args.json,
+ timeout=cli_args.timeout,
+ filter_glob=cli_args.filter_glob,
++ filter=cli_args.filter,
++ filter_action=cli_args.filter_action,
+ kernel_args=cli_args.kernel_args,
+- run_isolated=cli_args.run_isolated)
++ run_isolated=cli_args.run_isolated,
++ list_tests=cli_args.list_tests,
++ list_tests_attr=cli_args.list_tests_attr)
+ result = run_tests(linux, request)
+ if result.status != KunitStatus.SUCCESS:
+ sys.exit(1)
+@@ -450,8 +506,12 @@ def exec_handler(cli_args: argparse.Namespace) -> None:
+ json=cli_args.json,
+ timeout=cli_args.timeout,
+ filter_glob=cli_args.filter_glob,
++ filter=cli_args.filter,
++ filter_action=cli_args.filter_action,
+ kernel_args=cli_args.kernel_args,
+- run_isolated=cli_args.run_isolated)
++ run_isolated=cli_args.run_isolated,
++ list_tests=cli_args.list_tests,
++ list_tests_attr=cli_args.list_tests_attr)
+ result = exec_tests(linux, exec_request)
+ stdout.print_with_timestamp((
+ 'Elapsed time: %.3fs\n') % (result.elapsed_time))
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index faf90dcfed32d..86accd53644c1 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -329,11 +329,15 @@ class LinuxSourceTree:
+ return False
+ return self.validate_config(build_dir)
+
+- def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', timeout: Optional[int]=None) -> Iterator[str]:
++ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+ if not args:
+ args = []
+ if filter_glob:
+- args.append('kunit.filter_glob='+filter_glob)
++ args.append('kunit.filter_glob=' + filter_glob)
++ if filter:
++ args.append('kunit.filter="' + filter + '"')
++ if filter_action:
++ args.append('kunit.filter_action=' + filter_action)
+ args.append('kunit.enable=1')
+
+ process = self._ops.start(args, build_dir)
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index d5abd0567c8e0..ca9921ea328a4 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -221,6 +221,7 @@ KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
+ TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
+ KTAP_END = re.compile(r'\s*(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
++EXECUTOR_ERROR = re.compile(r'\s*kunit executor: (.*)$')
+
+ def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
+ """Extracts KTAP lines from the kernel output."""
+@@ -251,6 +252,8 @@ def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
+ # remove the prefix, if any.
+ line = line[prefix_len:]
+ yield line_num, line
++ elif EXECUTOR_ERROR.search(line):
++ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+ KTAP_VERSIONS = [1]
+@@ -456,7 +459,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+- non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START]
++ non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START, TAP_START]
+ while lines and not any(re.match(lines.peek())
+ for re in non_diagnostic_lines):
+ log.append(lines.pop())
+@@ -722,6 +725,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
+ """
+ test = Test()
+ test.log.extend(log)
++
++ # Parse any errors prior to parsing tests
++ err_log = parse_diagnostic(lines)
++ test.log.extend(err_log)
++
+ if not is_subtest:
+ # If parsing the main/top-level test, parse KTAP version line and
+ # test plan
+@@ -783,6 +791,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest:
+ # Don't override a bad status if this test had one reported.
+ # Assumption: no subtests means CRASHED is from Test.__init__()
+ if test.status in (TestStatus.TEST_CRASHED, TestStatus.SUCCESS):
++ print_log(test.log)
+ test.status = TestStatus.NO_TESTS
+ test.add_error('0 tests run!')
+
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index 9ba0ff95fad5c..04714f59fced6 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -612,7 +612,7 @@ class KUnitMainTest(unittest.TestCase):
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 0)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir='.kunit', filter_glob='', timeout=300)
++ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_passes_args_pass(self):
+@@ -620,7 +620,7 @@ class KUnitMainTest(unittest.TestCase):
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.assertEqual(self.linux_source_mock.run_kernel.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir='.kunit', filter_glob='', timeout=300)
++ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_exec_passes_args_fail(self):
+@@ -644,7 +644,7 @@ class KUnitMainTest(unittest.TestCase):
+ kunit.main(['run'])
+ self.assertEqual(e.exception.code, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir='.kunit', filter_glob='', timeout=300)
++ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains(' 0 tests run!'))
+
+ def test_exec_raw_output(self):
+@@ -685,13 +685,13 @@ class KUnitMainTest(unittest.TestCase):
+ self.linux_source_mock.run_kernel = mock.Mock(return_value=[])
+ kunit.main(['run', '--raw_output', 'filter_glob'])
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir='.kunit', filter_glob='filter_glob', timeout=300)
++ args=None, build_dir='.kunit', filter_glob='filter_glob', filter='', filter_action=None, timeout=300)
+
+ def test_exec_timeout(self):
+ timeout = 3453
+ kunit.main(['exec', '--timeout', str(timeout)])
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
++ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_timeout(self):
+@@ -699,7 +699,7 @@ class KUnitMainTest(unittest.TestCase):
+ kunit.main(['run', '--timeout', str(timeout)])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir='.kunit', filter_glob='', timeout=timeout)
++ args=None, build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=timeout)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_builddir(self):
+@@ -707,7 +707,7 @@ class KUnitMainTest(unittest.TestCase):
+ kunit.main(['run', '--build_dir=.kunit'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir=build_dir, filter_glob='', timeout=300)
++ args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_config_builddir(self):
+@@ -725,7 +725,7 @@ class KUnitMainTest(unittest.TestCase):
+ build_dir = '.kunit'
+ kunit.main(['exec', '--build_dir', build_dir])
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=None, build_dir=build_dir, filter_glob='', timeout=300)
++ args=None, build_dir=build_dir, filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_run_kunitconfig(self):
+@@ -801,7 +801,7 @@ class KUnitMainTest(unittest.TestCase):
+ kunit.main(['run', '--kernel_args=a=1', '--kernel_args=b=2'])
+ self.assertEqual(self.linux_source_mock.build_reconfig.call_count, 1)
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=['a=1','b=2'], build_dir='.kunit', filter_glob='', timeout=300)
++ args=['a=1','b=2'], build_dir='.kunit', filter_glob='', filter='', filter_action=None, timeout=300)
+ self.print_mock.assert_any_call(StrContains('Testing complete.'))
+
+ def test_list_tests(self):
+@@ -809,13 +809,11 @@ class KUnitMainTest(unittest.TestCase):
+ self.linux_source_mock.run_kernel.return_value = ['TAP version 14', 'init: random output'] + want
+
+ got = kunit._list_tests(self.linux_source_mock,
+- kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'suite'))
+-
++ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'suite', False, False))
+ self.assertEqual(got, want)
+ # Should respect the user's filter glob when listing tests.
+ self.linux_source_mock.run_kernel.assert_called_once_with(
+- args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', timeout=300)
+-
++ args=['kunit.action=list'], build_dir='.kunit', filter_glob='suite*', filter='', filter_action=None, timeout=300)
+
+ @mock.patch.object(kunit, '_list_tests')
+ def test_run_isolated_by_suite(self, mock_tests):
+@@ -824,10 +822,10 @@ class KUnitMainTest(unittest.TestCase):
+
+ # Should respect the user's filter glob when listing tests.
+ mock_tests.assert_called_once_with(mock.ANY,
+- kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', None, 'suite'))
++ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*.test*', '', None, None, 'suite', False, False))
+ self.linux_source_mock.run_kernel.assert_has_calls([
+- mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', timeout=300),
+- mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', timeout=300),
++ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test*', filter='', filter_action=None, timeout=300),
++ mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test*', filter='', filter_action=None, timeout=300),
+ ])
+
+ @mock.patch.object(kunit, '_list_tests')
+@@ -837,13 +835,12 @@ class KUnitMainTest(unittest.TestCase):
+
+ # Should respect the user's filter glob when listing tests.
+ mock_tests.assert_called_once_with(mock.ANY,
+- kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', None, 'test'))
++ kunit.KunitExecRequest(None, None, '.kunit', 300, 'suite*', '', None, None, 'test', False, False))
+ self.linux_source_mock.run_kernel.assert_has_calls([
+- mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', timeout=300),
+- mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', timeout=300),
+- mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', timeout=300),
++ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test1', filter='', filter_action=None, timeout=300),
++ mock.call(args=None, build_dir='.kunit', filter_glob='suite.test2', filter='', filter_action=None, timeout=300),
++ mock.call(args=None, build_dir='.kunit', filter_glob='suite2.test1', filter='', filter_action=None, timeout=300),
+ ])
+
+-
+ if __name__ == '__main__':
+ unittest.main()
+--
+2.51.0
+
--- /dev/null
+From 09252a93ee27b87d0ec77bbcebb1fb8ebf824173 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:14:10 +0545
+Subject: kunit: tool: copy caller args in run_kernel to prevent mutation
+
+From: Shuvam Pandey <shuvampandey1@gmail.com>
+
+[ Upstream commit 40804c4974b8df2adab72f6475d343eaff72b7f6 ]
+
+run_kernel() appended KUnit flags directly to the caller-provided args
+list. When exec_tests() calls run_kernel() repeatedly (e.g. with
+--run_isolated), each call mutated the same list, causing later runs
+to inherit stale filter_glob values and duplicate kunit.enable flags.
+
+Fix this by copying args at the start of run_kernel(). Add a regression
+test that calls run_kernel() twice with the same list and verifies the
+original remains unchanged.
+
+Fixes: ff9e09a3762f ("kunit: tool: support running each suite/test separately")
+Signed-off-by: Shuvam Pandey <shuvampandey1@gmail.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_kernel.py | 6 ++++--
+ tools/testing/kunit/kunit_tool_test.py | 26 ++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index 86accd53644c1..2343f9a77a65a 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -330,8 +330,10 @@ class LinuxSourceTree:
+ return self.validate_config(build_dir)
+
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+- if not args:
+- args = []
++ # Copy to avoid mutating the caller-supplied list. exec_tests() reuses
++ # the same args across repeated run_kernel() calls (e.g. --run_isolated),
++ # so appending to the original would accumulate stale flags on each call.
++ args = list(args) if args else []
+ if filter_glob:
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index 04714f59fced6..29063d9ae2851 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -479,6 +479,32 @@ class LinuxSourceTreeTest(unittest.TestCase):
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
++ def test_run_kernel_args_not_mutated(self):
++ """Verify run_kernel() copies args so callers can reuse them."""
++ start_calls = []
++
++ def fake_start(start_args, unused_build_dir):
++ start_calls.append(list(start_args))
++ return subprocess.Popen(['printf', 'KTAP version 1\n'],
++ text=True, stdout=subprocess.PIPE)
++
++ with tempfile.TemporaryDirectory('') as build_dir:
++ tree = kunit_kernel.LinuxSourceTree(build_dir,
++ kunitconfig_paths=[os.devnull])
++ with mock.patch.object(tree._ops, 'start', side_effect=fake_start), \
++ mock.patch.object(kunit_kernel.subprocess, 'call'):
++ kernel_args = ['mem=1G']
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test1'):
++ pass
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test2'):
++ pass
++ self.assertEqual(kernel_args, ['mem=1G'],
++ 'run_kernel() should not modify caller args')
++ self.assertIn('kunit.filter_glob=suite.test1', start_calls[0])
++ self.assertIn('kunit.filter_glob=suite.test2', start_calls[1])
++
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+--
+2.51.0
+
--- /dev/null
+From c87174b31a784adfc76c028fc102c2b295e21dd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Nov 2022 16:12:34 -0800
+Subject: kunit: tool: don't include KTAP headers and the like in the test log
+
+From: Daniel Latypov <dlatypov@google.com>
+
+[ Upstream commit 5937e0c04afc7d4b7b737fda93316ba4b74183c0 ]
+
+We print the "test log" on failure.
+This is meant to be all the kernel output that happened during the test.
+
+But we also include the special KTAP lines in it, which are often
+redundant.
+
+E.g. we include the "not ok" line in the log, right before we print
+that the test case failed...
+[13:51:48] Expected 2 + 1 == 2, but
+[13:51:48] 2 + 1 == 3 (0x3)
+[13:51:48] not ok 1 example_simple_test
+[13:51:48] [FAILED] example_simple_test
+
+More full example after this patch:
+[13:51:48] =================== example (4 subtests) ===================
+[13:51:48] # example_simple_test: initializing
+[13:51:48] # example_simple_test: EXPECTATION FAILED at lib/kunit/kunit-example-test.c:29
+[13:51:48] Expected 2 + 1 == 2, but
+[13:51:48] 2 + 1 == 3 (0x3)
+[13:51:48] [FAILED] example_simple_test
+
+Signed-off-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_parser.py | 8 ++++----
+ tools/testing/kunit/kunit_tool_test.py | 17 +++++++++++++++++
+ 2 files changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index 259ce7696587b..baf0430be0e33 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -304,7 +304,7 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ check_version(version_num, TAP_VERSIONS, 'TAP', test)
+ else:
+ return False
+- test.log.append(lines.pop())
++ lines.pop()
+ return True
+
+ TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
+@@ -327,8 +327,8 @@ def parse_test_header(lines: LineStream, test: Test) -> bool:
+ match = TEST_HEADER.match(lines.peek())
+ if not match:
+ return False
+- test.log.append(lines.pop())
+ test.name = match.group(1)
++ lines.pop()
+ return True
+
+ TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
+@@ -354,9 +354,9 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ if not match:
+ test.expected_count = None
+ return False
+- test.log.append(lines.pop())
+ expected_count = int(match.group(1))
+ test.expected_count = expected_count
++ lines.pop()
+ return True
+
+ TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+@@ -418,7 +418,7 @@ def parse_test_result(lines: LineStream, test: Test,
+ # Check if line matches test result line format
+ if not match:
+ return False
+- test.log.append(lines.pop())
++ lines.pop()
+
+ # Set name of test object
+ if skip_match:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index 8334d660753c4..7e2f748a24eb2 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -81,6 +81,10 @@ class KconfigTest(unittest.TestCase):
+
+ class KUnitParserTest(unittest.TestCase):
+
++ def noPrintCallContains(self, substr: str):
++ for call in self.print_mock.mock_calls:
++ self.assertNotIn(substr, call.args[0])
++
+ def assertContains(self, needle: str, haystack: kunit_parser.LineStream):
+ # Clone the iterator so we can print the contents on failure.
+ copy, backup = itertools.tee(haystack)
+@@ -345,6 +349,19 @@ class KUnitParserTest(unittest.TestCase):
+ result = kunit_parser.parse_run_tests(file.readlines())
+ self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
++ def test_show_test_output_on_failure(self):
++ output = """
++ KTAP version 1
++ 1..1
++ Test output.
++ not ok 1 test1
++ """
++ result = kunit_parser.parse_run_tests(output.splitlines())
++ self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
++
++ self.print_mock.assert_any_call(StrContains('Test output.'))
++ self.noPrintCallContains('not ok 1 test1')
++
+ def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+ return kunit_parser.LineStream(enumerate(strs, start=1))
+
+--
+2.51.0
+
--- /dev/null
+From a203d415d9982682c4a6a0cd0fdcc9c9b905967e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 15:06:38 -0700
+Subject: kunit: tool: fix pre-existing `mypy --strict` errors and update
+ run_checks.py
+
+From: Daniel Latypov <dlatypov@google.com>
+
+[ Upstream commit 1da2e6220e1115930694c649605534baf6fa3dea ]
+
+Basically, get this command to be happy and make run_checks.py happy
+ $ mypy --strict --exclude '_test.py$' --exclude qemu_configs/ ./tools/testing/kunit/
+
+Primarily the changes are
+* add `-> None` return type annotations
+* add all the missing argument type annotations
+
+Previously, we had false positives from mypy in `main()`, see commit
+09641f7c7d8f ("kunit: tool: surface and address more typing issues").
+But after commit 2dc9d6ca52a4 ("kunit: kunit.py extract handlers")
+refactored things, the variable name reuse mypy hated is gone.
+
+Note: mypy complains we don't annotate the types of the unused args in our
+signal handler. That's silly.
+But to make it happy, I've copy-pasted an appropriate annotation from
+https://github.com/python/typing/discussions/1042#discussioncomment-2013595.
+
+Reported-by: Johannes Berg <johannes.berg@intel.com>
+Link: https://lore.kernel.org/linux-kselftest/9a172b50457f4074af41fe1dc8e55dcaf4795d7e.camel@sipsolutions.net/
+Signed-off-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit.py | 24 ++++++++++++------------
+ tools/testing/kunit/kunit_config.py | 4 ++--
+ tools/testing/kunit/kunit_kernel.py | 29 +++++++++++++++--------------
+ tools/testing/kunit/run_checks.py | 4 ++--
+ 4 files changed, 31 insertions(+), 30 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
+index 172db04b48f42..1ed7f0f86dee3 100755
+--- a/tools/testing/kunit/kunit.py
++++ b/tools/testing/kunit/kunit.py
+@@ -278,7 +278,7 @@ def massage_argv(argv: Sequence[str]) -> Sequence[str]:
+ def get_default_jobs() -> int:
+ return len(os.sched_getaffinity(0))
+
+-def add_common_opts(parser) -> None:
++def add_common_opts(parser: argparse.ArgumentParser) -> None:
+ parser.add_argument('--build_dir',
+ help='As in the make command, it specifies the build '
+ 'directory.',
+@@ -329,13 +329,13 @@ def add_common_opts(parser) -> None:
+ help='Additional QEMU arguments, e.g. "-smp 8"',
+ action='append', metavar='')
+
+-def add_build_opts(parser) -> None:
++def add_build_opts(parser: argparse.ArgumentParser) -> None:
+ parser.add_argument('--jobs',
+ help='As in the make command, "Specifies the number of '
+ 'jobs (commands) to run simultaneously."',
+ type=int, default=get_default_jobs(), metavar='N')
+
+-def add_exec_opts(parser) -> None:
++def add_exec_opts(parser: argparse.ArgumentParser) -> None:
+ parser.add_argument('--timeout',
+ help='maximum number of seconds to allow for all tests '
+ 'to run. This does not include time taken to build the '
+@@ -360,7 +360,7 @@ def add_exec_opts(parser) -> None:
+ type=str,
+ choices=['suite', 'test'])
+
+-def add_parse_opts(parser) -> None:
++def add_parse_opts(parser: argparse.ArgumentParser) -> None:
+ parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
+ 'By default, filters to just KUnit output. Use '
+ '--raw_output=all to show everything',
+@@ -395,7 +395,7 @@ def tree_from_args(cli_args: argparse.Namespace) -> kunit_kernel.LinuxSourceTree
+ extra_qemu_args=qemu_args)
+
+
+-def run_handler(cli_args):
++def run_handler(cli_args: argparse.Namespace) -> None:
+ if not os.path.exists(cli_args.build_dir):
+ os.mkdir(cli_args.build_dir)
+
+@@ -414,7 +414,7 @@ def run_handler(cli_args):
+ sys.exit(1)
+
+
+-def config_handler(cli_args):
++def config_handler(cli_args: argparse.Namespace) -> None:
+ if cli_args.build_dir and (
+ not os.path.exists(cli_args.build_dir)):
+ os.mkdir(cli_args.build_dir)
+@@ -430,7 +430,7 @@ def config_handler(cli_args):
+ sys.exit(1)
+
+
+-def build_handler(cli_args):
++def build_handler(cli_args: argparse.Namespace) -> None:
+ linux = tree_from_args(cli_args)
+ request = KunitBuildRequest(build_dir=cli_args.build_dir,
+ make_options=cli_args.make_options,
+@@ -443,7 +443,7 @@ def build_handler(cli_args):
+ sys.exit(1)
+
+
+-def exec_handler(cli_args):
++def exec_handler(cli_args: argparse.Namespace) -> None:
+ linux = tree_from_args(cli_args)
+ exec_request = KunitExecRequest(raw_output=cli_args.raw_output,
+ build_dir=cli_args.build_dir,
+@@ -459,10 +459,10 @@ def exec_handler(cli_args):
+ sys.exit(1)
+
+
+-def parse_handler(cli_args):
++def parse_handler(cli_args: argparse.Namespace) -> None:
+ if cli_args.file is None:
+- sys.stdin.reconfigure(errors='backslashreplace') # pytype: disable=attribute-error
+- kunit_output = sys.stdin
++ sys.stdin.reconfigure(errors='backslashreplace') # type: ignore
++ kunit_output = sys.stdin # type: Iterable[str]
+ else:
+ with open(cli_args.file, 'r', errors='backslashreplace') as f:
+ kunit_output = f.read().splitlines()
+@@ -484,7 +484,7 @@ subcommand_handlers_map = {
+ }
+
+
+-def main(argv):
++def main(argv: Sequence[str]) -> None:
+ parser = argparse.ArgumentParser(
+ description='Helps writing and running KUnit tests.')
+ subparser = parser.add_subparsers(dest='subcommand')
+diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
+index 9f76d7b896175..eb5dd01210b1b 100644
+--- a/tools/testing/kunit/kunit_config.py
++++ b/tools/testing/kunit/kunit_config.py
+@@ -8,7 +8,7 @@
+
+ from dataclasses import dataclass
+ import re
+-from typing import Dict, Iterable, List, Tuple
++from typing import Any, Dict, Iterable, List, Tuple
+
+ CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
+ CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
+@@ -34,7 +34,7 @@ class Kconfig:
+ def __init__(self) -> None:
+ self._entries = {} # type: Dict[str, str]
+
+- def __eq__(self, other) -> bool:
++ def __eq__(self, other: Any) -> bool:
+ if not isinstance(other, self.__class__):
+ return False
+ return self._entries == other._entries
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index cd73256c30c39..faf90dcfed32d 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -16,6 +16,7 @@ import shutil
+ import signal
+ import threading
+ from typing import Iterator, List, Optional, Tuple
++from types import FrameType
+
+ import kunit_config
+ import qemu_config
+@@ -56,7 +57,7 @@ class LinuxSourceTreeOperations:
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+ return base_kunitconfig
+
+- def make_olddefconfig(self, build_dir: str, make_options) -> None:
++ def make_olddefconfig(self, build_dir: str, make_options: Optional[List[str]]) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, 'olddefconfig']
+ if self._cross_compile:
+ command += ['CROSS_COMPILE=' + self._cross_compile]
+@@ -70,7 +71,7 @@ class LinuxSourceTreeOperations:
+ except subprocess.CalledProcessError as e:
+ raise ConfigError(e.output.decode())
+
+- def make(self, jobs, build_dir: str, make_options) -> None:
++ def make(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> None:
+ command = ['make', 'ARCH=' + self._linux_arch, 'O=' + build_dir, '--jobs=' + str(jobs)]
+ if make_options:
+ command.extend(make_options)
+@@ -132,7 +133,7 @@ class LinuxSourceTreeOperationsQemu(LinuxSourceTreeOperations):
+ class LinuxSourceTreeOperationsUml(LinuxSourceTreeOperations):
+ """An abstraction over command line operations performed on a source tree."""
+
+- def __init__(self, cross_compile=None):
++ def __init__(self, cross_compile: Optional[str]=None):
+ super().__init__(linux_arch='um', cross_compile=cross_compile)
+
+ def make_arch_config(self, base_kunitconfig: kunit_config.Kconfig) -> kunit_config.Kconfig:
+@@ -215,7 +216,7 @@ def _get_qemu_ops(config_path: str,
+
+ if not hasattr(config, 'QEMU_ARCH'):
+ raise ValueError('qemu_config module missing "QEMU_ARCH": ' + config_path)
+- params: qemu_config.QemuArchParams = config.QEMU_ARCH # type: ignore
++ params: qemu_config.QemuArchParams = config.QEMU_ARCH
+ if extra_qemu_args:
+ params.extra_qemu_params.extend(extra_qemu_args)
+ return params.linux_arch, LinuxSourceTreeOperationsQemu(
+@@ -229,10 +230,10 @@ class LinuxSourceTree:
+ build_dir: str,
+ kunitconfig_paths: Optional[List[str]]=None,
+ kconfig_add: Optional[List[str]]=None,
+- arch=None,
+- cross_compile=None,
+- qemu_config_path=None,
+- extra_qemu_args=None) -> None:
++ arch: Optional[str]=None,
++ cross_compile: Optional[str]=None,
++ qemu_config_path: Optional[str]=None,
++ extra_qemu_args: Optional[List[str]]=None) -> None:
+ signal.signal(signal.SIGINT, self.signal_handler)
+ if qemu_config_path:
+ self._arch, self._ops = _get_qemu_ops(qemu_config_path, extra_qemu_args, cross_compile)
+@@ -275,7 +276,7 @@ class LinuxSourceTree:
+ logging.error(message)
+ return False
+
+- def build_config(self, build_dir: str, make_options) -> bool:
++ def build_config(self, build_dir: str, make_options: Optional[List[str]]) -> bool:
+ kconfig_path = get_kconfig_path(build_dir)
+ if build_dir and not os.path.exists(build_dir):
+ os.mkdir(build_dir)
+@@ -303,7 +304,7 @@ class LinuxSourceTree:
+ old_kconfig = kunit_config.parse_file(old_path)
+ return old_kconfig != self._kconfig
+
+- def build_reconfig(self, build_dir: str, make_options) -> bool:
++ def build_reconfig(self, build_dir: str, make_options: Optional[List[str]]) -> bool:
+ """Creates a new .config if it is not a subset of the .kunitconfig."""
+ kconfig_path = get_kconfig_path(build_dir)
+ if not os.path.exists(kconfig_path):
+@@ -319,7 +320,7 @@ class LinuxSourceTree:
+ os.remove(kconfig_path)
+ return self.build_config(build_dir, make_options)
+
+- def build_kernel(self, jobs, build_dir: str, make_options) -> bool:
++ def build_kernel(self, jobs: int, build_dir: str, make_options: Optional[List[str]]) -> bool:
+ try:
+ self._ops.make_olddefconfig(build_dir, make_options)
+ self._ops.make(jobs, build_dir, make_options)
+@@ -328,7 +329,7 @@ class LinuxSourceTree:
+ return False
+ return self.validate_config(build_dir)
+
+- def run_kernel(self, args=None, build_dir='', filter_glob='', timeout=None) -> Iterator[str]:
++ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', timeout: Optional[int]=None) -> Iterator[str]:
+ if not args:
+ args = []
+ if filter_glob:
+@@ -339,7 +340,7 @@ class LinuxSourceTree:
+ assert process.stdout is not None # tell mypy it's set
+
+ # Enforce the timeout in a background thread.
+- def _wait_proc():
++ def _wait_proc() -> None:
+ try:
+ process.wait(timeout=timeout)
+ except Exception as e:
+@@ -365,6 +366,6 @@ class LinuxSourceTree:
+ waiter.join()
+ subprocess.call(['stty', 'sane'])
+
+- def signal_handler(self, unused_sig, unused_frame) -> None:
++ def signal_handler(self, unused_sig: int, unused_frame: Optional[FrameType]) -> None:
+ logging.error('Build interruption occurred. Cleaning console.')
+ subprocess.call(['stty', 'sane'])
+diff --git a/tools/testing/kunit/run_checks.py b/tools/testing/kunit/run_checks.py
+index 066e6f938f6dc..d061cf1ca4a59 100755
+--- a/tools/testing/kunit/run_checks.py
++++ b/tools/testing/kunit/run_checks.py
+@@ -23,7 +23,7 @@ commands: Dict[str, Sequence[str]] = {
+ 'kunit_tool_test.py': ['./kunit_tool_test.py'],
+ 'kunit smoke test': ['./kunit.py', 'run', '--kunitconfig=lib/kunit', '--build_dir=kunit_run_checks'],
+ 'pytype': ['/bin/sh', '-c', 'pytype *.py'],
+- 'mypy': ['/bin/sh', '-c', 'mypy *.py'],
++ 'mypy': ['mypy', '--strict', '--exclude', '_test.py$', '--exclude', 'qemu_configs/', '.'],
+ }
+
+ # The user might not have mypy or pytype installed, skip them if so.
+@@ -73,7 +73,7 @@ def main(argv: Sequence[str]) -> None:
+ sys.exit(1)
+
+
+-def run_cmd(argv: Sequence[str]):
++def run_cmd(argv: Sequence[str]) -> None:
+ subprocess.check_output(argv, stderr=subprocess.STDOUT, cwd=ABS_TOOL_PATH, timeout=TIMEOUT)
+
+
+--
+2.51.0
+
--- /dev/null
+From 44cf45a9ed6fef15b3b18ef5bdc71cc41e13291c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Nov 2022 11:55:26 -0800
+Subject: kunit: tool: make --json do nothing if --raw_output is set
+
+From: Daniel Latypov <dlatypov@google.com>
+
+[ Upstream commit 309e22effb741a8c65131a2694a49839fd685a27 ]
+
+When --raw_output is set (to any value), we don't actually parse the
+test results. So asking to print the test results as json doesn't make
+sense.
+
+We internally create a fake test with one passing subtest, so --json
+would actually print out something misleading.
+
+This patch:
+* Rewords the flag descriptions so hopefully this is more obvious.
+* Also updates --raw_output's description to note the default behavior
+ is to print out only "KUnit" results (actually any KTAP results)
+* also renames and refactors some related logic for clarity (e.g.
+ test_result => test, it's a kunit_parser.Test object).
+
+Notably, this patch does not make it an error to specify --json and
+--raw_output together. This is an edge case, but I know of at least one
+wrapper around kunit.py that always sets --json. You'd never be able to
+use --raw_output with that wrapper.
+
+Signed-off-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit.py | 34 ++++++++++++++++++----------------
+ 1 file changed, 18 insertions(+), 16 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
+index 4d4663fb578bd..e7b6549712d66 100755
+--- a/tools/testing/kunit/kunit.py
++++ b/tools/testing/kunit/kunit.py
+@@ -192,12 +192,11 @@ def _map_to_overall_status(test_status: kunit_parser.TestStatus) -> KunitStatus:
+ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input_data: Iterable[str]) -> Tuple[KunitResult, kunit_parser.Test]:
+ parse_start = time.time()
+
+- test_result = kunit_parser.Test()
+-
+ if request.raw_output:
+ # Treat unparsed results as one passing test.
+- test_result.status = kunit_parser.TestStatus.SUCCESS
+- test_result.counts.passed = 1
++ fake_test = kunit_parser.Test()
++ fake_test.status = kunit_parser.TestStatus.SUCCESS
++ fake_test.counts.passed = 1
+
+ output: Iterable[str] = input_data
+ if request.raw_output == 'all':
+@@ -206,14 +205,17 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
+ output = kunit_parser.extract_tap_lines(output, lstrip=False)
+ for line in output:
+ print(line.rstrip())
++ parse_time = time.time() - parse_start
++ return KunitResult(KunitStatus.SUCCESS, parse_time), fake_test
+
+- else:
+- test_result = kunit_parser.parse_run_tests(input_data)
+- parse_end = time.time()
++
++ # Actually parse the test results.
++ test = kunit_parser.parse_run_tests(input_data)
++ parse_time = time.time() - parse_start
+
+ if request.json:
+ json_str = kunit_json.get_json_result(
+- test=test_result,
++ test=test,
+ metadata=metadata)
+ if request.json == 'stdout':
+ print(json_str)
+@@ -223,10 +225,10 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
+ stdout.print_with_timestamp("Test results stored in %s" %
+ os.path.abspath(request.json))
+
+- if test_result.status != kunit_parser.TestStatus.SUCCESS:
+- return KunitResult(KunitStatus.TEST_FAILURE, parse_end - parse_start), test_result
++ if test.status != kunit_parser.TestStatus.SUCCESS:
++ return KunitResult(KunitStatus.TEST_FAILURE, parse_time), test
+
+- return KunitResult(KunitStatus.SUCCESS, parse_end - parse_start), test_result
++ return KunitResult(KunitStatus.SUCCESS, parse_time), test
+
+ def run_tests(linux: kunit_kernel.LinuxSourceTree,
+ request: KunitRequest) -> KunitResult:
+@@ -359,14 +361,14 @@ def add_exec_opts(parser) -> None:
+ choices=['suite', 'test'])
+
+ def add_parse_opts(parser) -> None:
+- parser.add_argument('--raw_output', help='If set don\'t format output from kernel. '
+- 'If set to --raw_output=kunit, filters to just KUnit output.',
++ parser.add_argument('--raw_output', help='If set don\'t parse output from kernel. '
++ 'By default, filters to just KUnit output. Use '
++ '--raw_output=all to show everything',
+ type=str, nargs='?', const='all', default=None, choices=['all', 'kunit'])
+ parser.add_argument('--json',
+ nargs='?',
+- help='Stores test results in a JSON, and either '
+- 'prints to stdout or saves to file if a '
+- 'filename is specified',
++ help='Prints parsed test results as JSON to stdout or a file if '
++ 'a filename is specified. Does nothing if --raw_output is set.',
+ type=str, const='stdout', default=None, metavar='FILE')
+
+
+--
+2.51.0
+
--- /dev/null
+From 2c97b4148b5d228df74d31b1cd77094ac5230628 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Nov 2022 10:54:19 -0800
+Subject: kunit: tool: make parser preserve whitespace when printing test log
+
+From: Daniel Latypov <dlatypov@google.com>
+
+[ Upstream commit c2bb92bc4ea13842fdd27819c0d5b48df2b86ea5 ]
+
+Currently, kunit_parser.py is stripping all leading whitespace to make
+parsing easier. But this means we can't accurately show kernel output
+for failing tests or when the kernel crashes.
+
+Embarrassingly, this affects even KUnit's own output, e.g.
+[13:40:46] Expected 2 + 1 == 2, but
+[13:40:46] 2 + 1 == 3 (0x3)
+[13:40:46] not ok 1 example_simple_test
+[13:40:46] [FAILED] example_simple_test
+
+After this change, here's what the output in context would look like
+[13:40:46] =================== example (4 subtests) ===================
+[13:40:46] # example_simple_test: initializing
+[13:40:46] # example_simple_test: EXPECTATION FAILED at lib/kunit/kunit-example-test.c:29
+[13:40:46] Expected 2 + 1 == 2, but
+[13:40:46] 2 + 1 == 3 (0x3)
+[13:40:46] [FAILED] example_simple_test
+[13:40:46] [SKIPPED] example_skip_test
+[13:40:46] [SKIPPED] example_mark_skipped_test
+[13:40:46] [PASSED] example_all_expect_macros_test
+[13:40:46] # example: initializing suite
+[13:40:46] # example: pass:1 fail:1 skip:2 total:4
+[13:40:46] # Totals: pass:1 fail:1 skip:2 total:4
+[13:40:46] ===================== [FAILED] example =====================
+
+This example shows one minor cosmetic defect this approach has.
+The test counts lines prevent us from dedenting the suite-level output.
+But at the same time, any form of non-KUnit output would do the same
+unless it happened to be indented as well.
+
+Signed-off-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit.py | 2 +-
+ tools/testing/kunit/kunit_parser.py | 27 +++++++++++++-------------
+ tools/testing/kunit/kunit_tool_test.py | 2 ++
+ 3 files changed, 16 insertions(+), 15 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
+index e7b6549712d66..43fbe96318fe1 100755
+--- a/tools/testing/kunit/kunit.py
++++ b/tools/testing/kunit/kunit.py
+@@ -202,7 +202,7 @@ def parse_tests(request: KunitParseRequest, metadata: kunit_json.Metadata, input
+ if request.raw_output == 'all':
+ pass
+ elif request.raw_output == 'kunit':
+- output = kunit_parser.extract_tap_lines(output, lstrip=False)
++ output = kunit_parser.extract_tap_lines(output)
+ for line in output:
+ print(line.rstrip())
+ parse_time = time.time() - parse_start
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index baf0430be0e33..c02100b70af62 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -12,6 +12,7 @@
+ from __future__ import annotations
+ import re
+ import sys
++import textwrap
+
+ from enum import Enum, auto
+ from typing import Iterable, Iterator, List, Optional, Tuple
+@@ -217,12 +218,12 @@ class LineStream:
+
+ # Parsing helper methods:
+
+-KTAP_START = re.compile(r'KTAP version ([0-9]+)$')
+-TAP_START = re.compile(r'TAP version ([0-9]+)$')
+-KTAP_END = re.compile('(List of all partitions:|'
++KTAP_START = re.compile(r'\s*KTAP version ([0-9]+)$')
++TAP_START = re.compile(r'\s*TAP version ([0-9]+)$')
++KTAP_END = re.compile(r'\s*(List of all partitions:|'
+ 'Kernel panic - not syncing: VFS:|reboot: System halted)')
+
+-def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
++def extract_tap_lines(kernel_output: Iterable[str]) -> LineStream:
+ """Extracts KTAP lines from the kernel output."""
+ def isolate_ktap_output(kernel_output: Iterable[str]) \
+ -> Iterator[Tuple[int, str]]:
+@@ -248,11 +249,8 @@ def extract_tap_lines(kernel_output: Iterable[str], lstrip=True) -> LineStream:
+ # stop extracting KTAP lines
+ break
+ elif started:
+- # remove the prefix and optionally any leading
+- # whitespace. Our parsing logic relies on this.
++ # remove the prefix, if any.
+ line = line[prefix_len:]
+- if lstrip:
+- line = line.lstrip()
+ yield line_num, line
+ return LineStream(lines=isolate_ktap_output(kernel_output))
+
+@@ -307,7 +305,7 @@ def parse_ktap_header(lines: LineStream, test: Test) -> bool:
+ lines.pop()
+ return True
+
+-TEST_HEADER = re.compile(r'^# Subtest: (.*)$')
++TEST_HEADER = re.compile(r'^\s*# Subtest: (.*)$')
+
+ def parse_test_header(lines: LineStream, test: Test) -> bool:
+ """
+@@ -331,7 +329,7 @@ def parse_test_header(lines: LineStream, test: Test) -> bool:
+ lines.pop()
+ return True
+
+-TEST_PLAN = re.compile(r'1\.\.([0-9]+)')
++TEST_PLAN = re.compile(r'^\s*1\.\.([0-9]+)')
+
+ def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ """
+@@ -359,9 +357,9 @@ def parse_test_plan(lines: LineStream, test: Test) -> bool:
+ lines.pop()
+ return True
+
+-TEST_RESULT = re.compile(r'^(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
++TEST_RESULT = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?([^#]*)( # .*)?$')
+
+-TEST_RESULT_SKIP = re.compile(r'^(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
++TEST_RESULT_SKIP = re.compile(r'^\s*(ok|not ok) ([0-9]+) (- )?(.*) # SKIP(.*)$')
+
+ def peek_test_name_match(lines: LineStream, test: Test) -> bool:
+ """
+@@ -520,8 +518,9 @@ def print_test_header(test: Test) -> None:
+
+ def print_log(log: Iterable[str]) -> None:
+ """Prints all strings in saved log for test in yellow."""
+- for m in log:
+- stdout.print_with_timestamp(stdout.yellow(m))
++ formatted = textwrap.dedent('\n'.join(log))
++ for line in formatted.splitlines():
++ stdout.print_with_timestamp(stdout.yellow(line))
+
+ def format_test_result(test: Test) -> str:
+ """
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index 7e2f748a24eb2..fc13326e5c47a 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -354,12 +354,14 @@ class KUnitParserTest(unittest.TestCase):
+ KTAP version 1
+ 1..1
+ Test output.
++ Indented more.
+ not ok 1 test1
+ """
+ result = kunit_parser.parse_run_tests(output.splitlines())
+ self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
+
+ self.print_mock.assert_any_call(StrContains('Test output.'))
++ self.print_mock.assert_any_call(StrContains(' Indented more.'))
+ self.noPrintCallContains('not ok 1 test1')
+
+ def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+--
+2.51.0
+
--- /dev/null
+From 6f765621f88e81f0339681718b947315a48bd096 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Nov 2022 18:25:57 +0000
+Subject: kunit: tool: parse KTAP compliant test output
+
+From: Rae Moar <rmoar@google.com>
+
+[ Upstream commit 434498a6bee3db729dbdb7f131f3506f4dca85e8 ]
+
+Change the KUnit parser to be able to parse test output that complies with
+the KTAP version 1 specification format found here:
+https://kernel.org/doc/html/latest/dev-tools/ktap.html. Ensure the parser
+is able to parse tests with the original KUnit test output format as
+well.
+
+KUnit parser now accepts any of the following test output formats:
+
+Original KUnit test output format:
+
+ TAP version 14
+ 1..1
+ # Subtest: kunit-test-suite
+ 1..3
+ ok 1 - kunit_test_1
+ ok 2 - kunit_test_2
+ ok 3 - kunit_test_3
+ # kunit-test-suite: pass:3 fail:0 skip:0 total:3
+ # Totals: pass:3 fail:0 skip:0 total:3
+ ok 1 - kunit-test-suite
+
+KTAP version 1 test output format:
+
+ KTAP version 1
+ 1..1
+ KTAP version 1
+ 1..3
+ ok 1 kunit_test_1
+ ok 2 kunit_test_2
+ ok 3 kunit_test_3
+ ok 1 kunit-test-suite
+
+New KUnit test output format (changes made in the next patch of
+this series):
+
+ KTAP version 1
+ 1..1
+ KTAP version 1
+ # Subtest: kunit-test-suite
+ 1..3
+ ok 1 kunit_test_1
+ ok 2 kunit_test_2
+ ok 3 kunit_test_3
+ # kunit-test-suite: pass:3 fail:0 skip:0 total:3
+ # Totals: pass:3 fail:0 skip:0 total:3
+ ok 1 kunit-test-suite
+
+Signed-off-by: Rae Moar <rmoar@google.com>
+Reviewed-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_parser.py | 79 ++++++++++++-------
+ tools/testing/kunit/kunit_tool_test.py | 14 ++++
+ .../test_data/test_parse_ktap_output.log | 8 ++
+ .../test_data/test_parse_subtest_header.log | 7 ++
+ 4 files changed, 80 insertions(+), 28 deletions(-)
+ create mode 100644 tools/testing/kunit/test_data/test_parse_ktap_output.log
+ create mode 100644 tools/testing/kunit/test_data/test_parse_subtest_header.log
+
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index 94dba66feec50..259ce7696587b 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -450,6 +450,7 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
+ - '# Subtest: [test name]'
+ - '[ok|not ok] [test number] [-] [test name] [optional skip
+ directive]'
++ - 'KTAP version [version number]'
+
+ Parameters:
+ lines - LineStream of KTAP output to parse
+@@ -458,8 +459,9 @@ def parse_diagnostic(lines: LineStream) -> List[str]:
+ Log of diagnostic lines
+ """
+ log = [] # type: List[str]
+- while lines and not TEST_RESULT.match(lines.peek()) and not \
+- TEST_HEADER.match(lines.peek()):
++ non_diagnostic_lines = [TEST_RESULT, TEST_HEADER, KTAP_START]
++ while lines and not any(re.match(lines.peek())
++ for re in non_diagnostic_lines):
+ log.append(lines.pop())
+ return log
+
+@@ -505,11 +507,15 @@ def print_test_header(test: Test) -> None:
+ test - Test object representing current test being printed
+ """
+ message = test.name
++ if message != "":
++ # Add a leading space before the subtest counts only if a test name
++ # is provided using a "# Subtest" header line.
++ message += " "
+ if test.expected_count:
+ if test.expected_count == 1:
+- message += ' (1 subtest)'
++ message += '(1 subtest)'
+ else:
+- message += f' ({test.expected_count} subtests)'
++ message += f'({test.expected_count} subtests)'
+ stdout.print_with_timestamp(format_test_divider(message, len(message)))
+
+ def print_log(log: Iterable[str]) -> None:
+@@ -656,7 +662,7 @@ def bubble_up_test_results(test: Test) -> None:
+ elif test.counts.get_status() == TestStatus.TEST_CRASHED:
+ test.status = TestStatus.TEST_CRASHED
+
+-def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
++def parse_test(lines: LineStream, expected_num: int, log: List[str], is_subtest: bool) -> Test:
+ """
+ Finds next test to parse in LineStream, creates new Test object,
+ parses any subtests of the test, populates Test object with all
+@@ -674,15 +680,32 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ 1..4
+ [subtests]
+
+- - Subtest header line
++ - Subtest header (must include either the KTAP version line or
++ "# Subtest" header line)
+
+- Example:
++ Example (preferred format with both KTAP version line and
++ "# Subtest" line):
++
++ KTAP version 1
++ # Subtest: name
++ 1..3
++ [subtests]
++ ok 1 name
++
++ Example (only "# Subtest" line):
+
+ # Subtest: name
+ 1..3
+ [subtests]
+ ok 1 name
+
++ Example (only KTAP version line, compliant with KTAP v1 spec):
++
++ KTAP version 1
++ 1..3
++ [subtests]
++ ok 1 name
++
+ - Test result line
+
+ Example:
+@@ -694,28 +717,29 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ expected_num - expected test number for test to be parsed
+ log - list of strings containing any preceding diagnostic lines
+ corresponding to the current test
++ is_subtest - boolean indicating whether test is a subtest
+
+ Return:
+ Test object populated with characteristics and any subtests
+ """
+ test = Test()
+ test.log.extend(log)
+- parent_test = False
+- main = parse_ktap_header(lines, test)
+- if main:
+- # If KTAP/TAP header is found, attempt to parse
++ if not is_subtest:
++ # If parsing the main/top-level test, parse KTAP version line and
+ # test plan
+ test.name = "main"
++ ktap_line = parse_ktap_header(lines, test)
+ parse_test_plan(lines, test)
+ parent_test = True
+ else:
+- # If KTAP/TAP header is not found, test must be subtest
+- # header or test result line so parse attempt to parser
+- # subtest header
+- parent_test = parse_test_header(lines, test)
++ # If not the main test, attempt to parse a test header containing
++ # the KTAP version line and/or subtest header line
++ ktap_line = parse_ktap_header(lines, test)
++ subtest_line = parse_test_header(lines, test)
++ parent_test = (ktap_line or subtest_line)
+ if parent_test:
+- # If subtest header is found, attempt to parse
+- # test plan and print header
++ # If KTAP version line and/or subtest header is found, attempt
++ # to parse test plan and print test header
+ parse_test_plan(lines, test)
+ print_test_header(test)
+ expected_count = test.expected_count
+@@ -730,7 +754,7 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ sub_log = parse_diagnostic(lines)
+ sub_test = Test()
+ if not lines or (peek_test_name_match(lines, test) and
+- not main):
++ is_subtest):
+ if expected_count and test_num <= expected_count:
+ # If parser reaches end of test before
+ # parsing expected number of subtests, print
+@@ -744,20 +768,19 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+ test.log.extend(sub_log)
+ break
+ else:
+- sub_test = parse_test(lines, test_num, sub_log)
++ sub_test = parse_test(lines, test_num, sub_log, True)
+ subtests.append(sub_test)
+ test_num += 1
+ test.subtests = subtests
+- if not main:
++ if is_subtest:
+ # If not main test, look for test result line
+ test.log.extend(parse_diagnostic(lines))
+- if (parent_test and peek_test_name_match(lines, test)) or \
+- not parent_test:
+- parse_test_result(lines, test, expected_num)
+- else:
++ if test.name != "" and not peek_test_name_match(lines, test):
+ test.add_error('missing subtest result line!')
++ else:
++ parse_test_result(lines, test, expected_num)
+
+- # Check for there being no tests
++ # Check for there being no subtests within parent test
+ if parent_test and len(subtests) == 0:
+ # Don't override a bad status if this test had one reported.
+ # Assumption: no subtests means CRASHED is from Test.__init__()
+@@ -767,11 +790,11 @@ def parse_test(lines: LineStream, expected_num: int, log: List[str]) -> Test:
+
+ # Add statuses to TestCounts attribute in Test object
+ bubble_up_test_results(test)
+- if parent_test and not main:
++ if parent_test and is_subtest:
+ # If test has subtests and is not the main test object, print
+ # footer.
+ print_test_footer(test)
+- elif not main:
++ elif is_subtest:
+ print_test_result(test)
+ return test
+
+@@ -794,7 +817,7 @@ def parse_run_tests(kernel_output: Iterable[str]) -> Test:
+ test.add_error('could not find any KTAP output!')
+ test.status = TestStatus.FAILURE_TO_PARSE_TESTS
+ else:
+- test = parse_test(lines, 0, [])
++ test = parse_test(lines, 0, [], False)
+ if test.status != TestStatus.NO_TESTS:
+ test.status = test.counts.get_status()
+ stdout.print_with_timestamp(DIVIDER)
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index 42cbf28bfa6c6..8334d660753c4 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -330,6 +330,20 @@ class KUnitParserTest(unittest.TestCase):
+ self.assertEqual(kunit_parser._summarize_failed_tests(result),
+ 'Failures: all_failed_suite, some_failed_suite.test2')
+
++ def test_ktap_format(self):
++ ktap_log = test_data_path('test_parse_ktap_output.log')
++ with open(ktap_log) as file:
++ result = kunit_parser.parse_run_tests(file.readlines())
++ self.assertEqual(result.counts, kunit_parser.TestCounts(passed=3))
++ self.assertEqual('suite', result.subtests[0].name)
++ self.assertEqual('case_1', result.subtests[0].subtests[0].name)
++ self.assertEqual('case_2', result.subtests[0].subtests[1].name)
++
++ def test_parse_subtest_header(self):
++ ktap_log = test_data_path('test_parse_subtest_header.log')
++ with open(ktap_log) as file:
++ result = kunit_parser.parse_run_tests(file.readlines())
++ self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
+ def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+ return kunit_parser.LineStream(enumerate(strs, start=1))
+diff --git a/tools/testing/kunit/test_data/test_parse_ktap_output.log b/tools/testing/kunit/test_data/test_parse_ktap_output.log
+new file mode 100644
+index 0000000000000..ccdf244e53039
+--- /dev/null
++++ b/tools/testing/kunit/test_data/test_parse_ktap_output.log
+@@ -0,0 +1,8 @@
++KTAP version 1
++1..1
++ KTAP version 1
++ 1..3
++ ok 1 case_1
++ ok 2 case_2
++ ok 3 case_3
++ok 1 suite
+diff --git a/tools/testing/kunit/test_data/test_parse_subtest_header.log b/tools/testing/kunit/test_data/test_parse_subtest_header.log
+new file mode 100644
+index 0000000000000..216631092e7b1
+--- /dev/null
++++ b/tools/testing/kunit/test_data/test_parse_subtest_header.log
+@@ -0,0 +1,7 @@
++KTAP version 1
++1..1
++ KTAP version 1
++ # Subtest: suite
++ 1..1
++ ok 1 test
++ok 1 suite
+\ No newline at end of file
+--
+2.51.0
+
--- /dev/null
+From 79ee191d6d6e9cd640379302685f0c779926142d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 Oct 2022 14:02:56 -0700
+Subject: kunit: tool: print summary of failed tests if a few failed out of a
+ lot
+
+From: Daniel Latypov <dlatypov@google.com>
+
+[ Upstream commit f19dd011d8de6f0c1d20abea5158aa4f5d9cea44 ]
+
+E.g. all the hw_breakpoint tests are failing right now.
+So if I run `kunit.py run --altests --arch=x86_64`, then I see
+> Testing complete. Ran 408 tests: passed: 392, failed: 9, skipped: 7
+
+Seeing which 9 tests failed out of the hundreds is annoying.
+If my terminal doesn't have scrollback support, I have to resort to
+looking at `.kunit/test.log` for the `not ok` lines.
+
+Teach kunit.py to print a summarized list of failures if the # of tests
+reaches an arbitrary threshold (>=100 tests).
+
+To try and keep the output from being too long/noisy, this new logic
+a) just reports "parent_test failed" if every child test failed
+b) won't print anything if there are >10 failures (also arbitrary).
+
+With this patch, we get an extra line of output showing:
+> Testing complete. Ran 408 tests: passed: 392, failed: 9, skipped: 7
+> Failures: hw_breakpoint
+
+This also works with parameterized tests, e.g. if I add a fake failure
+> Failures: kcsan.test_atomic_builtins_missing_barrier.threads=6
+
+Note: we didn't have enough tests for this to be a problem before.
+But with commit 980ac3ad0512 ("kunit: tool: rename all_test_uml.config,
+use it for --alltests"), --alltests works and thus running >100 tests
+will probably become more common.
+
+Signed-off-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_parser.py | 47 ++++++++++++++++++++++++++
+ tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++
+ 2 files changed, 69 insertions(+)
+
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index 1ae873e3e3415..94dba66feec50 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -58,6 +58,10 @@ class Test:
+ self.counts.errors += 1
+ stdout.print_with_timestamp(stdout.red('[ERROR]') + f' Test: {self.name}: {error_message}')
+
++ def ok_status(self) -> bool:
++ """Returns true if the status was ok, i.e. passed or skipped."""
++ return self.status in (TestStatus.SUCCESS, TestStatus.SKIPPED)
++
+ class TestStatus(Enum):
+ """An enumeration class to represent the status of a test."""
+ SUCCESS = auto()
+@@ -565,6 +569,40 @@ def print_test_footer(test: Test) -> None:
+ stdout.print_with_timestamp(format_test_divider(message,
+ len(message) - stdout.color_len()))
+
++
++
++def _summarize_failed_tests(test: Test) -> str:
++ """Tries to summarize all the failing subtests in `test`."""
++
++ def failed_names(test: Test, parent_name: str) -> List[str]:
++ # Note: we use 'main' internally for the top-level test.
++ if not parent_name or parent_name == 'main':
++ full_name = test.name
++ else:
++ full_name = parent_name + '.' + test.name
++
++ if not test.subtests: # this is a leaf node
++ return [full_name]
++
++ # If all the children failed, just say this subtest failed.
++ # Don't summarize it down "the top-level test failed", though.
++ failed_subtests = [sub for sub in test.subtests if not sub.ok_status()]
++ if parent_name and len(failed_subtests) == len(test.subtests):
++ return [full_name]
++
++ all_failures = [] # type: List[str]
++ for t in failed_subtests:
++ all_failures.extend(failed_names(t, full_name))
++ return all_failures
++
++ failures = failed_names(test, '')
++ # If there are too many failures, printing them out will just be noisy.
++ if len(failures) > 10: # this is an arbitrary limit
++ return ''
++
++ return 'Failures: ' + ', '.join(failures)
++
++
+ def print_summary_line(test: Test) -> None:
+ """
+ Prints summary line of test object. Color of line is dependent on
+@@ -587,6 +625,15 @@ def print_summary_line(test: Test) -> None:
+ color = stdout.red
+ stdout.print_with_timestamp(color(f'Testing complete. {test.counts}'))
+
++ # Summarize failures that might have gone off-screen since we had a lot
++ # of tests (arbitrarily defined as >=100 for now).
++ if test.ok_status() or test.counts.total() < 100:
++ return
++ summarized = _summarize_failed_tests(test)
++ if not summarized:
++ return
++ stdout.print_with_timestamp(color(summarized))
++
+ # Other methods:
+
+ def bubble_up_test_results(test: Test) -> None:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index e2cd2cc2e98f6..42cbf28bfa6c6 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -309,6 +309,28 @@ class KUnitParserTest(unittest.TestCase):
+ result.status)
+ self.assertEqual('kunit-resource-test', result.subtests[0].name)
+
++ def test_summarize_failures(self):
++ output = """
++ KTAP version 1
++ 1..2
++ # Subtest: all_failed_suite
++ 1..2
++ not ok 1 - test1
++ not ok 2 - test2
++ not ok 1 - all_failed_suite
++ # Subtest: some_failed_suite
++ 1..2
++ ok 1 - test1
++ not ok 2 - test2
++ not ok 1 - some_failed_suite
++ """
++ result = kunit_parser.parse_run_tests(output.splitlines())
++ self.assertEqual(kunit_parser.TestStatus.FAILURE, result.status)
++
++ self.assertEqual(kunit_parser._summarize_failed_tests(result),
++ 'Failures: all_failed_suite, some_failed_suite.test2')
++
++
+ def line_stream_from_strs(strs: Iterable[str]) -> kunit_parser.LineStream:
+ return kunit_parser.LineStream(enumerate(strs, start=1))
+
+--
+2.51.0
+
--- /dev/null
+From 8a32538cdcc80419759e180e677496416dfae62b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Mar 2023 15:06:37 -0700
+Subject: kunit: tool: remove unused imports and variables
+
+From: Daniel Latypov <dlatypov@google.com>
+
+[ Upstream commit 126901ba3499880c9ed033633817cf7493120fda ]
+
+We don't run a linter regularly over kunit.py code (the default settings
+on most don't like kernel style, e.g. tabs) so some of these imports
+didn't get removed when they stopped being used.
+
+Signed-off-by: Daniel Latypov <dlatypov@google.com>
+Reviewed-by: David Gow <davidgow@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 40804c4974b8 ("kunit: tool: copy caller args in run_kernel to prevent mutation")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit.py | 2 +-
+ tools/testing/kunit/kunit_config.py | 2 +-
+ tools/testing/kunit/kunit_kernel.py | 1 -
+ tools/testing/kunit/kunit_parser.py | 1 -
+ tools/testing/kunit/kunit_tool_test.py | 2 +-
+ 5 files changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
+index 8cd8188675047..172db04b48f42 100755
+--- a/tools/testing/kunit/kunit.py
++++ b/tools/testing/kunit/kunit.py
+@@ -132,7 +132,7 @@ def _suites_from_test_list(tests: List[str]) -> List[str]:
+ parts = t.split('.', maxsplit=2)
+ if len(parts) != 2:
+ raise ValueError(f'internal KUnit error, test name should be of the form "<suite>.<test>", got "{t}"')
+- suite, case = parts
++ suite, _ = parts
+ if not suites or suites[-1] != suite:
+ suites.append(suite)
+ return suites
+diff --git a/tools/testing/kunit/kunit_config.py b/tools/testing/kunit/kunit_config.py
+index 48b5f34b2e5d7..9f76d7b896175 100644
+--- a/tools/testing/kunit/kunit_config.py
++++ b/tools/testing/kunit/kunit_config.py
+@@ -8,7 +8,7 @@
+
+ from dataclasses import dataclass
+ import re
+-from typing import Dict, Iterable, List, Set, Tuple
++from typing import Dict, Iterable, List, Tuple
+
+ CONFIG_IS_NOT_SET_PATTERN = r'^# CONFIG_(\w+) is not set$'
+ CONFIG_PATTERN = r'^CONFIG_(\w+)=(\S+|".*")$'
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index 53e90c3358348..cd73256c30c39 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -18,7 +18,6 @@ import threading
+ from typing import Iterator, List, Optional, Tuple
+
+ import kunit_config
+-from kunit_printer import stdout
+ import qemu_config
+
+ KCONFIG_PATH = '.config'
+diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
+index c02100b70af62..d5abd0567c8e0 100644
+--- a/tools/testing/kunit/kunit_parser.py
++++ b/tools/testing/kunit/kunit_parser.py
+@@ -11,7 +11,6 @@
+
+ from __future__ import annotations
+ import re
+-import sys
+ import textwrap
+
+ from enum import Enum, auto
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index fc13326e5c47a..9ba0ff95fad5c 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -346,7 +346,7 @@ class KUnitParserTest(unittest.TestCase):
+ def test_parse_subtest_header(self):
+ ktap_log = test_data_path('test_parse_subtest_header.log')
+ with open(ktap_log) as file:
+- result = kunit_parser.parse_run_tests(file.readlines())
++ kunit_parser.parse_run_tests(file.readlines())
+ self.print_mock.assert_any_call(StrContains('suite (1 subtest)'))
+
+ def test_show_test_output_on_failure(self):
+--
+2.51.0
+
--- /dev/null
+From 9cc811ac5d3509b698345ae559d0b30a4d6f8bdb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 036ae99d09841..052986e05e620 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -71,7 +71,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index e33500771b30f..aca6db6f95355 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -148,7 +148,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From 170ecd50c3717d07c92fd486dcef0e7157746192 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:11 +0200
+Subject: net: dpaa2: replace dpaa2_mac_is_type_fixed() with
+ dpaa2_mac_is_type_phy()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 320fefa9e2edc67011e235ea1d50f0d00ddfe004 ]
+
+dpaa2_mac_is_type_fixed() is a header with no implementation and no
+callers, which is referenced from the documentation though. It can be
+deleted.
+
+On the other hand, it would be useful to reuse the code between
+dpaa2_eth_is_type_phy() and dpaa2_switch_port_is_type_phy(). That common
+code should be called dpaa2_mac_is_type_phy(), so let's create that.
+
+The removal and the addition are merged into the same patch because,
+in fact, is_type_phy() is the logical opposite of is_type_fixed().
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/freescale/dpaa2/mac-phy-support.rst | 9 ++++++---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h | 7 +------
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h | 10 ++++++++--
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h | 7 +------
+ 4 files changed, 16 insertions(+), 17 deletions(-)
+
+diff --git a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
+index 51e6624fb7741..1d2f55feca242 100644
+--- a/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
++++ b/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/mac-phy-support.rst
+@@ -181,10 +181,13 @@ when necessary using the below listed API::
+ - int dpaa2_mac_connect(struct dpaa2_mac *mac);
+ - void dpaa2_mac_disconnect(struct dpaa2_mac *mac);
+
+-A phylink integration is necessary only when the partner DPMAC is not of TYPE_FIXED.
+-One can check for this condition using the below API::
++A phylink integration is necessary only when the partner DPMAC is not of
++``TYPE_FIXED``. This means it is either of ``TYPE_PHY``, or of
++``TYPE_BACKPLANE`` (the difference being the two that in the ``TYPE_BACKPLANE``
++mode, the MC firmware does not access the PCS registers). One can check for
++this condition using the following helper::
+
+- - bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,struct fsl_mc_io *mc_io);
++ - static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac);
+
+ Before connection to a MAC, the caller must allocate and populate the
+ dpaa2_mac structure with the associated net_device, a pointer to the MC portal
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+index e703846adc9f0..9c8d888b10b01 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h
+@@ -733,12 +733,7 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv)
+
+ static inline bool dpaa2_eth_is_type_phy(struct dpaa2_eth_priv *priv)
+ {
+- if (priv->mac &&
+- (priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+- priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+- return true;
+-
+- return false;
++ return dpaa2_mac_is_type_phy(priv->mac);
+ }
+
+ static inline bool dpaa2_eth_has_mac(struct dpaa2_eth_priv *priv)
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+index a58cab188a99a..c1ec9efd413ac 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-mac.h
+@@ -30,8 +30,14 @@ struct dpaa2_mac {
+ struct phy *serdes_phy;
+ };
+
+-bool dpaa2_mac_is_type_fixed(struct fsl_mc_device *dpmac_dev,
+- struct fsl_mc_io *mc_io);
++static inline bool dpaa2_mac_is_type_phy(struct dpaa2_mac *mac)
++{
++ if (!mac)
++ return false;
++
++ return mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
++ mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE;
++}
+
+ int dpaa2_mac_open(struct dpaa2_mac *mac);
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+index 0002dca4d4177..9898073abe012 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+@@ -230,12 +230,7 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw)
+ static inline bool
+ dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv)
+ {
+- if (port_priv->mac &&
+- (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY ||
+- port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE))
+- return true;
+-
+- return false;
++ return dpaa2_mac_is_type_phy(port_priv->mac);
+ }
+
+ static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv)
+--
+2.51.0
+
--- /dev/null
+From fd87bac817f3c7b48f9b49e4bf8bda043c0b6815 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:15 +0200
+Subject: net: dpaa2-switch: assign port_priv->mac after dpaa2_mac_connect()
+ call
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 88d64367cea019fa6197d0d97a85ac90279919b7 ]
+
+The dpaa2-switch has the exact same locking requirements when connected
+to a DPMAC, so it needs port_priv->mac to always point either to NULL,
+or to a DPMAC with a fully initialized phylink instance.
+
+Make the same preparatory change in the dpaa2-switch driver as in the
+dpaa2-eth one.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/freescale/dpaa2/dpaa2-switch.c | 21 +++++++++++--------
+ 1 file changed, 12 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 1e8ce5db867b4..371f53a100e84 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1460,9 +1460,8 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ err = dpaa2_mac_open(mac);
+ if (err)
+ goto err_free_mac;
+- port_priv->mac = mac;
+
+- if (dpaa2_switch_port_is_type_phy(port_priv)) {
++ if (dpaa2_mac_is_type_phy(mac)) {
+ err = dpaa2_mac_connect(mac);
+ if (err) {
+ netdev_err(port_priv->netdev,
+@@ -1472,11 +1471,12 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ }
+ }
+
++ port_priv->mac = mac;
++
+ return 0;
+
+ err_close_mac:
+ dpaa2_mac_close(mac);
+- port_priv->mac = NULL;
+ err_free_mac:
+ kfree(mac);
+ out_put_device:
+@@ -1486,15 +1486,18 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+
+ static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+ {
+- if (dpaa2_switch_port_is_type_phy(port_priv))
+- dpaa2_mac_disconnect(port_priv->mac);
++ struct dpaa2_mac *mac = port_priv->mac;
+
+- if (!dpaa2_switch_port_has_mac(port_priv))
++ port_priv->mac = NULL;
++
++ if (!mac)
+ return;
+
+- dpaa2_mac_close(port_priv->mac);
+- kfree(port_priv->mac);
+- port_priv->mac = NULL;
++ if (dpaa2_mac_is_type_phy(mac))
++ dpaa2_mac_disconnect(mac);
++
++ dpaa2_mac_close(mac);
++ kfree(mac);
+ }
+
+ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+--
+2.51.0
+
--- /dev/null
+From 98d19a68dafca1067b4f7ae6169525c6f6c95225 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:17 +0200
+Subject: net: dpaa2-switch replace direct MAC access with
+ dpaa2_switch_port_has_mac()
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit bc230671bfb25c2d3c225f674fe6c03cea88d22e ]
+
+The helper function will gain a lockdep annotation in a future patch.
+Make sure to benefit from it.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+index 720c9230cab57..0b41a945e0fff 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+@@ -196,7 +196,7 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+
+- if (port_priv->mac)
++ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 96a8107c9f86243c98d4d2938b30cd6e7a481ea6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Nov 2022 16:12:20 +0200
+Subject: net: dpaa2-switch: serialize changes to priv->mac with a mutex
+
+From: Vladimir Oltean <vladimir.oltean@nxp.com>
+
+[ Upstream commit 3c7f44fa9c4c8a9154935ca49e4cf45c14240335 ]
+
+The dpaa2-switch driver uses a DPMAC in the same way as the dpaa2-eth
+driver, so we need to duplicate the locking solution established by the
+previous change to the switch driver as well.
+
+Signed-off-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Tested-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../freescale/dpaa2/dpaa2-switch-ethtool.c | 32 +++++++++++++++----
+ .../ethernet/freescale/dpaa2/dpaa2-switch.c | 31 ++++++++++++++++--
+ .../ethernet/freescale/dpaa2/dpaa2-switch.h | 2 ++
+ 3 files changed, 55 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+index 0b41a945e0fff..dc9f4ad8a061d 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c
+@@ -60,11 +60,18 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev,
+ {
+ struct ethsw_port_priv *port_priv = netdev_priv(netdev);
+ struct dpsw_link_state state = {0};
+- int err = 0;
++ int err;
++
++ mutex_lock(&port_priv->mac_lock);
+
+- if (dpaa2_switch_port_is_type_phy(port_priv))
+- return phylink_ethtool_ksettings_get(port_priv->mac->phylink,
+- link_ksettings);
++ if (dpaa2_switch_port_is_type_phy(port_priv)) {
++ err = phylink_ethtool_ksettings_get(port_priv->mac->phylink,
++ link_ksettings);
++ mutex_unlock(&port_priv->mac_lock);
++ return err;
++ }
++
++ mutex_unlock(&port_priv->mac_lock);
+
+ err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+@@ -99,9 +106,16 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev,
+ bool if_running;
+ int err = 0, ret;
+
+- if (dpaa2_switch_port_is_type_phy(port_priv))
+- return phylink_ethtool_ksettings_set(port_priv->mac->phylink,
+- link_ksettings);
++ mutex_lock(&port_priv->mac_lock);
++
++ if (dpaa2_switch_port_is_type_phy(port_priv)) {
++ err = phylink_ethtool_ksettings_set(port_priv->mac->phylink,
++ link_ksettings);
++ mutex_unlock(&port_priv->mac_lock);
++ return err;
++ }
++
++ mutex_unlock(&port_priv->mac_lock);
+
+ /* Interface needs to be down to change link settings */
+ if_running = netif_running(netdev);
+@@ -196,8 +210,12 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev,
+ dpaa2_switch_ethtool_counters[i].name, err);
+ }
+
++ mutex_lock(&port_priv->mac_lock);
++
+ if (dpaa2_switch_port_has_mac(port_priv))
+ dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i);
++
++ mutex_unlock(&port_priv->mac_lock);
+ }
+
+ const struct ethtool_ops dpaa2_switch_port_ethtool_ops = {
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 371f53a100e84..68378d694c5d3 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -603,8 +603,11 @@ static int dpaa2_switch_port_link_state_update(struct net_device *netdev)
+
+ /* When we manage the MAC/PHY using phylink there is no need
+ * to manually update the netif_carrier.
++ * We can avoid locking because we are called from the "link changed"
++ * IRQ handler, which is the same as the "endpoint changed" IRQ handler
++ * (the writer to port_priv->mac), so we cannot race with it.
+ */
+- if (dpaa2_switch_port_is_type_phy(port_priv))
++ if (dpaa2_mac_is_type_phy(port_priv->mac))
+ return 0;
+
+ /* Interrupts are received even though no one issued an 'ifconfig up'
+@@ -684,6 +687,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
++ mutex_lock(&port_priv->mac_lock);
++
+ if (!dpaa2_switch_port_is_type_phy(port_priv)) {
+ /* Explicitly set carrier off, otherwise
+ * netif_carrier_ok() will return true and cause 'ip link show'
+@@ -697,6 +702,7 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+ if (err) {
++ mutex_unlock(&port_priv->mac_lock);
+ netdev_err(netdev, "dpsw_if_enable err %d\n", err);
+ return err;
+ }
+@@ -708,6 +714,8 @@ static int dpaa2_switch_port_open(struct net_device *netdev)
+ phylink_start(port_priv->mac->phylink);
+ }
+
++ mutex_unlock(&port_priv->mac_lock);
++
+ return 0;
+ }
+
+@@ -717,6 +725,8 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
+ struct ethsw_core *ethsw = port_priv->ethsw_data;
+ int err;
+
++ mutex_lock(&port_priv->mac_lock);
++
+ if (dpaa2_switch_port_is_type_phy(port_priv)) {
+ phylink_stop(port_priv->mac->phylink);
+ dpaa2_mac_stop(port_priv->mac);
+@@ -725,6 +735,8 @@ static int dpaa2_switch_port_stop(struct net_device *netdev)
+ netif_carrier_off(netdev);
+ }
+
++ mutex_unlock(&port_priv->mac_lock);
++
+ err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0,
+ port_priv->ethsw_data->dpsw_handle,
+ port_priv->idx);
+@@ -1471,7 +1483,9 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+ }
+ }
+
++ mutex_lock(&port_priv->mac_lock);
+ port_priv->mac = mac;
++ mutex_unlock(&port_priv->mac_lock);
+
+ return 0;
+
+@@ -1486,9 +1500,12 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv)
+
+ static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv)
+ {
+- struct dpaa2_mac *mac = port_priv->mac;
++ struct dpaa2_mac *mac;
+
++ mutex_lock(&port_priv->mac_lock);
++ mac = port_priv->mac;
+ port_priv->mac = NULL;
++ mutex_unlock(&port_priv->mac_lock);
+
+ if (!mac)
+ return;
+@@ -1507,6 +1524,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ struct ethsw_port_priv *port_priv;
+ u32 status = ~0;
+ int err, if_id;
++ bool had_mac;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+@@ -1529,7 +1547,12 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+
+ if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) {
+ rtnl_lock();
+- if (dpaa2_switch_port_has_mac(port_priv))
++ /* We can avoid locking because the "endpoint changed" IRQ
++ * handler is the only one who changes priv->mac at runtime,
++ * so we are not racing with anyone.
++ */
++ had_mac = !!port_priv->mac;
++ if (had_mac)
+ dpaa2_switch_port_disconnect_mac(port_priv);
+ else
+ dpaa2_switch_port_connect_mac(port_priv);
+@@ -3279,6 +3302,8 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw,
+ port_priv->netdev = port_netdev;
+ port_priv->ethsw_data = ethsw;
+
++ mutex_init(&port_priv->mac_lock);
++
+ port_priv->idx = port_idx;
+ port_priv->stp_state = BR_STATE_FORWARDING;
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+index 9898073abe012..42b3ca73f55d5 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h
+@@ -161,6 +161,8 @@ struct ethsw_port_priv {
+
+ struct dpaa2_switch_filter_block *filter_block;
+ struct dpaa2_mac *mac;
++ /* Protects against changes to port_priv->mac */
++ struct mutex mac_lock;
+ };
+
+ /* Switch data */
+--
+2.51.0
+
--- /dev/null
+From 0e69a47d903f621ccf3f1f432f2c3dc7936da8d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 18:13:14 -0300
+Subject: net: dsa: realtek: rtl8365mb: fix rtl8365mb_phy_ocp_write return
+ value
+
+From: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+
+[ Upstream commit 7cbe98f7bef965241a5908d50d557008cf998aee ]
+
+Function rtl8365mb_phy_ocp_write() always returns 0, even when an error
+occurs during register access. This patch fixes the return value to
+propagate the actual error code from regmap operations.
+
+Link: https://lore.kernel.org/netdev/a2dfde3c-d46f-434b-9d16-1e251e449068@yahoo.com/
+Fixes: 2796728460b8 ("net: dsa: realtek: rtl8365mb: serialize indirect PHY register access")
+Signed-off-by: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Link: https://patch.msgid.link/20260301-realtek_namiltd_fix1-v1-1-43a6bb707f9c@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/rtl8365mb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index da31d8b839ac6..abdff73aa9c32 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -764,7 +764,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
+ out:
+ mutex_unlock(&priv->map_lock);
+
+- return 0;
++ return ret;
+ }
+
+ static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+--
+2.51.0
+
--- /dev/null
+From 2affe359759ef0db8e26135d2f0d987208f2ccb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 18:56:39 +0100
+Subject: net: ethernet: mtk_eth_soc: Reset prog ptr to old_prog in case of
+ error in mtk_xdp_setup()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 0abc73c8a40fd64ac1739c90bb4f42c418d27a5e ]
+
+Reset eBPF program pointer to old_prog and do not decrease its ref-count
+if mtk_open routine in mtk_xdp_setup() fails.
+
+Fixes: 7c26c20da5d42 ("net: ethernet: mtk_eth_soc: add basic XDP support")
+Suggested-by: Paolo Valerio <pvalerio@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260303-mtk-xdp-prog-ptr-fix-v2-1-97b6dbbe240f@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 3f2f725ccceb3..20d14e3ae6efd 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3119,12 +3119,21 @@ static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
++
++ if (netif_running(dev) && need_update) {
++ int err;
++
++ err = mtk_open(dev);
++ if (err) {
++ rcu_assign_pointer(eth->prog, old_prog);
++
++ return err;
++ }
++ }
++
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+- if (netif_running(dev) && need_update)
+- return mtk_open(dev);
+-
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From a5910ffb0e44c1ff15b952ed2c593ab102b85d3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index a0a9e4e13e77b..d04a05e959bbb 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -257,7 +257,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 3d42ca15e8779..d7c65df7f8c06 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -422,14 +422,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From 86dea41e38219fa9b403514be5549e1a06904cde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 5aa5390da1095..987ef0954e2ea 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3558,7 +3558,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ {
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3599,11 +3598,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 20f04d3180dede56b1b514998bbd98e1b9e92827 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 2ffdbbf90eb70..6b62218718a06 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1470,10 +1470,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 631db7a9341d681ce093fa11a156352379282cc6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 09:06:02 -0500
+Subject: net/sched: act_ife: Fix metalist update behavior
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit e2cedd400c3ec0302ffca2490e8751772906ac23 ]
+
+Whenever an ife action replace changes the metalist, instead of
+replacing the old data on the metalist, the current ife code is appending
+the new metadata. Aside from being inappropriate behavior, this may lead
+to an unbounded addition of metadata to the metalist which might cause an
+out of bounds error when running the encode op:
+
+[ 138.423369][ C1] ==================================================================
+[ 138.424317][ C1] BUG: KASAN: slab-out-of-bounds in ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.424906][ C1] Write of size 4 at addr ffff8880077f4ffe by task ife_out_out_bou/255
+[ 138.425778][ C1] CPU: 1 UID: 0 PID: 255 Comm: ife_out_out_bou Not tainted 7.0.0-rc1-00169-gfbdfa8da05b6 #624 PREEMPT(full)
+[ 138.425795][ C1] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[ 138.425800][ C1] Call Trace:
+[ 138.425804][ C1] <IRQ>
+[ 138.425808][ C1] dump_stack_lvl (lib/dump_stack.c:122)
+[ 138.425828][ C1] print_report (mm/kasan/report.c:379 mm/kasan/report.c:482)
+[ 138.425839][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425844][ C1] ? __virt_addr_valid (./arch/x86/include/asm/preempt.h:95 (discriminator 1) ./include/linux/rcupdate.h:975 (discriminator 1) ./include/linux/mmzone.h:2207 (discriminator 1) arch/x86/mm/physaddr.c:54 (discriminator 1))
+[ 138.425853][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425859][ C1] kasan_report (mm/kasan/report.c:221 mm/kasan/report.c:597)
+[ 138.425868][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425878][ C1] kasan_check_range (mm/kasan/generic.c:186 (discriminator 1) mm/kasan/generic.c:200 (discriminator 1))
+[ 138.425884][ C1] __asan_memset (mm/kasan/shadow.c:84 (discriminator 2))
+[ 138.425889][ C1] ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425893][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:171)
+[ 138.425898][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425903][ C1] ife_encode_meta_u16 (net/sched/act_ife.c:57)
+[ 138.425910][ C1] ? __pfx_do_raw_spin_lock (kernel/locking/spinlock_debug.c:114)
+[ 138.425916][ C1] ? __asan_memcpy (mm/kasan/shadow.c:105 (discriminator 3))
+[ 138.425921][ C1] ? __pfx_ife_encode_meta_u16 (net/sched/act_ife.c:45)
+[ 138.425927][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425931][ C1] tcf_ife_act (net/sched/act_ife.c:847 net/sched/act_ife.c:879)
+
+To solve this issue, fix the replace behavior by adding the metalist to
+the ife rcu data structure.
+
+Fixes: aa9fd9a325d51 ("sched: act: ife: update parameters via rcu handling")
+Reported-by: Ruitong Liu <cnitlrt@gmail.com>
+Tested-by: Ruitong Liu <cnitlrt@gmail.com>
+Co-developed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20260304140603.76500-1-jhs@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tc_act/tc_ife.h | 4 +-
+ net/sched/act_ife.c | 93 ++++++++++++++++++-------------------
+ 2 files changed, 45 insertions(+), 52 deletions(-)
+
+diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
+index c7f24a2da1cad..24d4d5a62b3c2 100644
+--- a/include/net/tc_act/tc_ife.h
++++ b/include/net/tc_act/tc_ife.h
+@@ -13,15 +13,13 @@ struct tcf_ife_params {
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+-
++ struct list_head metalist;
+ struct rcu_head rcu;
+ };
+
+ struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
+- /* list of metaids allowed */
+- struct list_head metalist;
+ };
+ #define to_ife(a) ((struct tcf_ife_info *)a)
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 1f243ea65443c..a25203a492700 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -292,8 +292,8 @@ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
+ /* called when adding new meta information
+ */
+ static int __add_metainfo(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool atomic, bool exists)
++ struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len, bool atomic)
+ {
+ struct tcf_meta_info *mi = NULL;
+ int ret = 0;
+@@ -312,45 +312,40 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
+ }
+ }
+
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+- list_add_tail(&mi->metalist, &ife->metalist);
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
++ list_add_tail(&mi->metalist, &p->metalist);
+
+ return ret;
+ }
+
+ static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid,
+- bool exists)
++ struct tcf_ife_params *p, u32 metaid)
+ {
+ int ret;
+
+ if (!try_module_get(ops->owner))
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++ ret = __add_metainfo(ops, p, metaid, NULL, 0, true);
+ if (ret)
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool exists)
++static int add_metainfo(struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len)
+ {
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+
+ if (!ops)
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++ ret = __add_metainfo(ops, p, metaid, metaval, len, false);
+ if (ret)
+ /*put back what find_ife_oplist took */
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
++static int use_all_metadata(struct tcf_ife_params *p)
+ {
+ struct tcf_meta_ops *o;
+ int rc = 0;
+@@ -358,7 +353,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+- rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
++ rc = add_metainfo_and_get_ops(o, p, o->metaid);
+ if (rc == 0)
+ installed += 1;
+ }
+@@ -370,7 +365,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ return -EINVAL;
+ }
+
+-static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int dump_metalist(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+ struct tcf_meta_info *e;
+ struct nlattr *nest;
+@@ -378,14 +373,14 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ int total_encoded = 0;
+
+ /*can only happen on decode */
+- if (list_empty(&ife->metalist))
++ if (list_empty(&p->metalist))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
+ if (!nest)
+ goto out_nlmsg_trim;
+
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry(e, &p->metalist, metalist) {
+ if (!e->ops->get(skb, e))
+ total_encoded += 1;
+ }
+@@ -402,13 +397,11 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ return -1;
+ }
+
+-/* under ife->tcf_lock */
+-static void _tcf_ife_cleanup(struct tc_action *a)
++static void __tcf_ife_cleanup(struct tcf_ife_params *p)
+ {
+- struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_meta_info *e, *n;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_safe(e, n, &p->metalist, metalist) {
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+@@ -421,18 +414,23 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ }
+ }
+
++static void tcf_ife_cleanup_params(struct rcu_head *head)
++{
++ struct tcf_ife_params *p = container_of(head, struct tcf_ife_params,
++ rcu);
++
++ __tcf_ife_cleanup(p);
++ kfree(p);
++}
++
+ static void tcf_ife_cleanup(struct tc_action *a)
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_ife_params *p;
+
+- spin_lock_bh(&ife->tcf_lock);
+- _tcf_ife_cleanup(a);
+- spin_unlock_bh(&ife->tcf_lock);
+-
+ p = rcu_dereference_protected(ife->params, 1);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+ }
+
+ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+@@ -454,8 +452,7 @@ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+ return 0;
+ }
+
+-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+- bool exists, bool rtnl_held)
++static int populate_metalist(struct tcf_ife_params *p, struct nlattr **tb)
+ {
+ int len = 0;
+ int rc = 0;
+@@ -467,7 +464,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+- rc = add_metainfo(ife, i, val, len, exists);
++ rc = add_metainfo(p, i, val, len);
+ if (rc)
+ return rc;
+ }
+@@ -522,6 +519,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
++ INIT_LIST_HEAD(&p->metalist);
+
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+@@ -566,8 +564,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ ife = to_ife(*a);
+- if (ret == ACT_P_CREATED)
+- INIT_LIST_HEAD(&ife->metalist);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+@@ -599,8 +595,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (tb[TCA_IFE_METALST]) {
+- err = populate_metalist(ife, tb2, exists,
+- !(flags & TCA_ACT_FLAGS_NO_RTNL));
++ err = populate_metalist(p, tb2);
+ if (err)
+ goto metadata_parse_err;
+ } else {
+@@ -609,7 +604,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ * as we can. You better have at least one else we are
+ * going to bail out
+ */
+- err = use_all_metadata(ife, exists);
++ err = use_all_metadata(p);
+ if (err)
+ goto metadata_parse_err;
+ }
+@@ -625,13 +620,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+
+ return ret;
+ metadata_parse_err:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ release_idr:
++ __tcf_ife_cleanup(p);
+ kfree(p);
+ tcf_idr_release(*a, bind);
+ return err;
+@@ -678,7 +674,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
+ goto nla_put_failure;
+
+- if (dump_metalist(skb, ife)) {
++ if (dump_metalist(skb, p)) {
+ /*ignore failure to dump metalist */
+ pr_info("Failed to dump metalist\n");
+ }
+@@ -692,13 +688,13 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ return -1;
+ }
+
+-static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
++static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_params *p,
+ u16 metaid, u16 mlen, void *mdata)
+ {
+ struct tcf_meta_info *e;
+
+ /* XXX: use hash to speed up */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (metaid == e->metaid) {
+ if (e->ops) {
+ /* We check for decode presence already */
+@@ -715,10 +711,13 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ int action = ife->tcf_action;
++ struct tcf_ife_params *p;
+ u8 *ifehdr_end;
+ u8 *tlv_data;
+ u16 metalen;
+
++ p = rcu_dereference_bh(ife->params);
++
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ tcf_lastuse_update(&ife->tcf_tm);
+
+@@ -744,7 +743,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ return TC_ACT_SHOT;
+ }
+
+- if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
++ if (find_decode_metaid(skb, p, mtype, dlen, curr_data)) {
+ /* abuse overlimits to count when we receive metadata
+ * but dont have an ops for it
+ */
+@@ -768,12 +767,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ /*XXX: check if we can do this at install time instead of current
+ * send data path
+ **/
+-static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+- struct tcf_meta_info *e, *n;
++ struct tcf_meta_info *e;
+ int tot_run_sz = 0, run_sz = 0;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->check_presence) {
+ run_sz = e->ops->check_presence(skb, e);
+ tot_run_sz += run_sz;
+@@ -794,7 +793,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+ where ORIGDATA = original ethernet header ...
+ */
+- u16 metalen = ife_get_sz(skb, ife);
++ u16 metalen = ife_get_sz(skb, p);
+ int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
+ unsigned int skboff = 0;
+ int new_len = skb->len + hdrm;
+@@ -832,25 +831,21 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ if (!ife_meta)
+ goto drop;
+
+- spin_lock(&ife->tcf_lock);
+-
+ /* XXX: we dont have a clever way of telling encode to
+ * not repeat some of the computations that are done by
+ * ops->presence_check...
+ */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->encode) {
+ err = e->ops->encode(skb, (void *)(ife_meta + skboff),
+ e);
+ }
+ if (err < 0) {
+ /* too corrupt to keep around if overwritten */
+- spin_unlock(&ife->tcf_lock);
+ goto drop;
+ }
+ skboff += err;
+ }
+- spin_unlock(&ife->tcf_lock);
+ oethh = (struct ethhdr *)skb->data;
+
+ if (!is_zero_ether_addr(p->eth_src))
+--
+2.51.0
+
--- /dev/null
+From 1aaad09de8bbc02941f105d9ed68bf10f83057d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Feb 2026 23:53:07 +0900
+Subject: net: sched: avoid qdisc_reset_all_tx_gt() vs dequeue race for
+ lockless qdiscs
+
+From: Koichiro Den <den@valinux.co.jp>
+
+[ Upstream commit 7f083faf59d14c04e01ec05a7507f036c965acf8 ]
+
+When shrinking the number of real tx queues,
+netif_set_real_num_tx_queues() calls qdisc_reset_all_tx_gt() to flush
+qdiscs for queues which will no longer be used.
+
+qdisc_reset_all_tx_gt() currently serializes qdisc_reset() with
+qdisc_lock(). However, for lockless qdiscs, the dequeue path is
+serialized by qdisc_run_begin/end() using qdisc->seqlock instead, so
+qdisc_reset() can run concurrently with __qdisc_run() and free skbs
+while they are still being dequeued, leading to UAF.
+
+This can easily be reproduced on e.g. virtio-net by imposing heavy
+traffic while frequently changing the number of queue pairs:
+
+ iperf3 -ub0 -c $peer -t 0 &
+ while :; do
+ ethtool -L eth0 combined 1
+ ethtool -L eth0 combined 2
+ done
+
+With KASAN enabled, this leads to reports like:
+
+ BUG: KASAN: slab-use-after-free in __qdisc_run+0x133f/0x1760
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ __qdisc_run+0x133f/0x1760
+ __dev_queue_xmit+0x248f/0x3550
+ ip_finish_output2+0xa42/0x2110
+ ip_output+0x1a7/0x410
+ ip_send_skb+0x2e6/0x480
+ udp_send_skb+0xb0a/0x1590
+ udp_sendmsg+0x13c9/0x1fc0
+ ...
+ </TASK>
+
+ Allocated by task 1270 on cpu 5 at 44.558414s:
+ ...
+ alloc_skb_with_frags+0x84/0x7c0
+ sock_alloc_send_pskb+0x69a/0x830
+ __ip_append_data+0x1b86/0x48c0
+ ip_make_skb+0x1e8/0x2b0
+ udp_sendmsg+0x13a6/0x1fc0
+ ...
+
+ Freed by task 1306 on cpu 3 at 44.558445s:
+ ...
+ kmem_cache_free+0x117/0x5e0
+ pfifo_fast_reset+0x14d/0x580
+ qdisc_reset+0x9e/0x5f0
+ netif_set_real_num_tx_queues+0x303/0x840
+ virtnet_set_channels+0x1bf/0x260 [virtio_net]
+ ethnl_set_channels+0x684/0xae0
+ ethnl_default_set_doit+0x31a/0x890
+ ...
+
+Serialize qdisc_reset_all_tx_gt() against the lockless dequeue path by
+taking qdisc->seqlock for TCQ_F_NOLOCK qdiscs, matching the
+serialization model already used by dev_reset_queue().
+
+Additionally clear QDISC_STATE_NON_EMPTY after reset so the qdisc state
+reflects an empty queue, avoiding needless re-scheduling.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Signed-off-by: Koichiro Den <den@valinux.co.jp>
+Link: https://patch.msgid.link/20260228145307.3955532-1-den@valinux.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index b34e9e93a1463..7bb73448de0d3 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -724,13 +724,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+ struct Qdisc *qdisc;
++ bool nolock;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
++ nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++ if (nolock)
++ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
++ if (nolock) {
++ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
++ spin_unlock_bh(&qdisc->seqlock);
++ }
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 56c6c5ff4248d4999787cc2114c785aae3017503 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:25 +0000
+Subject: net: stmmac: Fix error handling in VLAN add and delete paths
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 35dfedce442c4060cfe5b98368bc9643fb995716 ]
+
+stmmac_vlan_rx_add_vid() updates active_vlans and the VLAN hash
+register before writing the HW filter entry. If the filter write
+fails, it leaves a stale VID in active_vlans and the hash register.
+
+stmmac_vlan_rx_kill_vid() has the reverse problem: it clears
+active_vlans before removing the HW filter. On failure, the VID is
+gone from active_vlans but still present in the HW filter table.
+
+To fix this, reorder the operations to update the hash table first,
+then attempt the HW filter operation. If the HW filter fails, roll
+back both the active_vlans bitmap and the hash table by calling
+stmmac_vlan_update() again.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-2-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index b5de07b84f77b..1b3ea615cbba2 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6451,9 +6451,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ clear_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto err_pm_put;
++ }
+ }
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6474,15 +6478,21 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
++ ret = stmmac_vlan_update(priv, is_double);
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ goto del_vlan_error;
++ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto del_vlan_error;
++ }
+ }
+
+- ret = stmmac_vlan_update(priv, is_double);
+-
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+--
+2.51.0
+
--- /dev/null
+From 41780dbb088a9f7c98480498928259ffd699c9f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 50dacdc1b6a7a..9c3a12feb25d2 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2171,6 +2171,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++	/* check if nd_tbl is not initialized due to
++ * ipv6.disable=1 set during boot
++ */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From 87851bd3ec98fd085ca567b53f0875f4e7e2eb84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 3d36ea5701f02..7a3fb2a397a1e 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ del_timer_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From 6c10925ef644dd9178897ced4ed9a9f932e9ff67 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 6b62218718a06..cdc1aa8662544 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1023,18 +1023,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From 11f309cd4f185fe90bb5500c2632104635249dc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 8dd569765f96e..cffbb96beb6cb 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -66,6 +66,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From 7df03d22d7d7baedb6f360ad7c744037f2184f22 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:58 +0000
+Subject: octeon_ep: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 43b3160cb639079a15daeb5f080120afbfbfc918 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provides ordering guarantees but doesn't prevent the compiler
+from performing optimizations like caching in registers, load tearing etc.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-3-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 21 +++++++++------
+ .../net/ethernet/marvell/octeon_ep/octep_rx.c | 27 +++++++++++++------
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index aa98cc8fd344e..32b30cbb8c009 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -353,17 +353,22 @@ static void octep_clean_irqs(struct octep_device *oct)
+ */
+ static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index 4f3c1187a6e82..0ecfc4e36f3ac 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -317,10 +317,16 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
+
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+ * reached half its max value.
+@@ -331,8 +337,9 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -405,7 +412,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ u16 data_offset;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -488,7 +495,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+@@ -511,22 +518,26 @@ int octep_oq_process_rx(struct octep_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 91b48c4e1b99607b88c6a2fafe92ca69ad54de5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:57 +0000
+Subject: octeon_ep: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 18c04a808c436d629d5812ce883e3822a5f5a47f ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion,
+and replace napi_complete with napi_complete_done.
+
+Moving the IQ/OQ counter updates before napi_complete_done ensures
+1. Counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+ synchronized.
+napi_complete_done (vs napi_complete) allows for better
+interrupt coalescing.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-2-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index e171097c13654..aa98cc8fd344e 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -346,12 +346,12 @@ static void octep_clean_irqs(struct octep_device *oct)
+ }
+
+ /**
+- * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -367,7 +367,17 @@ static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+- wmb();
++ smp_wmb();
++}
++
++/**
++ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++{
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -393,7 +403,8 @@ static int octep_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+- napi_complete(napi);
++ octep_update_pkt(ioq_vector->iq, ioq_vector->oq);
++ napi_complete_done(napi, rx_done);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+ }
+--
+2.51.0
+
--- /dev/null
+From bc18b7486cf63e72b67ab739f4511e9f9b8e4db8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index c0977ffec96c4..3f2098e686f73 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9643,14 +9643,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
selftests-mptcp-more-stable-simult_flows-tests.patch
selftests-mptcp-join-check-removing-signal-subflow-endp.patch
arm-clean-up-the-memset64-c-wrapper.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+net-dpaa2-replace-dpaa2_mac_is_type_fixed-with-dpaa2.patch
+net-dpaa2-switch-assign-port_priv-mac-after-dpaa2_ma.patch
+net-dpaa2-switch-replace-direct-mac-access-with-dpaa.patch
+net-dpaa2-switch-serialize-changes-to-priv-mac-with-.patch
+dpaa2-switch-do-not-clear-any-interrupts-automatical.patch
+dpaa2-switch-fix-interrupt-storm-after-receiving-bad.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+kunit-tool-print-summary-of-failed-tests-if-a-few-fa.patch
+kunit-tool-make-json-do-nothing-if-raw_ouput-is-set.patch
+kunit-tool-parse-ktap-compliant-test-output.patch
+kunit-tool-don-t-include-ktap-headers-and-the-like-i.patch
+kunit-tool-make-parser-preserve-whitespace-when-prin.patch
+kunit-kunit.py-extract-handlers.patch
+kunit-tool-remove-unused-imports-and-variables.patch
+kunit-tool-fix-pre-existing-mypy-strict-errors-and-u.patch
+kunit-tool-add-command-line-interface-to-filter-and-.patch
+kunit-tool-copy-caller-args-in-run_kernel-to-prevent.patch
+net-dsa-realtek-rtl8365mb-fix-rtl8365mb_phy_ocp_writ.patch
+octeon_ep-relocate-counter-updates-before-napi.patch
+octeon_ep-avoid-compiler-and-iq-oq-reordering.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+wifi-mt76-fix-possible-oob-access-in-mt76_connac2_ma.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+bpf-export-bpf_link_inc_not_zero.patch
+bpf-fix-a-uaf-issue-in-bpf_trampoline_link_cgroup_sh.patch
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+net-sched-avoid-qdisc_reset_all_tx_gt-vs-dequeue-rac.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-stmmac-fix-error-handling-in-vlan-add-and-delete.patch
+net-ethernet-mtk_eth_soc-reset-prog-ptr-to-old_prog-.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
+net-sched-act_ife-fix-metalist-update-behavior.patch
+xdp-use-modulo-operation-to-calculate-xdp-frag-tailr.patch
+xdp-produce-a-warning-when-calculated-tailroom-is-ne.patch
+tracing-add-null-pointer-check-to-trigger_data_free.patch
--- /dev/null
+From 3aac0936f5fc700548c8040a129c5f0096b538a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:33:39 -0800
+Subject: tracing: Add NULL pointer check to trigger_data_free()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 457965c13f0837a289c9164b842d0860133f6274 ]
+
+If trigger_data_alloc() fails and returns NULL, event_hist_trigger_parse()
+jumps to the out_free error path. While kfree() safely handles a NULL
+pointer, trigger_data_free() does not. This causes a NULL pointer
+dereference in trigger_data_free() when evaluating
+data->cmd_ops->set_filter.
+
+Fix the problem by adding a NULL pointer check to trigger_data_free().
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://patch.msgid.link/20260305193339.2810953-1-linux@roeck-us.net
+Fixes: 0550069cc25f ("tracing: Properly process error handling in event_hist_trigger_parse()")
+Assisted-by: Gemini:gemini-3.1-pro
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events_trigger.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 782ccb2433bb4..401d88d3b2c4b 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -19,6 +19,9 @@ static DEFINE_MUTEX(trigger_cmd_mutex);
+
+ void trigger_data_free(struct event_trigger_data *data)
+ {
++ if (!data)
++ return;
++
+ if (data->cmd_ops->set_filter)
+ data->cmd_ops->set_filter(NULL, data, NULL);
+
+--
+2.51.0
+
--- /dev/null
+From 2783d5a1aabdc679f22d589f21e7ca94d54c369a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index a20ab577a3644..212b6f2af8de4 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From c4b5a47326964d57b7aca61b0247bb160f9a9914 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:16 +0100
+Subject: wifi: mt76: Fix possible oob access in
+ mt76_connac2_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 4e10a730d1b511ff49723371ed6d694dd1b2c785 ]
+
+Check frame length before accessing the mgmt fields in
+mt76_connac2_mac_write_txwi_80211 in order to avoid a possible oob
+access.
+
+Fixes: 577dbc6c656d ("mt76: mt7915: enable offloading of sequence number assignment")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-3-b0f6d1ad4850@kernel.org
+[fix check to also cover mgmt->u.action.u.addba_req.capab,
+correct Fixes tag]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index f7d392fce8c28..f69cb83adcca9 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -375,6 +375,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + 1 + 2 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+--
+2.51.0
+
--- /dev/null
+From 2a985da2042bfdf56b8bcd4b3426833e0c26816d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index b88ceb1f9800c..95de73f4a7dfd 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1800,6 +1800,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1816,8 +1818,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From 9f6660af810b5d79b8d1c836db58011a32131681 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:50 +0100
+Subject: xdp: produce a warning when calculated tailroom is negative
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8821e857759be9db3cde337ad328b71fe5c8a55f ]
+
+Many ethernet drivers report xdp Rx queue frag size as being the same as
+DMA write size. However, the only user of this field, namely
+bpf_xdp_frags_increase_tail(), clearly expects a truesize.
+
+Such difference leads to unspecific memory corruption issues under certain
+circumstances, e.g. in ixgbevf maximum DMA write size is 3 KB, so when
+running xskxceiver's XDP_ADJUST_TAIL_GROW_MULTI_BUFF, a 6K packet fully
+uses all DMA-writable space in 2 buffers. This would be fine, if only
+rxq->frag_size was properly set to 4K, but value of 3K results in a
+negative tailroom, because there is a non-zero page offset.
+
+We are supposed to return -EINVAL and be done with it in such case, but due
+to tailroom being stored as an unsigned int, it is reported to be somewhere
+near UINT_MAX, resulting in a tail being grown, even if the requested
+offset is too much (it is around 2K in the abovementioned test). This later
+leads to all kinds of unspecific calltraces.
+
+[ 7340.337579] xskxceiver[1440]: segfault at 1da718 ip 00007f4161aeac9d sp 00007f41615a6a00 error 6
+[ 7340.338040] xskxceiver[1441]: segfault at 7f410000000b ip 00000000004042b5 sp 00007f415bffecf0 error 4
+[ 7340.338179] in libc.so.6[61c9d,7f4161aaf000+160000]
+[ 7340.339230] in xskxceiver[42b5,400000+69000]
+[ 7340.340300] likely on CPU 6 (core 0, socket 6)
+[ 7340.340302] Code: ff ff 01 e9 f4 fe ff ff 0f 1f 44 00 00 4c 39 f0 74 73 31 c0 ba 01 00 00 00 f0 0f b1 17 0f 85 ba 00 00 00 49 8b 87 88 00 00 00 <4c> 89 70 08 eb cc 0f 1f 44 00 00 48 8d bd f0 fe ff ff 89 85 ec fe
+[ 7340.340888] likely on CPU 3 (core 0, socket 3)
+[ 7340.345088] Code: 00 00 00 ba 00 00 00 00 be 00 00 00 00 89 c7 e8 31 ca ff ff 89 45 ec 8b 45 ec 85 c0 78 07 b8 00 00 00 00 eb 46 e8 0b c8 ff ff <8b> 00 83 f8 69 74 24 e8 ff c7 ff ff 8b 00 83 f8 0b 74 18 e8 f3 c7
+[ 7340.404334] Oops: general protection fault, probably for non-canonical address 0x6d255010bdffc: 0000 [#1] SMP NOPTI
+[ 7340.405972] CPU: 7 UID: 0 PID: 1439 Comm: xskxceiver Not tainted 6.19.0-rc1+ #21 PREEMPT(lazy)
+[ 7340.408006] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-5.fc42 04/01/2014
+[ 7340.409716] RIP: 0010:lookup_swap_cgroup_id+0x44/0x80
+[ 7340.410455] Code: 83 f8 1c 73 39 48 ba ff ff ff ff ff ff ff 03 48 8b 04 c5 20 55 fa bd 48 21 d1 48 89 ca 83 e1 01 48 d1 ea c1 e1 04 48 8d 04 90 <8b> 00 48 83 c4 10 d3 e8 c3 cc cc cc cc 31 c0 e9 98 b7 dd 00 48 89
+[ 7340.412787] RSP: 0018:ffffcc5c04f7f6d0 EFLAGS: 00010202
+[ 7340.413494] RAX: 0006d255010bdffc RBX: ffff891f477895a8 RCX: 0000000000000010
+[ 7340.414431] RDX: 0001c17e3fffffff RSI: 00fa070000000000 RDI: 000382fc7fffffff
+[ 7340.415354] RBP: 00fa070000000000 R08: ffffcc5c04f7f8f8 R09: ffffcc5c04f7f7d0
+[ 7340.416283] R10: ffff891f4c1a7000 R11: ffffcc5c04f7f9c8 R12: ffffcc5c04f7f7d0
+[ 7340.417218] R13: 03ffffffffffffff R14: 00fa06fffffffe00 R15: ffff891f47789500
+[ 7340.418229] FS: 0000000000000000(0000) GS:ffff891ffdfaa000(0000) knlGS:0000000000000000
+[ 7340.419489] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 7340.420286] CR2: 00007f415bfffd58 CR3: 0000000103f03002 CR4: 0000000000772ef0
+[ 7340.421237] PKRU: 55555554
+[ 7340.421623] Call Trace:
+[ 7340.421987] <TASK>
+[ 7340.422309] ? softleaf_from_pte+0x77/0xa0
+[ 7340.422855] swap_pte_batch+0xa7/0x290
+[ 7340.423363] zap_nonpresent_ptes.constprop.0.isra.0+0xd1/0x270
+[ 7340.424102] zap_pte_range+0x281/0x580
+[ 7340.424607] zap_pmd_range.isra.0+0xc9/0x240
+[ 7340.425177] unmap_page_range+0x24d/0x420
+[ 7340.425714] unmap_vmas+0xa1/0x180
+[ 7340.426185] exit_mmap+0xe1/0x3b0
+[ 7340.426644] __mmput+0x41/0x150
+[ 7340.427098] exit_mm+0xb1/0x110
+[ 7340.427539] do_exit+0x1b2/0x460
+[ 7340.427992] do_group_exit+0x2d/0xc0
+[ 7340.428477] get_signal+0x79d/0x7e0
+[ 7340.428957] arch_do_signal_or_restart+0x34/0x100
+[ 7340.429571] exit_to_user_mode_loop+0x8e/0x4c0
+[ 7340.430159] do_syscall_64+0x188/0x6b0
+[ 7340.430672] ? __do_sys_clone3+0xd9/0x120
+[ 7340.431212] ? switch_fpu_return+0x4e/0xd0
+[ 7340.431761] ? arch_exit_to_user_mode_prepare.isra.0+0xa1/0xc0
+[ 7340.432498] ? do_syscall_64+0xbb/0x6b0
+[ 7340.433015] ? __handle_mm_fault+0x445/0x690
+[ 7340.433582] ? count_memcg_events+0xd6/0x210
+[ 7340.434151] ? handle_mm_fault+0x212/0x340
+[ 7340.434697] ? do_user_addr_fault+0x2b4/0x7b0
+[ 7340.435271] ? clear_bhb_loop+0x30/0x80
+[ 7340.435788] ? clear_bhb_loop+0x30/0x80
+[ 7340.436299] ? clear_bhb_loop+0x30/0x80
+[ 7340.436812] ? clear_bhb_loop+0x30/0x80
+[ 7340.437323] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 7340.437973] RIP: 0033:0x7f4161b14169
+[ 7340.438468] Code: Unable to access opcode bytes at 0x7f4161b1413f.
+[ 7340.439242] RSP: 002b:00007ffc6ebfa770 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
+[ 7340.440173] RAX: fffffffffffffe00 RBX: 00000000000005a1 RCX: 00007f4161b14169
+[ 7340.441061] RDX: 00000000000005a1 RSI: 0000000000000109 RDI: 00007f415bfff990
+[ 7340.441943] RBP: 00007ffc6ebfa7a0 R08: 0000000000000000 R09: 00000000ffffffff
+[ 7340.442824] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+[ 7340.443707] R13: 0000000000000000 R14: 00007f415bfff990 R15: 00007f415bfff6c0
+[ 7340.444586] </TASK>
+[ 7340.444922] Modules linked in: rfkill intel_rapl_msr intel_rapl_common intel_uncore_frequency_common skx_edac_common nfit libnvdimm kvm_intel vfat fat kvm snd_pcm irqbypass rapl iTCO_wdt snd_timer intel_pmc_bxt iTCO_vendor_support snd ixgbevf virtio_net soundcore i2c_i801 pcspkr libeth_xdp net_failover i2c_smbus lpc_ich failover libeth virtio_balloon joydev 9p fuse loop zram lz4hc_compress lz4_compress 9pnet_virtio 9pnet netfs ghash_clmulni_intel serio_raw qemu_fw_cfg
+[ 7340.449650] ---[ end trace 0000000000000000 ]---
+
+The issue can be fixed in all in-tree drivers, but we cannot just trust OOT
+drivers to not do this. Therefore, make tailroom a signed int and produce a
+warning when it is negative to prevent such mistakes in the future.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-10-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 128e4b947d985..d71c24eafcb5a 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4078,13 +4078,14 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+ struct xdp_rxq_info *rxq = xdp->rxq;
+- unsigned int tailroom;
++ int tailroom;
+
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+ tailroom = rxq->frag_size - skb_frag_size(frag) -
+ skb_frag_off(frag) % rxq->frag_size;
++ WARN_ON_ONCE(tailroom < 0);
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From 547e46a6b20d4b874d3d5d781098be61eae5a14a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:42 +0100
+Subject: xdp: use modulo operation to calculate XDP frag tailroom
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 88b6b7f7b216108a09887b074395fa7b751880b1 ]
+
+The current formula for calculating XDP tailroom in mbuf packets works only
+if each frag has its own page (if rxq->frag_size is PAGE_SIZE), this
+defeats the purpose of the parameter overall and without any indication
+leads to negative calculated tailroom on at least half of frags, if shared
+pages are used.
+
+There are not many drivers that set rxq->frag_size. Among them:
+* i40e and enetc always split page uniformly between frags, use shared
+ pages
+* ice uses page_pool frags via libeth, those are power-of-2 and uniformly
+ distributed across page
+* idpf has variable frag_size with XDP on, so current API is not applicable
+* mlx5, mtk and mvneta use PAGE_SIZE or 0 as frag_size for page_pool
+
+As for AF_XDP ZC, only ice, i40e and idpf declare frag_size for it. Modulo
+operation yields good results for aligned chunks, they are all power-of-2,
+between 2K and PAGE_SIZE. Formula without modulo fails when chunk_size is
+2K. Buffers in unaligned mode are not distributed uniformly, so a modulo
+operation would not work.
+
+To accommodate unaligned buffers, we could define frag_size as
+data + tailroom, and hence do not subtract offset when calculating
+tailroom, but this would necessitate more changes in the drivers.
+
+Define rxq->frag_size as an even portion of a page that fully belongs to a
+single frag. When calculating tailroom, locate the data start within such
+portion by performing a modulo operation on page offset.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-2-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index c177e40e70770..128e4b947d985 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4083,7 +4083,8 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+- tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
++ tailroom = rxq->frag_size - skb_frag_size(frag) -
++ skb_frag_off(frag) % rxq->frag_size;
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From fc4046fa81741b489aba6385c3fd58c9a00a0727 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 9cb61db67efde..12877f85bb79d 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -379,11 +379,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From 4e43f34e431b7237328795b7c2ab923ebfa2bed7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 16:27:11 -0700
+Subject: ACPI: APEI: GHES: Disable KASAN instrumentation when compile testing
+ with clang < 18
+
+From: Nathan Chancellor <nathan@kernel.org>
+
+[ Upstream commit b584bfbd7ec417f257f651cc00a90c66e31dfbf1 ]
+
+After a recent innocuous change to drivers/acpi/apei/ghes.c, building
+ARCH=arm64 allmodconfig with clang-17 or older (which has both
+CONFIG_KASAN=y and CONFIG_WERROR=y) fails with:
+
+ drivers/acpi/apei/ghes.c:902:13: error: stack frame size (2768) exceeds limit (2048) in 'ghes_do_proc' [-Werror,-Wframe-larger-than]
+ 902 | static void ghes_do_proc(struct ghes *ghes,
+ | ^
+
+A KASAN pass that removes unneeded stack instrumentation, enabled by
+default in clang-18 [1], drastically improves stack usage in this case.
+
+To avoid the warning in the common allmodconfig case when it can break
+the build, disable KASAN for ghes.o when compile testing with clang-17
+and older. Disabling KASAN outright may hide legitimate runtime issues,
+so live with the warning in that case; the user can either increase the
+frame warning limit or disable -Werror, which they should probably do
+when debugging with KASAN anyways.
+
+Closes: https://github.com/ClangBuiltLinux/linux/issues/2148
+Link: https://github.com/llvm/llvm-project/commit/51fbab134560ece663517bf1e8c2a30300d08f1a [1]
+Signed-off-by: Nathan Chancellor <nathan@kernel.org>
+Cc: All applicable <stable@vger.kernel.org>
+Link: https://patch.msgid.link/20260114-ghes-avoid-wflt-clang-older-than-18-v1-1-9c8248bfe4f4@kernel.org
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/apei/Makefile | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/acpi/apei/Makefile b/drivers/acpi/apei/Makefile
+index 2c474e6477e12..346cdf0a0ef99 100644
+--- a/drivers/acpi/apei/Makefile
++++ b/drivers/acpi/apei/Makefile
+@@ -1,6 +1,10 @@
+ # SPDX-License-Identifier: GPL-2.0
+ obj-$(CONFIG_ACPI_APEI) += apei.o
+ obj-$(CONFIG_ACPI_APEI_GHES) += ghes.o
++# clang versions prior to 18 may blow out the stack with KASAN
++ifeq ($(CONFIG_COMPILE_TEST)_$(CONFIG_CC_IS_CLANG)_$(call clang-min-version, 180000),y_y_)
++KASAN_SANITIZE_ghes.o := n
++endif
+ obj-$(CONFIG_ACPI_APEI_EINJ) += einj.o
+ einj-y := einj-core.o
+ einj-$(CONFIG_ACPI_APEI_EINJ_CXL) += einj-cxl.o
+--
+2.51.0
+
--- /dev/null
+From 420759ecf73c4a1b094d1c9099163c9723605ef1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 22:37:53 +0530
+Subject: amd-xgbe: fix MAC_TCR_SS register width for 2.5G and 10M speeds
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 9439a661c2e80485406ce2c90b107ca17858382d ]
+
+Extend the MAC_TCR_SS (Speed Select) register field width from 2 bits
+to 3 bits to properly support all speed settings.
+
+The MAC_TCR register's SS field encoding requires 3 bits to represent
+all supported speeds:
+ - 0x00: 10Gbps (XGMII)
+ - 0x02: 2.5Gbps (GMII) / 100Mbps
+ - 0x03: 1Gbps / 10Mbps
+ - 0x06: 2.5Gbps (XGMII) - P100a only
+
+With only 2 bits, values 0x04-0x07 cannot be represented, which breaks
+2.5G XGMII mode on newer platforms and causes incorrect speed select
+values to be programmed.
+
+Fixes: 07445f3c7ca1 ("amd-xgbe: Add support for 10 Mbps speed")
+Co-developed-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260226170753.250312-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index aa25a8a0a106f..d99d2295eab0f 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -514,7 +514,7 @@
+ #define MAC_SSIR_SSINC_INDEX 16
+ #define MAC_SSIR_SSINC_WIDTH 8
+ #define MAC_TCR_SS_INDEX 29
+-#define MAC_TCR_SS_WIDTH 2
++#define MAC_TCR_SS_WIDTH 3
+ #define MAC_TCR_TE_INDEX 0
+ #define MAC_TCR_TE_WIDTH 1
+ #define MAC_TCR_VNE_INDEX 24
+--
+2.51.0
+
--- /dev/null
+From b57be456d2302bb20edd6dbbb1128485f344dfdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary, as these
+functions are called from suspend/resume paths, which are already
+serialized by the PM core.
+
+Fix this by removing the spinlock. Since the only code that takes this
+lock is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index e6a2492360227..c6fcddbff3f56 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1181,7 +1181,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1192,8 +1191,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1209,8 +1206,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1220,7 +1215,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1231,8 +1225,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1247,8 +1239,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 0e8698928e4d7..6e8fafb2acbaa 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -185,7 +185,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 7526a0906b391..c98461252053f 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1083,9 +1083,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From 19bbee3c8d70a97dbcd64773ee78bbcfd839be95 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index 42e8047c65105..4a8ca2d7ff595 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1260,24 +1260,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 46b12366e69520ad32abf3a0e2eaab72782f80df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:03:01 +0800
+Subject: bpf/bonding: reject vlan+srcmac xmit_hash_policy change when XDP is
+ loaded
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 479d589b40b836442bbdadc3fdb37f001bb67f26 ]
+
+bond_option_mode_set() already rejects mode changes that would make a
+loaded XDP program incompatible via bond_xdp_check(). However,
+bond_option_xmit_hash_policy_set() has no such guard.
+
+For 802.3ad and balance-xor modes, bond_xdp_check() returns false when
+xmit_hash_policy is vlan+srcmac, because the 802.1q payload is usually
+absent due to hardware offload. This means a user can:
+
+1. Attach a native XDP program to a bond in 802.3ad/balance-xor mode
+ with a compatible xmit_hash_policy (e.g. layer2+3).
+2. Change xmit_hash_policy to vlan+srcmac while XDP remains loaded.
+
+This leaves bond->xdp_prog set but bond_xdp_check() now returning false
+for the same device. When the bond is later destroyed, dev_xdp_uninstall()
+calls bond_xdp_set(dev, NULL, NULL) to remove the program, which hits
+the bond_xdp_check() guard and returns -EOPNOTSUPP, triggering:
+
+WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL))
+
+Fix this by rejecting xmit_hash_policy changes to vlan+srcmac when an
+XDP program is loaded on a bond in 802.3ad or balance-xor mode.
+
+commit 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+introduced bond_xdp_check() which returns false for 802.3ad/balance-xor
+modes when xmit_hash_policy is vlan+srcmac. The check was wired into
+bond_xdp_set() to reject XDP attachment with an incompatible policy, but
+the symmetric path -- preventing xmit_hash_policy from being changed to an
+incompatible value after XDP is already loaded -- was left unguarded in
+bond_option_xmit_hash_policy_set().
+
+Note:
+commit 094ee6017ea0 ("bonding: check xdp prog when set bond mode")
+later added a similar guard to bond_option_mode_set(), but
+bond_option_xmit_hash_policy_set() remained unprotected.
+
+Reported-by: syzbot+5a287bcdc08104bc3132@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6995aff6.050a0220.2eeac1.014e.GAE@google.com/T/
+Fixes: 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260226080306.98766-2-jiayuan.chen@linux.dev
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 9 +++++++--
+ drivers/net/bonding/bond_options.c | 2 ++
+ include/net/bonding.h | 1 +
+ 3 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index dd1f8cad953bf..2ac455a9d1bb1 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -322,7 +322,7 @@ static bool bond_sk_check(struct bonding *bond)
+ }
+ }
+
+-bool bond_xdp_check(struct bonding *bond, int mode)
++bool __bond_xdp_check(int mode, int xmit_policy)
+ {
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN:
+@@ -333,7 +333,7 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ /* vlan+srcmac is not supported with XDP as in most cases the 802.1q
+ * payload is not in the packet due to hardware offload.
+ */
+- if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
++ if (xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return true;
+ fallthrough;
+ default:
+@@ -341,6 +341,11 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ }
+ }
+
++bool bond_xdp_check(struct bonding *bond, int mode)
++{
++ return __bond_xdp_check(mode, bond->params.xmit_policy);
++}
++
+ /*---------------------------------- VLAN -----------------------------------*/
+
+ /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index a37b47b8ea8ed..33af81a55a45f 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1546,6 +1546,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
+ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+ {
++ if (bond->xdp_prog && !__bond_xdp_check(BOND_MODE(bond), newval->value))
++ return -EOPNOTSUPP;
+ netdev_dbg(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.xmit_policy = newval->value;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 9fb40a5920209..66940d41d4854 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -696,6 +696,7 @@ void bond_debug_register(struct bonding *bond);
+ void bond_debug_unregister(struct bonding *bond);
+ void bond_debug_reregister(struct bonding *bond);
+ const char *bond_mode_name(int mode);
++bool __bond_xdp_check(int mode, int xmit_policy);
+ bool bond_xdp_check(struct bonding *bond, int mode);
+ void bond_setup(struct net_device *bond_dev);
+ unsigned int bond_get_num_tx_queues(void);
+--
+2.51.0
+
--- /dev/null
+From b7ddf6f92a243ea215ba59bc596d3117c03b42f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 17:52:17 +0800
+Subject: bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
+
+From: Lang Xu <xulang@uniontech.com>
+
+[ Upstream commit 56145d237385ca0e7ca9ff7b226aaf2eb8ef368b ]
+
+The root cause of this bug is that when 'bpf_link_put' reduces the
+refcount of 'shim_link->link.link' to zero, the resource is considered
+released but may still be referenced via 'tr->progs_hlist' in
+'cgroup_shim_find'. The actual cleanup of 'tr->progs_hlist' in
+'bpf_shim_tramp_link_release' is deferred. During this window, another
+process can cause a use-after-free via 'bpf_trampoline_link_cgroup_shim'.
+
+Based on Martin KaFai Lau's suggestions, I have created a simple patch.
+
+To fix this:
+ Add an atomic non-zero check in 'bpf_trampoline_link_cgroup_shim'.
+ Only increment the refcount if it is not already zero.
+
+Testing:
+ I verified the fix by adding a delay in
+ 'bpf_shim_tramp_link_release' to make the bug easier to trigger:
+
+static void bpf_shim_tramp_link_release(struct bpf_link *link)
+{
+ /* ... */
+ if (!shim_link->trampoline)
+ return;
+
++ msleep(100);
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link,
+ shim_link->trampoline, NULL));
+ bpf_trampoline_put(shim_link->trampoline);
+}
+
+Before the patch, running a PoC easily reproduced the crash(almost 100%)
+with a call trace similar to KaiyanM's report.
+After the patch, the bug no longer occurs even after millions of
+iterations.
+
+Fixes: 69fd337a975c ("bpf: per-cgroup lsm flavor")
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Closes: https://lore.kernel.org/bpf/3c4ebb0b.46ff8.19abab8abe2.Coremail.kaiyanm@hust.edu.cn/
+Signed-off-by: Lang Xu <xulang@uniontech.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/279EEE1BA1DDB49D+20260303095217.34436-1-xulang@uniontech.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/trampoline.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index dbe7754b4f4e1..894cd6f205f5f 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -749,10 +749,8 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ mutex_lock(&tr->mutex);
+
+ shim_link = cgroup_shim_find(tr, bpf_func);
+- if (shim_link) {
++ if (shim_link && !IS_ERR(bpf_link_inc_not_zero(&shim_link->link.link))) {
+ /* Reusing existing shim attached by the other program. */
+- bpf_link_inc(&shim_link->link.link);
+-
+ mutex_unlock(&tr->mutex);
+ bpf_trampoline_put(tr); /* bpf_trampoline_get above */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 8b4e95896d8adaf34bd79eee811db0aa6c30adb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added a locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index e33ff2a5b20cc..152cc29e87d7a 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1170,6 +1170,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From 013de98a75ac18e78a3d07b28ec1e5ccb4fa4604 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function call free_irq() in its error path with the
+mpc_lock mutex held. But if an interrupt already occurred the
+interrupt handler will be waiting for the mpc_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensure that the IRQ
+handler will exit right away once it acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index ec5c64006a16f..74906aa98be3e 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1201,6 +1201,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1244,12 +1245,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f1effd6fbb73c8594dbef85fd6b41b65dd4c0ec8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:58:12 -0800
+Subject: dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ
+ handler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 74badb9c20b1a9c02a95c735c6d3cd6121679c93 ]
+
+Commit 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ
+handler") introduces a range check for if_id to avoid an out-of-bounds
+access. If an out-of-bounds if_id is detected, the interrupt status is
+not cleared. This may result in an interrupt storm.
+
+Clear the interrupt status after detecting an out-of-bounds if_id to avoid
+the problem.
+
+Found by an experimental AI code review agent at Google.
+
+Fixes: 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ handler")
+Cc: Junrui Luo <moonafterrain@outlook.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20260227055812.1777915-1-linux@roeck-us.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index e78f400784770..a7c8ec0bdfe53 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1532,7 +1532,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+- goto out;
++ goto out_clear;
+ }
+ port_priv = ethsw->ports[if_id];
+
+@@ -1552,6 +1552,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ dpaa2_switch_port_connect_mac(port_priv);
+ }
+
++out_clear:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From 1677e08e090807354d7160ed432fa4a4b6bd0605 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 16:24:52 +0800
+Subject: drm/sched: Fix kernel-doc warning for drm_sched_job_done()
+
+From: Yujie Liu <yujie.liu@intel.com>
+
+[ Upstream commit 61ded1083b264ff67ca8c2de822c66b6febaf9a8 ]
+
+There is a kernel-doc warning for the scheduler:
+
+Warning: drivers/gpu/drm/scheduler/sched_main.c:367 function parameter 'result' not described in 'drm_sched_job_done'
+
+Fix the warning by describing the undocumented error code.
+
+Fixes: 539f9ee4b52a ("drm/scheduler: properly forward fence errors")
+Signed-off-by: Yujie Liu <yujie.liu@intel.com>
+[phasta: Flesh out commit message]
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://patch.msgid.link/20260227082452.1802922-1-yujie.liu@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/scheduler/sched_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 4dde0dc525ce5..4f43c0fa4019f 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -404,6 +404,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+ /**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
++ * @result: 0 on success, -ERRNO on error
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+--
+2.51.0
+
--- /dev/null
+From 2699e837a11b18f0b26b67563b96921dece1dda5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:09:32 +0100
+Subject: drm/solomon: Fix page start when updating rectangle in page
+ addressing mode
+
+From: Francesco Lavra <flavra@baylibre.com>
+
+[ Upstream commit 36d9579fed6c9429aa172f77bd28c58696ce8e2b ]
+
+In page addressing mode, the pixel values of a dirty rectangle must be sent
+to the display controller one page at a time. The range of pages
+corresponding to a given rectangle is being incorrectly calculated as if
+the Y value of the top left coordinate of the rectangle was 0. This can
+result in rectangle updates being displayed on wrong parts of the screen.
+
+Fix the above issue by consolidating the start page calculation in a single
+place at the beginning of the update_rect function, and using the
+calculated value for all addressing modes.
+
+Fixes: b0daaa5cfaa5 ("drm/ssd130x: Support page addressing mode")
+Signed-off-by: Francesco Lavra <flavra@baylibre.com>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Link: https://patch.msgid.link/20260210180932.736502-1-flavra@baylibre.com
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index e0fc12d514d76..cd8347396082a 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -736,6 +736,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+ unsigned int page_height = SSD130X_PAGE_HEIGHT;
++ u8 page_start = ssd130x->page_offset + y / page_height;
+ unsigned int pages = DIV_ROUND_UP(height, page_height);
+ struct drm_device *drm = &ssd130x->drm;
+ u32 array_idx = 0;
+@@ -773,14 +774,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+
+ if (!ssd130x->page_address_mode) {
+- u8 page_start;
+-
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ return ret;
+
+- page_start = ssd130x->page_offset + y / page_height;
+ ret = ssd130x_set_page_range(ssd130x, page_start, pages);
+ if (ret < 0)
+ return ret;
+@@ -812,7 +810,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+ if (ssd130x->page_address_mode) {
+ ret = ssd130x_set_page_pos(ssd130x,
+- ssd130x->page_offset + i,
++ page_start + i,
+ ssd130x->col_offset + x);
+ if (ret < 0)
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From eb0175baac512ad551d85c2e5131ba56495a7350 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 16:45:46 -0800
+Subject: drm/xe: Do not preempt fence signaling CS instructions
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit cdc8a1e11f4d5b480ec750e28010c357185b95a6 ]
+
+If a batch buffer is complete, it makes little sense to preempt the
+fence signaling instructions in the ring, as the largest portion of the
+work (the batch buffer) is already done and fence signaling consists of
+only a few instructions. If these instructions are preempted, the GuC
+would need to perform a context switch just to signal the fence, which
+is costly and delays fence signaling. Avoid this scenario by disabling
+preemption immediately after the BB start instruction and re-enabling it
+after executing the fence signaling instructions.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: Carlos Santa <carlos.santa@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Link: https://patch.msgid.link/20260115004546.58060-1-matthew.brost@intel.com
+(cherry picked from commit 2bcbf2dcde0c839a73af664a3c77d4e77d58a3eb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_ring_ops.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
+index fb31e09acb519..c9e8969f99fc7 100644
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -259,6 +259,9 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+@@ -322,6 +325,9 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+@@ -371,6 +377,9 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ i = emit_render_cache_flush(job, dw, i);
+
+ if (job->user_fence.used)
+--
+2.51.0
+
--- /dev/null
+From b061c988e1817906d6b7984e1edc1eef567caf20 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 17:28:11 +0000
+Subject: drm/xe/reg_sr: Fix leak on xa_store failure
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 3091723785def05ebfe6a50866f87a044ae314ba ]
+
+Free the newly allocated entry when xa_store() fails to avoid a memory
+leak on the error path.
+
+v2: use goto fail_free. (Bala)
+
+Fixes: e5283bd4dfec ("drm/xe/reg_sr: Remove register pool")
+Cc: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patch.msgid.link/20260204172810.1486719-2-shuicheng.lin@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 6bc6fec71ac45f52db609af4e62bdb96b9f5fadb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_reg_sr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
+index d3773a9853872..ae9e6df2f4e12 100644
+--- a/drivers/gpu/drm/xe/xe_reg_sr.c
++++ b/drivers/gpu/drm/xe/xe_reg_sr.c
+@@ -102,10 +102,12 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
+ *pentry = *e;
+ ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
+ if (ret)
+- goto fail;
++ goto fail_free;
+
+ return 0;
+
++fail_free:
++ kfree(pentry);
+ fail:
+ xe_gt_err(gt,
+ "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n",
+--
+2.51.0
+
--- /dev/null
+From 2cbefba2071b0eedefdb4bf1792069bacfe373f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 16:14:20 +0200
+Subject: e1000e: clear DPG_EN after reset to avoid autonomous power-gating
+
+From: Vitaly Lifshits <vitaly.lifshits@intel.com>
+
+[ Upstream commit 0942fc6d324eb9c6b16187b2aa994c0823557f06 ]
+
+Panther Lake systems introduced an autonomous power gating feature for
+the integrated Gigabit Ethernet in shutdown state (S5) state. As part of
+it, the reset value of DPG_EN bit was changed to 1. Clear this bit after
+performing hardware reset to avoid errors such as Tx/Rx hangs, or packet
+loss/corruption.
+
+Fixes: 0c9183ce61bc ("e1000e: Add support for the next LOM generation")
+Signed-off-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Avigail Dahan <avigailx.dahan@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e1000e/defines.h | 1 +
+ drivers/net/ethernet/intel/e1000e/ich8lan.c | 9 +++++++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
+index ba331899d1861..d4a1041e456dc 100644
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -33,6 +33,7 @@
+
+ /* Extended Device Control */
+ #define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
++#define E1000_CTRL_EXT_DPG_EN 0x00000008 /* Dynamic Power Gating Enable */
+ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+ #define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index df4e7d781cb1c..f9328caefe44b 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -4925,6 +4925,15 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, reg);
+
++ /* The hardware reset value of the DPG_EN bit is 1.
++ * Clear DPG_EN to prevent unexpected autonomous power gating.
++ */
++ if (hw->mac.type >= e1000_pch_ptp) {
++ reg = er32(CTRL_EXT);
++ reg &= ~E1000_CTRL_EXT_DPG_EN;
++ ew32(CTRL_EXT, reg);
++ }
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 81d3e2ea802894683efb295c26513b0414a35cef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 May 2025 22:13:17 +0530
+Subject: HID: multitouch: add device ID for Apple Touch Bar
+
+From: Kerem Karabay <kekrby@gmail.com>
+
+[ Upstream commit 2c31ec923c323229566d799267000f8123af4449 ]
+
+This patch adds the device ID of Apple Touch Bar found on x86 MacBook Pros
+to the hid-multitouch driver.
+
+Note that this is device ID is for T2 Macs. Testing on T1 Macs would be
+appreciated.
+
+Signed-off-by: Kerem Karabay <kekrby@gmail.com>
+Co-developed-by: Aditya Garg <gargaditya08@live.com>
+Signed-off-by: Aditya Garg <gargaditya08@live.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Stable-dep-of: a2e70a89fa58 ("HID: multitouch: new class MT_CLS_EGALAX_P80H84")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/Kconfig | 1 +
+ drivers/hid/hid-multitouch.c | 17 +++++++++++++++++
+ 2 files changed, 18 insertions(+)
+
+diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
+index f283f271d87e7..586de50a26267 100644
+--- a/drivers/hid/Kconfig
++++ b/drivers/hid/Kconfig
+@@ -730,6 +730,7 @@ config HID_MULTITOUCH
+ Say Y here if you have one of the following devices:
+ - 3M PCT touch screens
+ - ActionStar dual touch panels
++ - Apple Touch Bar on x86 MacBook Pros
+ - Atmel panels
+ - Cando dual touch panels
+ - Chunghwa panels
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index b7c2640a61b4a..5aed9e320d306 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -216,6 +216,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_GOOGLE 0x0111
+ #define MT_CLS_RAZER_BLADE_STEALTH 0x0112
+ #define MT_CLS_SMART_TECH 0x0113
++#define MT_CLS_APPLE_TOUCHBAR 0x0114
+ #define MT_CLS_SIS 0x0457
+
+ #define MT_DEFAULT_MAXCONTACT 10
+@@ -402,6 +403,12 @@ static const struct mt_class mt_classes[] = {
+ MT_QUIRK_CONTACT_CNT_ACCURATE |
+ MT_QUIRK_SEPARATE_APP_REPORT,
+ },
++ { .name = MT_CLS_APPLE_TOUCHBAR,
++ .quirks = MT_QUIRK_HOVERING |
++ MT_QUIRK_SLOT_IS_CONTACTID_MINUS_ONE |
++ MT_QUIRK_APPLE_TOUCHBAR,
++ .maxcontacts = 11,
++ },
+ { .name = MT_CLS_SIS,
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_ALWAYS_VALID |
+@@ -1842,6 +1849,11 @@ static int mt_probe(struct hid_device *hdev, const struct hid_device_id *id)
+ if (ret != 0)
+ return ret;
+
++ if (mtclass->name == MT_CLS_APPLE_TOUCHBAR &&
++ !hid_find_field(hdev, HID_INPUT_REPORT,
++ HID_DG_TOUCHPAD, HID_DG_TRANSDUCER_INDEX))
++ return -ENODEV;
++
+ if (mtclass->quirks & MT_QUIRK_FIX_CONST_CONTACT_ID)
+ mt_fix_const_fields(hdev, HID_DG_CONTACTID);
+
+@@ -2332,6 +2344,11 @@ static const struct hid_device_id mt_devices[] = {
+ MT_USB_DEVICE(USB_VENDOR_ID_XIROKU,
+ USB_DEVICE_ID_XIROKU_CSR2) },
+
++ /* Apple Touch Bar */
++ { .driver_data = MT_CLS_APPLE_TOUCHBAR,
++ HID_USB_DEVICE(USB_VENDOR_ID_APPLE,
++ USB_DEVICE_ID_APPLE_TOUCHBAR_DISPLAY) },
++
+ /* Google MT devices */
+ { .driver_data = MT_CLS_GOOGLE,
+ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_GOOGLE,
+--
+2.51.0
+
--- /dev/null
+From 90640aedcbea92b2ab3b656babae928cb0776bcf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 2 Dec 2025 21:35:47 -0500
+Subject: HID: multitouch: add quirks for Lenovo Yoga Book 9i
+
+From: Brian Howard <blhoward2@gmail.com>
+
+[ Upstream commit 822bc5b3744b0b2c2c9678aa1d80b2cf04fdfabf ]
+
+The Lenovo Yoga Book 9i is a dual-screen laptop, with a single composite
+USB device providing both touch and tablet interfaces for both screens.
+All inputs report through a single device, differentiated solely by report
+numbers. As there is no way for udev to differentiate the inputs based on
+USB vendor/product ID or interface numbers, custom naming is required to
+match against for downstream configuration. A firmware bug also results
+in an erroneous InRange message report being received after the stylus
+leaves proximity, blocking later touch events. Add required quirks for
+Gen 8 to Gen 10 models, including a new quirk providing for custom input
+device naming and dropping erroneous InRange reports.
+
+Signed-off-by: Brian Howard <blhoward2@gmail.com>
+Tested-by: Brian Howard <blhoward2@gmail.com>
+Tested-by: Kris Fredrick <linux.baguette800@slmail.me>
+Reported-by: Andrei Shumailov <gentoo1993@gmail.com>
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220386
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Stable-dep-of: a2e70a89fa58 ("HID: multitouch: new class MT_CLS_EGALAX_P80H84")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-ids.h | 1 +
+ drivers/hid/hid-multitouch.c | 72 ++++++++++++++++++++++++++++++++++++
+ 2 files changed, 73 insertions(+)
+
+diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
+index dfa39a37405e3..0a65490dfcb43 100644
+--- a/drivers/hid/hid-ids.h
++++ b/drivers/hid/hid-ids.h
+@@ -832,6 +832,7 @@
+ #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5
+ #define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe
+ #define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae
++#define USB_DEVICE_ID_LENOVO_YOGABOOK9I 0x6161
+ #define USB_DEVICE_ID_LENOVO_OPTICAL_USB_MOUSE_600E 0x600e
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_608D 0x608d
+ #define USB_DEVICE_ID_LENOVO_PIXART_USB_MOUSE_6019 0x6019
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 5aed9e320d306..15a3d1de1becd 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -73,6 +73,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_DISABLE_WAKEUP BIT(21)
+ #define MT_QUIRK_ORIENTATION_INVERT BIT(22)
+ #define MT_QUIRK_APPLE_TOUCHBAR BIT(23)
++#define MT_QUIRK_YOGABOOK9I BIT(24)
+
+ #define MT_INPUTMODE_TOUCHSCREEN 0x02
+ #define MT_INPUTMODE_TOUCHPAD 0x03
+@@ -217,6 +218,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_RAZER_BLADE_STEALTH 0x0112
+ #define MT_CLS_SMART_TECH 0x0113
+ #define MT_CLS_APPLE_TOUCHBAR 0x0114
++#define MT_CLS_YOGABOOK9I 0x0115
+ #define MT_CLS_SIS 0x0457
+
+ #define MT_DEFAULT_MAXCONTACT 10
+@@ -413,6 +415,14 @@ static const struct mt_class mt_classes[] = {
+ .quirks = MT_QUIRK_NOT_SEEN_MEANS_UP |
+ MT_QUIRK_ALWAYS_VALID |
+ MT_QUIRK_CONTACT_CNT_ACCURATE,
++ },
++ { .name = MT_CLS_YOGABOOK9I,
++ .quirks = MT_QUIRK_ALWAYS_VALID |
++ MT_QUIRK_FORCE_MULTI_INPUT |
++ MT_QUIRK_SEPARATE_APP_REPORT |
++ MT_QUIRK_HOVERING |
++ MT_QUIRK_YOGABOOK9I,
++ .export_all_inputs = true
+ },
+ { }
+ };
+@@ -1514,6 +1524,38 @@ static void mt_report(struct hid_device *hid, struct hid_report *report)
+ if (rdata && rdata->is_mt_collection)
+ return mt_touch_report(hid, rdata);
+
++ /* Lenovo Yoga Book 9i requires consuming and dropping certain bogus reports */
++ if (rdata && rdata->application &&
++ (rdata->application->quirks & MT_QUIRK_YOGABOOK9I)) {
++
++ bool all_zero_report = true;
++
++ for (int f = 0; f < report->maxfield && all_zero_report; f++) {
++ struct hid_field *fld = report->field[f];
++
++ for (int i = 0; i < fld->report_count; i++) {
++ unsigned int usage = fld->usage[i].hid;
++
++ if (usage == HID_DG_INRANGE ||
++ usage == HID_DG_TIPSWITCH ||
++ usage == HID_DG_BARRELSWITCH ||
++ usage == HID_DG_BARRELSWITCH2 ||
++ usage == HID_DG_CONTACTID ||
++ usage == HID_DG_TILT_X ||
++ usage == HID_DG_TILT_Y) {
++
++ if (fld->value[i] != 0) {
++ all_zero_report = false;
++ break;
++ }
++ }
++ }
++ }
++
++ if (all_zero_report)
++ return;
++ }
++
+ if (field && field->hidinput && field->hidinput->input)
+ input_sync(field->hidinput->input);
+ }
+@@ -1704,6 +1746,30 @@ static int mt_input_configured(struct hid_device *hdev, struct hid_input *hi)
+ break;
+ }
+
++ /* Lenovo Yoga Book 9i requires custom naming to allow differentiation in udev */
++ if (hi->report && td->mtclass.quirks & MT_QUIRK_YOGABOOK9I) {
++ switch (hi->report->id) {
++ case 48:
++ suffix = "Touchscreen Top";
++ break;
++ case 56:
++ suffix = "Touchscreen Bottom";
++ break;
++ case 20:
++ suffix = "Stylus Top";
++ break;
++ case 40:
++ suffix = "Stylus Bottom";
++ break;
++ case 80:
++ suffix = "Emulated Touchpad";
++ break;
++ default:
++ suffix = "";
++ break;
++ }
++ }
++
+ if (suffix) {
+ hi->input->name = devm_kasprintf(&hdev->dev, GFP_KERNEL,
+ "%s %s", hdev->name, suffix);
+@@ -2186,6 +2252,12 @@ static const struct hid_device_id mt_devices[] = {
+ USB_VENDOR_ID_LENOVO,
+ USB_DEVICE_ID_LENOVO_X12_TAB2) },
+
++ /* Lenovo Yoga Book 9i */
++ { .driver_data = MT_CLS_YOGABOOK9I,
++ HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_LENOVO,
++ USB_DEVICE_ID_LENOVO_YOGABOOK9I) },
++
+ /* Logitech devices */
+ { .driver_data = MT_CLS_NSMU,
+ HID_DEVICE(BUS_BLUETOOTH, HID_GROUP_MULTITOUCH_WIN_8,
+--
+2.51.0
+
--- /dev/null
+From 43affba0421fe1a9cc36b256cbec6d6001a276c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 May 2025 22:13:13 +0530
+Subject: HID: multitouch: Get the contact ID from HID_DG_TRANSDUCER_INDEX
+ fields in case of Apple Touch Bar
+
+From: Kerem Karabay <kekrby@gmail.com>
+
+[ Upstream commit f41d736acc039d86512951f4e874b0f5e666babf ]
+
+In Apple Touch Bar, the contact ID is contained in fields with the
+HID_DG_TRANSDUCER_INDEX usage rather than HID_DG_CONTACTID, thus differing
+from the HID spec. Add a quirk for the same.
+
+Acked-by: Benjamin Tissoires <bentiss@kernel.org>
+Signed-off-by: Kerem Karabay <kekrby@gmail.com>
+Co-developed-by: Aditya Garg <gargaditya08@live.com>
+Signed-off-by: Aditya Garg <gargaditya08@live.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Stable-dep-of: a2e70a89fa58 ("HID: multitouch: new class MT_CLS_EGALAX_P80H84")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-multitouch.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index c3a914458358c..b7c2640a61b4a 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -72,6 +72,7 @@ MODULE_LICENSE("GPL");
+ #define MT_QUIRK_FORCE_MULTI_INPUT BIT(20)
+ #define MT_QUIRK_DISABLE_WAKEUP BIT(21)
+ #define MT_QUIRK_ORIENTATION_INVERT BIT(22)
++#define MT_QUIRK_APPLE_TOUCHBAR BIT(23)
+
+ #define MT_INPUTMODE_TOUCHSCREEN 0x02
+ #define MT_INPUTMODE_TOUCHPAD 0x03
+@@ -621,6 +622,7 @@ static struct mt_application *mt_find_application(struct mt_device *td,
+ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+ struct hid_report *report)
+ {
++ struct mt_class *cls = &td->mtclass;
+ struct mt_report_data *rdata;
+ struct hid_field *field;
+ int r, n;
+@@ -645,7 +647,11 @@ static struct mt_report_data *mt_allocate_report_data(struct mt_device *td,
+
+ if (field->logical == HID_DG_FINGER || td->hdev->group != HID_GROUP_MULTITOUCH_WIN_8) {
+ for (n = 0; n < field->report_count; n++) {
+- if (field->usage[n].hid == HID_DG_CONTACTID) {
++ unsigned int hid = field->usage[n].hid;
++
++ if (hid == HID_DG_CONTACTID ||
++ (cls->quirks & MT_QUIRK_APPLE_TOUCHBAR &&
++ hid == HID_DG_TRANSDUCER_INDEX)) {
+ rdata->is_mt_collection = true;
+ break;
+ }
+@@ -823,6 +829,14 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi,
+ EV_KEY, BTN_TOUCH);
+ MT_STORE_FIELD(tip_state);
+ return 1;
++ case HID_DG_TRANSDUCER_INDEX:
++ /*
++ * Contact ID in case of Apple Touch Bars is contained
++ * in fields with HID_DG_TRANSDUCER_INDEX usage.
++ */
++ if (!(cls->quirks & MT_QUIRK_APPLE_TOUCHBAR))
++ return 0;
++ fallthrough;
+ case HID_DG_CONTACTID:
+ MT_STORE_FIELD(contactid);
+ app->touches_by_report++;
+--
+2.51.0
+
--- /dev/null
+From b7b8b5558bbc522c572460d5366dfd9167a98c02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 13:51:51 +0200
+Subject: HID: multitouch: new class MT_CLS_EGALAX_P80H84
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit a2e70a89fa58133521b2deae4427d35776bda935 ]
+
+Fixes: f9e82295eec1 ("HID: multitouch: add eGalaxTouch P80H84 support")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-multitouch.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 15a3d1de1becd..acf43847d862d 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -219,6 +219,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_SMART_TECH 0x0113
+ #define MT_CLS_APPLE_TOUCHBAR 0x0114
+ #define MT_CLS_YOGABOOK9I 0x0115
++#define MT_CLS_EGALAX_P80H84 0x0116
+ #define MT_CLS_SIS 0x0457
+
+ #define MT_DEFAULT_MAXCONTACT 10
+@@ -424,6 +425,11 @@ static const struct mt_class mt_classes[] = {
+ MT_QUIRK_YOGABOOK9I,
+ .export_all_inputs = true
+ },
++ { .name = MT_CLS_EGALAX_P80H84,
++ .quirks = MT_QUIRK_ALWAYS_VALID |
++ MT_QUIRK_IGNORE_DUPLICATES |
++ MT_QUIRK_CONTACT_CNT_ACCURATE,
++ },
+ { }
+ };
+
+@@ -2121,8 +2127,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C000) },
+- { .driver_data = MT_CLS_EGALAX,
+- MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ { .driver_data = MT_CLS_EGALAX_P80H84,
++ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+
+ /* Elan devices */
+--
+2.51.0
+
--- /dev/null
+From d4bf621166d8a789fc6ff1fc4e7fa20feae8b0cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 15:13:20 +0530
+Subject: hwmon: (aht10) Add support for dht20
+
+From: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+
+[ Upstream commit 3eaf1b631506e8de2cb37c278d5bc042521e82c1 ]
+
+Add support for dht20 temperature and humidity sensor from Aosong.
+Modify aht10 driver to handle different init command for dht20 sensor by
+adding init_cmd entry in the driver data. dht20 sensor is compatible with
+aht10 hwmon driver with this change.
+
+Tested on TI am62x SK board with dht20 sensor connected at i2c-2 port.
+
+Signed-off-by: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+Link: https://lore.kernel.org/r/2025112-94320-906858@bhairav-test.ee.iitb.ac.in
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Stable-dep-of: b7497b5a99f5 ("hwmon: (aht10) Fix initialization commands for AHT20")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/hwmon/aht10.rst | 10 +++++++++-
+ drivers/hwmon/Kconfig | 6 +++---
+ drivers/hwmon/aht10.c | 19 ++++++++++++++++---
+ 3 files changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/Documentation/hwmon/aht10.rst b/Documentation/hwmon/aht10.rst
+index 213644b4ecba6..7903b6434326d 100644
+--- a/Documentation/hwmon/aht10.rst
++++ b/Documentation/hwmon/aht10.rst
+@@ -20,6 +20,14 @@ Supported chips:
+
+ English: http://www.aosong.com/userfiles/files/media/Data%20Sheet%20AHT20.pdf
+
++ * Aosong DHT20
++
++ Prefix: 'dht20'
++
++ Addresses scanned: None
++
++ Datasheet: https://www.digikey.co.nz/en/htmldatasheets/production/9184855/0/0/1/101020932
++
+ Author: Johannes Cornelis Draaijer <jcdra1@gmail.com>
+
+
+@@ -33,7 +41,7 @@ The address of this i2c device may only be 0x38
+ Special Features
+ ----------------
+
+-AHT20 has additional CRC8 support which is sent as the last byte of the sensor
++AHT20, DHT20 has additional CRC8 support which is sent as the last byte of the sensor
+ values.
+
+ Usage Notes
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 58480a3f4683f..19622dd6ec93a 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -245,12 +245,12 @@ config SENSORS_ADT7475
+ will be called adt7475.
+
+ config SENSORS_AHT10
+- tristate "Aosong AHT10, AHT20"
++ tristate "Aosong AHT10, AHT20, DHT20"
+ depends on I2C
+ select CRC8
+ help
+- If you say yes here, you get support for the Aosong AHT10 and AHT20
+- temperature and humidity sensors
++ If you say yes here, you get support for the Aosong AHT10, AHT20 and
++ DHT20 temperature and humidity sensors
+
+ This driver can also be built as a module. If so, the module
+ will be called aht10.
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index 312ef3e987540..231aba885beaa 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,6 +37,8 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
++#define DHT20_CMD_INIT 0x71
++
+ /*
+ * Flags in the answer byte/command
+ */
+@@ -48,11 +50,12 @@
+
+ #define AHT10_MAX_POLL_INTERVAL_LEN 30
+
+-enum aht10_variant { aht10, aht20 };
++enum aht10_variant { aht10, aht20, dht20};
+
+ static const struct i2c_device_id aht10_id[] = {
+ { "aht10", aht10 },
+ { "aht20", aht20 },
++ { "dht20", dht20 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(i2c, aht10_id);
+@@ -77,6 +80,7 @@ MODULE_DEVICE_TABLE(i2c, aht10_id);
+ * AHT10/AHT20
+ * @crc8: crc8 support flag
+ * @meas_size: measurements data size
++ * @init_cmd: Initialization command
+ */
+
+ struct aht10_data {
+@@ -92,6 +96,7 @@ struct aht10_data {
+ int humidity;
+ bool crc8;
+ unsigned int meas_size;
++ u8 init_cmd;
+ };
+
+ /**
+@@ -101,13 +106,13 @@ struct aht10_data {
+ */
+ static int aht10_init(struct aht10_data *data)
+ {
+- const u8 cmd_init[] = {AHT10_CMD_INIT, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
++ const u8 cmd_init[] = {data->init_cmd, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
+ 0x00};
+ int res;
+ u8 status;
+ struct i2c_client *client = data->client;
+
+- res = i2c_master_send(client, cmd_init, 3);
++ res = i2c_master_send(client, cmd_init, sizeof(cmd_init));
+ if (res < 0)
+ return res;
+
+@@ -352,9 +357,17 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
++ data->init_cmd = AHT10_CMD_INIT;
++ break;
++ case dht20:
++ data->meas_size = AHT20_MEAS_SIZE;
++ data->crc8 = true;
++ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
++ data->init_cmd = DHT20_CMD_INIT;
+ break;
+ default:
+ data->meas_size = AHT10_MEAS_SIZE;
++ data->init_cmd = AHT10_CMD_INIT;
+ break;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 15b0ccf132ab733b9011b05369411748aa1fe1a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 01:03:31 +0800
+Subject: hwmon: (aht10) Fix initialization commands for AHT20
+
+From: Hao Yu <haoyufine@gmail.com>
+
+[ Upstream commit b7497b5a99f54ab8dcda5b14a308385b2fb03d8d ]
+
+According to the AHT20 datasheet (updated to V1.0 after the 2023.09
+version), the initialization command for AHT20 is 0b10111110 (0xBE).
+The previous sequence (0xE1) used in earlier versions is no longer
+compatible with newer AHT20 sensors. Update the initialization
+command to ensure the sensor is properly initialized.
+
+While at it, use binary notation for DHT20_CMD_INIT to match the notation
+used in the datasheet.
+
+Fixes: d2abcb5cc885 ("hwmon: (aht10) Add support for compatible aht20")
+Signed-off-by: Hao Yu <haoyufine@gmail.com>
+Link: https://lore.kernel.org/r/20260222170332.1616-3-haoyufine@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/aht10.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index 231aba885beaa..4099b5ba09824 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,7 +37,9 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
+-#define DHT20_CMD_INIT 0x71
++#define AHT20_CMD_INIT 0b10111110
++
++#define DHT20_CMD_INIT 0b01110001
+
+ /*
+ * Flags in the answer byte/command
+@@ -357,7 +359,7 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
+- data->init_cmd = AHT10_CMD_INIT;
++ data->init_cmd = AHT20_CMD_INIT;
+ break;
+ case dht20:
+ data->meas_size = AHT20_MEAS_SIZE;
+--
+2.51.0
+
--- /dev/null
+From 003c300fc3f915f3cb7c714472adde8943de3d10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:14 -0800
+Subject: hwmon: (it87) Check the it87_lock() return value
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 07ed4f05bbfd2bc014974dcc4297fd3aa1cb88c0 ]
+
+Return early in it87_resume() if it87_lock() fails instead of ignoring the
+return value of that function. This patch suppresses a Clang thread-safety
+warning.
+
+Cc: Frank Crawford <frank@crawford.emu.id.au>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Jean Delvare <jdelvare@suse.com>
+Cc: linux-hwmon@vger.kernel.org
+Fixes: 376e1a937b30 ("hwmon: (it87) Add calls to smbus_enable/smbus_disable as required")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20260223220102.2158611-15-bart.vanassche@linux.dev
+[groeck: Declare 'ret' at the beginning of it87_resume()]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/it87.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index e233aafa8856c..5cfb98a0512f0 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -3590,10 +3590,13 @@ static int it87_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct it87_data *data = dev_get_drvdata(dev);
++ int err;
+
+ it87_resume_sio(pdev);
+
+- it87_lock(data);
++ err = it87_lock(data);
++ if (err)
++ return err;
+
+ it87_check_pwm(dev);
+ it87_check_limit_regs(data);
+--
+2.51.0
+
--- /dev/null
+From b4503c3515b0b18318fadacf196c73f92e793f1c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 14:34:24 +0530
+Subject: hwmon: (max6639) : Configure based on DT property
+
+From: Naresh Solanki <naresh.solanki@9elements.com>
+
+[ Upstream commit 7506ebcd662b868780774d191a7c024c18c557a8 ]
+
+Remove platform data & initialize with defaults
+configuration & overwrite based on DT properties.
+
+Signed-off-by: Naresh Solanki <naresh.solanki@9elements.com>
+Message-ID: <20241007090426.811736-1-naresh.solanki@9elements.com>
+[groeck: Dropped some unnecessary empty lines]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Stable-dep-of: 170a4b21f49b ("hwmon: (max6639) fix inverted polarity")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max6639.c | 83 +++++++++++++++++++--------
+ include/linux/platform_data/max6639.h | 15 -----
+ 2 files changed, 60 insertions(+), 38 deletions(-)
+ delete mode 100644 include/linux/platform_data/max6639.h
+
+diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
+index c955b0f3a8d31..32b4d54b20766 100644
+--- a/drivers/hwmon/max6639.c
++++ b/drivers/hwmon/max6639.c
+@@ -19,7 +19,6 @@
+ #include <linux/hwmon-sysfs.h>
+ #include <linux/err.h>
+ #include <linux/mutex.h>
+-#include <linux/platform_data/max6639.h>
+ #include <linux/regmap.h>
+ #include <linux/util_macros.h>
+
+@@ -531,14 +530,49 @@ static int rpm_range_to_reg(int range)
+ return 1; /* default: 4000 RPM */
+ }
+
++static int max6639_probe_child_from_dt(struct i2c_client *client,
++ struct device_node *child,
++ struct max6639_data *data)
++
++{
++ struct device *dev = &client->dev;
++ u32 i;
++ int err, val;
++
++ err = of_property_read_u32(child, "reg", &i);
++ if (err) {
++ dev_err(dev, "missing reg property of %pOFn\n", child);
++ return err;
++ }
++
++ if (i > 1) {
++ dev_err(dev, "Invalid fan index reg %d\n", i);
++ return -EINVAL;
++ }
++
++ err = of_property_read_u32(child, "pulses-per-revolution", &val);
++ if (!err) {
++ if (val < 1 || val > 5) {
++ dev_err(dev, "invalid pulses-per-revolution %d of %pOFn\n", val, child);
++ return -EINVAL;
++ }
++ data->ppr[i] = val;
++ }
++
++ err = of_property_read_u32(child, "max-rpm", &val);
++ if (!err)
++ data->rpm_range[i] = rpm_range_to_reg(val);
++
++ return 0;
++}
++
+ static int max6639_init_client(struct i2c_client *client,
+ struct max6639_data *data)
+ {
+- struct max6639_platform_data *max6639_info =
+- dev_get_platdata(&client->dev);
+- int i;
+- int rpm_range = 1; /* default: 4000 RPM */
+- int err, ppr;
++ struct device *dev = &client->dev;
++ const struct device_node *np = dev->of_node;
++ struct device_node *child;
++ int i, err;
+
+ /* Reset chip to default values, see below for GCONFIG setup */
+ err = regmap_write(data->regmap, MAX6639_REG_GCONFIG, MAX6639_GCONFIG_POR);
+@@ -546,21 +580,29 @@ static int max6639_init_client(struct i2c_client *client,
+ return err;
+
+ /* Fans pulse per revolution is 2 by default */
+- if (max6639_info && max6639_info->ppr > 0 &&
+- max6639_info->ppr < 5)
+- ppr = max6639_info->ppr;
+- else
+- ppr = 2;
++ data->ppr[0] = 2;
++ data->ppr[1] = 2;
++
++ /* default: 4000 RPM */
++ data->rpm_range[0] = 1;
++ data->rpm_range[1] = 1;
+
+- data->ppr[0] = ppr;
+- data->ppr[1] = ppr;
++ for_each_child_of_node(np, child) {
++ if (strcmp(child->name, "fan"))
++ continue;
+
+- if (max6639_info)
+- rpm_range = rpm_range_to_reg(max6639_info->rpm_range);
+- data->rpm_range[0] = rpm_range;
+- data->rpm_range[1] = rpm_range;
++ err = max6639_probe_child_from_dt(client, child, data);
++ if (err) {
++ of_node_put(child);
++ return err;
++ }
++ }
+
+ for (i = 0; i < MAX6639_NUM_CHANNELS; i++) {
++ err = regmap_set_bits(data->regmap, MAX6639_REG_OUTPUT_MASK, BIT(1 - i));
++ if (err)
++ return err;
++
+ /* Set Fan pulse per revolution */
+ err = max6639_set_ppr(data, i, data->ppr[i]);
+ if (err)
+@@ -573,12 +615,7 @@ static int max6639_init_client(struct i2c_client *client,
+ return err;
+
+ /* Fans PWM polarity high by default */
+- if (max6639_info) {
+- if (max6639_info->pwm_polarity == 0)
+- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+- else
+- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+- }
++ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
+ if (err)
+ return err;
+
+diff --git a/include/linux/platform_data/max6639.h b/include/linux/platform_data/max6639.h
+deleted file mode 100644
+index 65bfdb4fdc157..0000000000000
+--- a/include/linux/platform_data/max6639.h
++++ /dev/null
+@@ -1,15 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-#ifndef _LINUX_MAX6639_H
+-#define _LINUX_MAX6639_H
+-
+-#include <linux/types.h>
+-
+-/* platform data for the MAX6639 temperature sensor and fan control */
+-
+-struct max6639_platform_data {
+- bool pwm_polarity; /* Polarity low (0) or high (1, default) */
+- int ppr; /* Pulses per rotation 1..4 (default == 2) */
+- int rpm_range; /* 2000, 4000 (default), 8000 or 16000 */
+-};
+-
+-#endif /* _LINUX_MAX6639_H */
+--
+2.51.0
+
--- /dev/null
+From 4ae303a857e295feb73a95b9e233a8cc869b86c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 22:20:39 +0100
+Subject: hwmon: (max6639) fix inverted polarity
+
+From: Olivier Sobrie <olivier@sobrie.be>
+
+[ Upstream commit 170a4b21f49b3dcff3115b4c90758f0a0d77375a ]
+
+According to MAX6639 documentation:
+
+ D1: PWM Output Polarity. PWM output is low at
+ 100% duty cycle when this bit is set to zero. PWM
+ output is high at 100% duty cycle when this bit is set
+ to 1.
+
+Up to commit 0f33272b60ed ("hwmon: (max6639) : Update hwmon init using
+info structure"), the polarity was set to high (0x2) when no platform
+data was set. After the patch, the polarity register wasn't set anymore
+if no platform data was specified. Nowadays, since commit 7506ebcd662b
+("hwmon: (max6639) : Configure based on DT property"), it is always set
+to low which doesn't match with the comment above and change the
+behavior compared to versions prior 0f33272b60ed.
+
+Fixes: 0f33272b60ed ("hwmon: (max6639) : Update hwmon init using info structure")
+Signed-off-by: Olivier Sobrie <olivier@sobrie.be>
+Link: https://lore.kernel.org/r/20260304212039.570274-1-olivier@sobrie.be
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max6639.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
+index 32b4d54b20766..0b0a9f4c2307f 100644
+--- a/drivers/hwmon/max6639.c
++++ b/drivers/hwmon/max6639.c
+@@ -615,7 +615,7 @@ static int max6639_init_client(struct i2c_client *client,
+ return err;
+
+ /* Fans PWM polarity high by default */
+- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
++ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ return err;
+
+--
+2.51.0
+
--- /dev/null
+From 8641ce03e817968302f59fce85d9a537e7abefd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 18:41:15 -0700
+Subject: i2c: i801: Revert "i2c: i801: replace acpi_lock with I2C bus lock"
+
+From: Charles Haithcock <chaithco@redhat.com>
+
+[ Upstream commit cfc69c2e6c699c96949f7b0455195b0bfb7dc715 ]
+
+This reverts commit f707d6b9e7c18f669adfdb443906d46cfbaaa0c1.
+
+Under rare circumstances, multiple udev threads can collect i801 device
+info on boot and walk i801_acpi_io_handler somewhat concurrently. The
+first will note the area is reserved by acpi to prevent further touches.
+This ultimately causes the area to be deregistered. The second will
+enter i801_acpi_io_handler after the area is unregistered but before a
+check can be made that the area is unregistered. i2c_lock_bus relies on
+the now unregistered area containing lock_ops to lock the bus. The end
+result is a kernel panic on boot with the following backtrace;
+
+[ 14.971872] ioatdma 0000:09:00.2: enabling device (0100 -> 0102)
+[ 14.971873] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[ 14.971880] #PF: supervisor read access in kernel mode
+[ 14.971884] #PF: error_code(0x0000) - not-present page
+[ 14.971887] PGD 0 P4D 0
+[ 14.971894] Oops: 0000 [#1] PREEMPT SMP PTI
+[ 14.971900] CPU: 5 PID: 956 Comm: systemd-udevd Not tainted 5.14.0-611.5.1.el9_7.x86_64 #1
+[ 14.971905] Hardware name: XXXXXXXXXXXXXXXXXXXXXXX BIOS 1.20.10.SV91 01/30/2023
+[ 14.971908] RIP: 0010:i801_acpi_io_handler+0x2d/0xb0 [i2c_i801]
+[ 14.971929] Code: 00 00 49 8b 40 20 41 57 41 56 4d 8b b8 30 04 00 00 49 89 ce 41 55 41 89 d5 41 54 49 89 f4 be 02 00 00 00 55 4c 89 c5 53 89 fb <48> 8b 00 4c 89 c7 e8 18 61 54 e9 80 bd 80 04 00 00 00 75 09 4c 3b
+[ 14.971933] RSP: 0018:ffffbaa841483838 EFLAGS: 00010282
+[ 14.971938] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff9685e01ba568
+[ 14.971941] RDX: 0000000000000008 RSI: 0000000000000002 RDI: 0000000000000000
+[ 14.971944] RBP: ffff9685ca22f028 R08: ffff9685ca22f028 R09: ffff9685ca22f028
+[ 14.971948] R10: 000000000000000b R11: 0000000000000580 R12: 0000000000000580
+[ 14.971951] R13: 0000000000000008 R14: ffff9685e01ba568 R15: ffff9685c222f000
+[ 14.971954] FS: 00007f8287c0ab40(0000) GS:ffff96a47f940000(0000) knlGS:0000000000000000
+[ 14.971959] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 14.971963] CR2: 0000000000000000 CR3: 0000000168090001 CR4: 00000000003706f0
+[ 14.971966] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 14.971968] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 14.971972] Call Trace:
+[ 14.971977] <TASK>
+[ 14.971981] ? show_trace_log_lvl+0x1c4/0x2df
+[ 14.971994] ? show_trace_log_lvl+0x1c4/0x2df
+[ 14.972003] ? acpi_ev_address_space_dispatch+0x16e/0x3c0
+[ 14.972014] ? __die_body.cold+0x8/0xd
+[ 14.972021] ? page_fault_oops+0x132/0x170
+[ 14.972028] ? exc_page_fault+0x61/0x150
+[ 14.972036] ? asm_exc_page_fault+0x22/0x30
+[ 14.972045] ? i801_acpi_io_handler+0x2d/0xb0 [i2c_i801]
+[ 14.972061] acpi_ev_address_space_dispatch+0x16e/0x3c0
+[ 14.972069] ? __pfx_i801_acpi_io_handler+0x10/0x10 [i2c_i801]
+[ 14.972085] acpi_ex_access_region+0x5b/0xd0
+[ 14.972093] acpi_ex_field_datum_io+0x73/0x2e0
+[ 14.972100] acpi_ex_read_data_from_field+0x8e/0x230
+[ 14.972106] acpi_ex_resolve_node_to_value+0x23d/0x310
+[ 14.972114] acpi_ds_evaluate_name_path+0xad/0x110
+[ 14.972121] acpi_ds_exec_end_op+0x321/0x510
+[ 14.972127] acpi_ps_parse_loop+0xf7/0x680
+[ 14.972136] acpi_ps_parse_aml+0x17a/0x3d0
+[ 14.972143] acpi_ps_execute_method+0x137/0x270
+[ 14.972150] acpi_ns_evaluate+0x1f4/0x2e0
+[ 14.972158] acpi_evaluate_object+0x134/0x2f0
+[ 14.972164] acpi_evaluate_integer+0x50/0xe0
+[ 14.972173] ? vsnprintf+0x24b/0x570
+[ 14.972181] acpi_ac_get_state.part.0+0x23/0x70
+[ 14.972189] get_ac_property+0x4e/0x60
+[ 14.972195] power_supply_show_property+0x90/0x1f0
+[ 14.972205] add_prop_uevent+0x29/0x90
+[ 14.972213] power_supply_uevent+0x109/0x1d0
+[ 14.972222] dev_uevent+0x10e/0x2f0
+[ 14.972228] uevent_show+0x8e/0x100
+[ 14.972236] dev_attr_show+0x19/0x40
+[ 14.972246] sysfs_kf_seq_show+0x9b/0x100
+[ 14.972253] seq_read_iter+0x120/0x4b0
+[ 14.972262] ? selinux_file_permission+0x106/0x150
+[ 14.972273] vfs_read+0x24f/0x3a0
+[ 14.972284] ksys_read+0x5f/0xe0
+[ 14.972291] do_syscall_64+0x5f/0xe0
+...
+
+The kernel panic is mitigated by limiting the count of udev
+children to 1. Revert to using the acpi_lock to continue protecting
+marking the area as owned by firmware without relying on a lock in
+a potentially unmapped region of memory.
+
+Fixes: f707d6b9e7c1 ("i2c: i801: replace acpi_lock with I2C bus lock")
+Signed-off-by: Charles Haithcock <chaithco@redhat.com>
+[wsa: added Fixes-tag and updated comment stating the importance of the lock]
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-i801.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index be7ca6a0ebeb8..24363acfc3f8c 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -303,9 +303,10 @@ struct i801_priv {
+
+ /*
+ * If set to true the host controller registers are reserved for
+- * ACPI AML use.
++ * ACPI AML use. Needs extra protection by acpi_lock.
+ */
+ bool acpi_reserved;
++ struct mutex acpi_lock;
+ };
+
+ #define FEATURE_SMBUS_PEC BIT(0)
+@@ -893,8 +894,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ int hwpec, ret;
+ struct i801_priv *priv = i2c_get_adapdata(adap);
+
+- if (priv->acpi_reserved)
++ mutex_lock(&priv->acpi_lock);
++ if (priv->acpi_reserved) {
++ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
++ }
+
+ pm_runtime_get_sync(&priv->pci_dev->dev);
+
+@@ -935,6 +939,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+
+ pm_runtime_mark_last_busy(&priv->pci_dev->dev);
+ pm_runtime_put_autosuspend(&priv->pci_dev->dev);
++ mutex_unlock(&priv->acpi_lock);
+ return ret;
+ }
+
+@@ -1586,7 +1591,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+- i2c_lock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
++ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
+ priv->acpi_reserved = true;
+@@ -1606,7 +1611,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+- i2c_unlock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
++ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+ }
+@@ -1666,6 +1671,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ priv->adapter.dev.parent = &dev->dev;
+ acpi_use_parent_companion(&priv->adapter.dev);
+ priv->adapter.retries = 3;
++ mutex_init(&priv->acpi_lock);
+
+ priv->pci_dev = dev;
+ priv->features = id->driver_data;
+--
+2.51.0
+
--- /dev/null
+From df5282215241da759c87e40bd8ebcc053d7acd3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 11:50:23 +0100
+Subject: i40e: Fix preempt count leak in napi poll tracepoint
+
+From: Thomas Gleixner <tglx@kernel.org>
+
+[ Upstream commit 4b3d54a85bd37ebf2d9836f0d0de775c0ff21af9 ]
+
+Using get_cpu() in the tracepoint assignment causes an obvious preempt
+count leak because nothing invokes put_cpu() to undo it:
+
+ softirq: huh, entered softirq 3 NET_RX with preempt_count 00000100, exited with 00000101?
+
+This clearly has seen a lot of testing in the last 3+ years...
+
+Use smp_processor_id() instead.
+
+Fixes: 6d4d584a7ea8 ("i40e: Add i40e_napi_poll tracepoint")
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Cc: Tony Nguyen <anthony.l.nguyen@intel.com>
+Cc: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Cc: intel-wired-lan@lists.osuosl.org
+Cc: netdev@vger.kernel.org
+Reviewed-by: Joe Damato <joe@dama.to>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
+index 759f3d1c4c8f0..dde0ccd789ed1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
+@@ -88,7 +88,7 @@ TRACE_EVENT(i40e_napi_poll,
+ __entry->rx_clean_complete = rx_clean_complete;
+ __entry->tx_clean_complete = tx_clean_complete;
+ __entry->irq_num = q->irq_num;
+- __entry->curr_cpu = get_cpu();
++ __entry->curr_cpu = smp_processor_id();
+ __assign_str(qname);
+ __assign_str(dev_name);
+ __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+--
+2.51.0
+
--- /dev/null
+From b480303bc31c1c4f27f48d036d84f000bb23eecf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:46 +0100
+Subject: i40e: fix registering XDP RxQ info
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8f497dc8a61429cc004720aa8e713743355d80cf ]
+
+Current way of handling XDP RxQ info in i40e has a problem, where frag_size
+is not updated when xsk_buff_pool is detached or when MTU is changed, this
+leads to growing tail always failing for multi-buffer packets.
+
+Couple XDP RxQ info registering with buffer allocations and unregistering
+with cleaning the ring.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-6-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 34 ++++++++++++---------
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +--
+ 2 files changed, 22 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 31c83fc69cf41..981c01dce0cdf 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3633,18 +3633,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+- err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->queue_index,
+- ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
+- if (err)
+- return err;
+- }
+-
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+@@ -3656,17 +3646,23 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ }
+
+ skip:
+@@ -3704,7 +3700,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* set the context in the HMC */
+@@ -3713,7 +3710,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* configure Rx buffer alignment */
+@@ -3721,7 +3719,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_info(&vsi->back->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto unreg_xdp;
+ }
+ clear_ring_build_skb_enabled(ring);
+ } else {
+@@ -3751,6 +3750,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ return 0;
++unreg_xdp:
++ if (ring->vsi->type == I40E_VSI_MAIN)
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
++
++ return err;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index ca7517a68a2c3..bca8398a6ab4b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1469,6 +1469,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ if (!rx_ring->rx_bi)
+ return;
+
++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++
+ if (rx_ring->xsk_pool) {
+ i40e_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+@@ -1526,8 +1529,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+ {
+ i40e_clean_rx_ring(rx_ring);
+- if (rx_ring->vsi->type == I40E_VSI_MAIN)
+- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+--
+2.51.0
+
--- /dev/null
+From 6b11c379dadb8c564cef93a48e9a74a8a0cb1329 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:47 +0100
+Subject: i40e: use xdp.frame_sz as XDP RxQ info frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit c69d22c6c46a1d792ba8af3d8d6356fdc0e6f538 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects whole buffer size instead
+of DMA write size. Different assumptions in i40e driver configuration lead
+to negative tailroom.
+
+Set frag_size to the same value as frame_sz in shared pages mode, use new
+helper to set frag_size when AF_XDP ZC is active.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-7-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 981c01dce0cdf..e7a06db26c915 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3619,6 +3619,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
++ u32 xdp_frame_sz;
+ int err = 0;
+ bool ok;
+
+@@ -3628,6 +3629,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
++ xdp_frame_sz = i40e_rx_pg_size(ring) / 2;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+@@ -3635,11 +3637,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
++ xdp_frame_sz = xsk_pool_get_rx_frag_step(ring->xsk_pool);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3655,7 +3658,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3666,7 +3669,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ skip:
+- xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
++ xdp_init_buff(&ring->xdp, xdp_frame_sz, &ring->xdp_rxq);
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+--
+2.51.0
+
--- /dev/null
+From 0c6ba82a195534a7f5a4ad56dbd17af6bd604cdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 15:57:14 +0000
+Subject: iavf: fix netdev->max_mtu to respect actual hardware limit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b84852170153671bb0fa6737a6e48370addd8e1a ]
+
+iavf sets LIBIE_MAX_MTU as netdev->max_mtu, ignoring vf_res->max_mtu
+from PF [1]. This allows setting an MTU beyond the actual hardware
+limit, causing TX queue timeouts [2].
+
+Set correct netdev->max_mtu using vf_res->max_mtu from the PF.
+
+Note that currently PF drivers such as ice/i40e set the frame size in
+vf_res->max_mtu, not MTU. Convert vf_res->max_mtu to MTU before setting
+netdev->max_mtu.
+
+[1]
+ # ip -j -d link show $DEV | jq '.[0].max_mtu'
+ 16356
+
+[2]
+ iavf 0000:00:05.0 enp0s5: NETDEV WATCHDOG: CPU: 1: transmit queue 0 timed out 5692 ms
+ iavf 0000:00:05.0 enp0s5: NIC Link is Up Speed is 10 Gbps Full Duplex
+ iavf 0000:00:05.0 enp0s5: NETDEV WATCHDOG: CPU: 6: transmit queue 3 timed out 5312 ms
+ iavf 0000:00:05.0 enp0s5: NIC Link is Up Speed is 10 Gbps Full Duplex
+ ...
+
+Fixes: 5fa4caff59f2 ("iavf: switch to Page Pool")
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_main.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 422af897d9330..dcd4f172ddc8a 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2630,7 +2630,22 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+- netdev->max_mtu = LIBIE_MAX_MTU;
++
++ /* PF/VF API: vf_res->max_mtu is max frame size (not MTU).
++ * Convert to MTU.
++ */
++ if (!adapter->vf_res->max_mtu) {
++ netdev->max_mtu = LIBIE_MAX_MTU;
++ } else if (adapter->vf_res->max_mtu < LIBETH_RX_LL_LEN + ETH_MIN_MTU ||
++ adapter->vf_res->max_mtu >
++ LIBETH_RX_LL_LEN + LIBIE_MAX_MTU) {
++ netdev_warn_once(adapter->netdev,
++ "invalid max frame size %d from PF, using default MTU %d",
++ adapter->vf_res->max_mtu, LIBIE_MAX_MTU);
++ netdev->max_mtu = LIBIE_MAX_MTU;
++ } else {
++ netdev->max_mtu = adapter->vf_res->max_mtu - LIBETH_RX_LL_LEN;
++ }
+
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+--
+2.51.0
+
--- /dev/null
+From f9a909adfab132300102dc61c66e4460437f6e31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 21:55:59 +0000
+Subject: idpf: change IRQ naming to match netdev and ethtool queue numbering
+
+From: Brian Vazquez <brianvv@google.com>
+
+[ Upstream commit 1500a8662d2d41d6bb03e034de45ddfe6d7d362d ]
+
+The code uses the vidx for the IRQ name but that doesn't match ethtool
+reporting nor netdev naming, this makes it hard to tune the device and
+associate queues with IRQs. Sequentially requesting irqs starting from
+'0' makes the output consistent.
+
+This commit changes the interrupt numbering but preserves the name
+format, maintaining ABI compatibility. Existing tools relying on the old
+numbering are already non-functional, as they lack a useful correlation
+to the interrupts.
+
+Before:
+
+ethtool -L eth1 tx 1 combined 3
+
+grep . /proc/irq/*/*idpf*/../smp_affinity_list
+/proc/irq/67/idpf-Mailbox-0/../smp_affinity_list:0-55,112-167
+/proc/irq/68/idpf-eth1-TxRx-1/../smp_affinity_list:0
+/proc/irq/70/idpf-eth1-TxRx-3/../smp_affinity_list:1
+/proc/irq/71/idpf-eth1-TxRx-4/../smp_affinity_list:2
+/proc/irq/72/idpf-eth1-Tx-5/../smp_affinity_list:3
+
+ethtool -S eth1 | grep -v ': 0'
+NIC statistics:
+ tx_q-0_pkts: 1002
+ tx_q-1_pkts: 2679
+ tx_q-2_pkts: 1113
+ tx_q-3_pkts: 1192 <----- tx_q-3 vs idpf-eth1-Tx-5
+ rx_q-0_pkts: 1143
+ rx_q-1_pkts: 3172
+ rx_q-2_pkts: 1074
+
+After:
+
+ethtool -L eth1 tx 1 combined 3
+
+grep . /proc/irq/*/*idpf*/../smp_affinity_list
+
+/proc/irq/67/idpf-Mailbox-0/../smp_affinity_list:0-55,112-167
+/proc/irq/68/idpf-eth1-TxRx-0/../smp_affinity_list:0
+/proc/irq/70/idpf-eth1-TxRx-1/../smp_affinity_list:1
+/proc/irq/71/idpf-eth1-TxRx-2/../smp_affinity_list:2
+/proc/irq/72/idpf-eth1-Tx-3/../smp_affinity_list:3
+
+ethtool -S eth1 | grep -v ': 0'
+NIC statistics:
+ tx_q-0_pkts: 118
+ tx_q-1_pkts: 134
+ tx_q-2_pkts: 228
+ tx_q-3_pkts: 138 <--- tx_q-3 matches idpf-eth1-Tx-3
+ rx_q-0_pkts: 111
+ rx_q-1_pkts: 366
+ rx_q-2_pkts: 120
+
+Fixes: d4d558718266 ("idpf: initialize interrupts and enable vport")
+Signed-off-by: Brian Vazquez <brianvv@google.com>
+Reviewed-by: Brett Creeley <brett.creeley@amd.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 3ddf7b1e85ef4..6d33783ac8db4 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -3477,7 +3477,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+ continue;
+
+ name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
+- vec_name, vidx);
++ vec_name, vector);
+
+ err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
+ name, q_vector);
+--
+2.51.0
+
--- /dev/null
+From 617b900dd1033ea3f993fe9a76aaab09a49872bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index 35227d47cfc98..dc272b514a01b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From 85128a481e1711cbb96eac9503dfa2db3619855f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index aeac45af3a22a..0f741aa154faf 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1064,7 +1064,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From 8da4bfbaaa617729d332223672cbc040ec88ad9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Dec 2025 16:41:20 +0800
+Subject: kselftest/harness: Use helper to avoid zero-size memset warning
+
+From: Wake Liu <wakel@google.com>
+
+[ Upstream commit 19b8a76cd99bde6d299e60490f3e62b8d3df3997 ]
+
+When building kselftests with a toolchain that enables source
+fortification (e.g., Android's build environment, which uses
+-D_FORTIFY_SOURCE=3), a build failure occurs in tests that use an
+empty FIXTURE().
+
+The root cause is that an empty fixture struct results in
+`sizeof(self_private)` evaluating to 0. The compiler's fortification
+checks then detect the `memset()` call with a compile-time constant size
+of 0, issuing a `-Wuser-defined-warnings` which is promoted to an error
+by `-Werror`.
+
+An initial attempt to guard the call with `if (sizeof(self_private) > 0)`
+was insufficient. The compiler's static analysis is aggressive enough
+to flag the `memset(..., 0)` pattern before evaluating the conditional,
+thus still triggering the error.
+
+To resolve this robustly, this change introduces a `static inline`
+helper function, `__kselftest_memset_safe()`. This function wraps the
+size check and the `memset()` call. By replacing the direct `memset()`
+in the `__TEST_F_IMPL` macro with a call to this helper, we create an
+abstraction boundary. This prevents the compiler's static analyzer from
+"seeing" the problematic pattern at the macro expansion site, resolving
+the build failure.
+
+Build Context:
+Compiler: Android (14488419, +pgo, +bolt, +lto, +mlgo, based on r584948) clang version 22.0.0 (https://android.googlesource.com/toolchain/llvm-project 2d65e4108033380e6fe8e08b1f1826cd2bfb0c99)
+Relevant Options: -O2 -Wall -Werror -D_FORTIFY_SOURCE=3 -target i686-linux-android10000
+
+Test: m kselftest_futex_futex_requeue_pi
+
+Removed Gerrit Change-Id
+Shuah Khan <skhan@linuxfoundation.org>
+
+Link: https://lore.kernel.org/r/20251224084120.249417-1-wakel@google.com
+Signed-off-by: Wake Liu <wakel@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 6be268151426 ("selftests/harness: order TEST_F and XFAIL_ADD constructors")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kselftest_harness.h | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 666c9fde76da9..d67ec4d762db3 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -69,6 +69,12 @@
+
+ #include "kselftest.h"
+
++static inline void __kselftest_memset_safe(void *s, int c, size_t n)
++{
++ if (n > 0)
++ memset(s, c, n);
++}
++
+ #define TEST_TIMEOUT_DEFAULT 30
+
+ /* Utilities exposed to the test definitions */
+@@ -418,7 +424,7 @@
+ self = mmap(NULL, sizeof(*self), PROT_READ | PROT_WRITE, \
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0); \
+ } else { \
+- memset(&self_private, 0, sizeof(self_private)); \
++ __kselftest_memset_safe(&self_private, 0, sizeof(self_private)); \
+ self = &self_private; \
+ } \
+ } \
+--
+2.51.0
+
--- /dev/null
+From 7a2cf1d769aa6045ea6b0d10dbab4e253d7c6eb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:14:10 +0545
+Subject: kunit: tool: copy caller args in run_kernel to prevent mutation
+
+From: Shuvam Pandey <shuvampandey1@gmail.com>
+
+[ Upstream commit 40804c4974b8df2adab72f6475d343eaff72b7f6 ]
+
+run_kernel() appended KUnit flags directly to the caller-provided args
+list. When exec_tests() calls run_kernel() repeatedly (e.g. with
+--run_isolated), each call mutated the same list, causing later runs
+to inherit stale filter_glob values and duplicate kunit.enable flags.
+
+Fix this by copying args at the start of run_kernel(). Add a regression
+test that calls run_kernel() twice with the same list and verifies the
+original remains unchanged.
+
+Fixes: ff9e09a3762f ("kunit: tool: support running each suite/test separately")
+Signed-off-by: Shuvam Pandey <shuvampandey1@gmail.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_kernel.py | 6 ++++--
+ tools/testing/kunit/kunit_tool_test.py | 26 ++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index 61931c4926fd6..12b0f2ee56656 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -333,8 +333,10 @@ class LinuxSourceTree:
+ return self.validate_config(build_dir)
+
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+- if not args:
+- args = []
++ # Copy to avoid mutating the caller-supplied list. exec_tests() reuses
++ # the same args across repeated run_kernel() calls (e.g. --run_isolated),
++ # so appending to the original would accumulate stale flags on each call.
++ args = list(args) if args else []
+ if filter_glob:
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index 2beb7327e53fc..70e5d0abe87f3 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -477,6 +477,32 @@ class LinuxSourceTreeTest(unittest.TestCase):
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
++ def test_run_kernel_args_not_mutated(self):
++ """Verify run_kernel() copies args so callers can reuse them."""
++ start_calls = []
++
++ def fake_start(start_args, unused_build_dir):
++ start_calls.append(list(start_args))
++ return subprocess.Popen(['printf', 'KTAP version 1\n'],
++ text=True, stdout=subprocess.PIPE)
++
++ with tempfile.TemporaryDirectory('') as build_dir:
++ tree = kunit_kernel.LinuxSourceTree(build_dir,
++ kunitconfig_paths=[os.devnull])
++ with mock.patch.object(tree._ops, 'start', side_effect=fake_start), \
++ mock.patch.object(kunit_kernel.subprocess, 'call'):
++ kernel_args = ['mem=1G']
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test1'):
++ pass
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test2'):
++ pass
++ self.assertEqual(kernel_args, ['mem=1G'],
++ 'run_kernel() should not modify caller args')
++ self.assertIn('kunit.filter_glob=suite.test1', start_calls[0])
++ self.assertIn('kunit.filter_glob=suite.test2', start_calls[1])
++
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+--
+2.51.0
+
--- /dev/null
+From 65253b3bca9ba92be2d79acb1a20c4864c6a8d29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 26b79feb385d2..3768cc9c8ecb3 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -72,7 +72,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 44459c9d2ce77..e22088b07e70b 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -165,7 +165,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From 4475826c03c95888e109b039a350cc20800708b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 18:13:14 -0300
+Subject: net: dsa: realtek: rtl8365mb: fix rtl8365mb_phy_ocp_write return
+ value
+
+From: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+
+[ Upstream commit 7cbe98f7bef965241a5908d50d557008cf998aee ]
+
+Function rtl8365mb_phy_ocp_write() always returns 0, even when an error
+occurs during register access. This patch fixes the return value to
+propagate the actual error code from regmap operations.
+
+Link: https://lore.kernel.org/netdev/a2dfde3c-d46f-434b-9d16-1e251e449068@yahoo.com/
+Fixes: 2796728460b8 ("net: dsa: realtek: rtl8365mb: serialize indirect PHY register access")
+Signed-off-by: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Link: https://patch.msgid.link/20260301-realtek_namiltd_fix1-v1-1-43a6bb707f9c@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/rtl8365mb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index ad7044b295ec1..74a8336174e50 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -769,7 +769,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
+ out:
+ rtl83xx_unlock(priv);
+
+- return 0;
++ return ret;
+ }
+
+ static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+--
+2.51.0
+
--- /dev/null
+From 0638062b415a44e9afccc024fc9c39bbbc3a8566 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 18:56:39 +0100
+Subject: net: ethernet: mtk_eth_soc: Reset prog ptr to old_prog in case of
+ error in mtk_xdp_setup()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 0abc73c8a40fd64ac1739c90bb4f42c418d27a5e ]
+
+Reset eBPF program pointer to old_prog and do not decrease its ref-count
+if mtk_open routine in mtk_xdp_setup() fails.
+
+Fixes: 7c26c20da5d42 ("net: ethernet: mtk_eth_soc: add basic XDP support")
+Suggested-by: Paolo Valerio <pvalerio@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260303-mtk-xdp-prog-ptr-fix-v2-1-97b6dbbe240f@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index 64d86068b51eb..45d4bac984a52 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3566,12 +3566,21 @@ static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
++
++ if (netif_running(dev) && need_update) {
++ int err;
++
++ err = mtk_open(dev);
++ if (err) {
++ rcu_assign_pointer(eth->prog, old_prog);
++
++ return err;
++ }
++ }
++
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+- if (netif_running(dev) && need_update)
+- return mtk_open(dev);
+-
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 87879570d8ba3878b541bf4ea5a3e89e23d4594c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 6b5cff087686e..68049bb2bd989 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -290,7 +290,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index dc5e247ca5d1a..a6bb09545c608 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -443,14 +443,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From 7c9ceae0496b504826b2d1219268ab8858160d67 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 14:02:47 +0800
+Subject: net: ipv4: fix ARM64 alignment fault in multipath hash seed
+
+From: Yung Chih Su <yuuchihsu@gmail.com>
+
+[ Upstream commit 4ee7fa6cf78ff26d783d39e2949d14c4c1cd5e7f ]
+
+`struct sysctl_fib_multipath_hash_seed` contains two u32 fields
+(user_seed and mp_seed), making it an 8-byte structure with a 4-byte
+alignment requirement.
+
+In `fib_multipath_hash_from_keys()`, the code evaluates the entire
+struct atomically via `READ_ONCE()`:
+
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+
+While this silently works on GCC by falling back to unaligned regular
+loads which the ARM64 kernel tolerates, it causes a fatal kernel panic
+when compiled with Clang and LTO enabled.
+
+Commit e35123d83ee3 ("arm64: lto: Strengthen READ_ONCE() to acquire
+when CONFIG_LTO=y") strengthens `READ_ONCE()` to use Load-Acquire
+instructions (`ldar` / `ldapr`) to prevent compiler reordering bugs
+under Clang LTO. Since the macro evaluates the full 8-byte struct,
+Clang emits a 64-bit `ldar` instruction. ARM64 architecture strictly
+requires `ldar` to be naturally aligned, thus executing it on a 4-byte
+aligned address triggers a strict Alignment Fault (FSC = 0x21).
+
+Fix the read side by moving the `READ_ONCE()` directly to the `u32`
+member, which emits a safe 32-bit `ldar Wn`.
+
+Furthermore, Eric Dumazet pointed out that `WRITE_ONCE()` on the entire
+struct in `proc_fib_multipath_hash_set_seed()` is also flawed. Analysis
+shows that Clang splits this 8-byte write into two separate 32-bit
+`str` instructions. While this avoids an alignment fault, it destroys
+atomicity and exposes a tear-write vulnerability. Fix this by
+explicitly splitting the write into two 32-bit `WRITE_ONCE()`
+operations.
+
+Finally, add the missing `READ_ONCE()` when reading `user_seed` in
+`proc_fib_multipath_hash_seed()` to ensure proper pairing and
+concurrency safety.
+
+Fixes: 4ee2a8cace3f ("net: ipv4: Add a sysctl to set multipath hash seed")
+Signed-off-by: Yung Chih Su <yuuchihsu@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260302060247.7066-1-yuuchihsu@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/ip_fib.h | 2 +-
+ net/ipv4/sysctl_net_ipv4.c | 5 +++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index 967e4dc555fac..339b92cd5cec6 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -544,7 +544,7 @@ static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ siphash_aligned_key_t hash_key;
+ u32 mp_seed;
+
+- mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
++ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed);
+ fib_multipath_hash_construct_key(&hash_key, mp_seed);
+
+ return flow_hash_from_keys_seed(keys, &hash_key);
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index a79b2a52ce01e..8d411cce0aedc 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -481,7 +481,8 @@ static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+ proc_fib_multipath_hash_rand_seed),
+ };
+
+- WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new);
++ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.user_seed, new.user_seed);
++ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed, new.mp_seed);
+ }
+
+ static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write,
+@@ -495,7 +496,7 @@ static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write
+ int ret;
+
+ mphs = &net->ipv4.sysctl_fib_multipath_hash_seed;
+- user_seed = mphs->user_seed;
++ user_seed = READ_ONCE(mphs->user_seed);
+
+ tmp = *table;
+ tmp.data = &user_seed;
+--
+2.51.0
+
--- /dev/null
+From 4855ac5b19bb5732136441a8b9c24fdc88b73fb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 0f741aa154faf..7b9279d4c363c 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3577,7 +3577,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3619,11 +3618,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 1eb5b3a268ff44edfe46300e569c44058b0f0c75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 1bdaf680b488c..3c42b149c729c 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1471,10 +1471,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From d8f82a895b36746bb0258e17a059f7b08d7aad9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 13:23:36 -0700
+Subject: net/rds: Fix circular locking dependency in rds_tcp_tune
+
+From: Allison Henderson <achender@kernel.org>
+
+[ Upstream commit 6a877ececd6daa002a9a0002cd0fbca6592a9244 ]
+
+syzbot reported a circular locking dependency in rds_tcp_tune() where
+sk_net_refcnt_upgrade() is called while holding the socket lock:
+
+======================================================
+WARNING: possible circular locking dependency detected
+======================================================
+kworker/u10:8/15040 is trying to acquire lock:
+ffffffff8e9aaf80 (fs_reclaim){+.+.}-{0:0},
+at: __kmalloc_cache_noprof+0x4b/0x6f0
+
+but task is already holding lock:
+ffff88805a3c1ce0 (k-sk_lock-AF_INET6){+.+.}-{0:0},
+at: rds_tcp_tune+0xd7/0x930
+
+The issue occurs because sk_net_refcnt_upgrade() performs memory
+allocation (via get_net_track() -> ref_tracker_alloc()) while the
+socket lock is held, creating a circular dependency with fs_reclaim.
+
+Fix this by moving sk_net_refcnt_upgrade() outside the socket lock
+critical section. This is safe because the fields modified by the
+sk_net_refcnt_upgrade() call (sk_net_refcnt, ns_tracker) are not
+accessed by any concurrent code path at this point.
+
+v2:
+ - Corrected fixes tag
+ - check patch line wrap nits
+ - ai commentary nits
+
+Reported-by: syzbot+2e2cf5331207053b8106@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2e2cf5331207053b8106
+Fixes: 3a58f13a881e ("net: rds: acquire refcount on TCP sockets")
+Signed-off-by: Allison Henderson <achender@kernel.org>
+Link: https://patch.msgid.link/20260227202336.167757-1-achender@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rds/tcp.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 3cc2f303bf786..b66dfcc3efaa0 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -495,18 +495,24 @@ bool rds_tcp_tune(struct socket *sock)
+ struct rds_tcp_net *rtn;
+
+ tcp_sock_set_nodelay(sock->sk);
+- lock_sock(sk);
+ /* TCP timer functions might access net namespace even after
+ * a process which created this net namespace terminated.
+ */
+ if (!sk->sk_net_refcnt) {
+- if (!maybe_get_net(net)) {
+- release_sock(sk);
++ if (!maybe_get_net(net))
+ return false;
+- }
++ /*
++ * sk_net_refcnt_upgrade() must be called before lock_sock()
++ * because it does a GFP_KERNEL allocation, which can trigger
++ * fs_reclaim and create a circular lock dependency with the
++ * socket lock. The fields it modifies (sk_net_refcnt,
++ * ns_tracker) are not accessed by any concurrent code path
++ * at this point.
++ */
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
+ }
++ lock_sock(sk);
+ rtn = net_generic(net, rds_tcp_netid);
+ if (rtn->sndbuf_size > 0) {
+ sk->sk_sndbuf = rtn->sndbuf_size;
+--
+2.51.0
+
--- /dev/null
+From dcd07f0de5a5de3cd00f470948dd738e09f206dc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 09:06:02 -0500
+Subject: net/sched: act_ife: Fix metalist update behavior
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit e2cedd400c3ec0302ffca2490e8751772906ac23 ]
+
+Whenever an ife action replace changes the metalist, instead of
+replacing the old data on the metalist, the current ife code is appending
+the new metadata. Aside from being innapropriate behavior, this may lead
+to an unbounded addition of metadata to the metalist which might cause an
+out of bounds error when running the encode op:
+
+[ 138.423369][ C1] ==================================================================
+[ 138.424317][ C1] BUG: KASAN: slab-out-of-bounds in ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.424906][ C1] Write of size 4 at addr ffff8880077f4ffe by task ife_out_out_bou/255
+[ 138.425778][ C1] CPU: 1 UID: 0 PID: 255 Comm: ife_out_out_bou Not tainted 7.0.0-rc1-00169-gfbdfa8da05b6 #624 PREEMPT(full)
+[ 138.425795][ C1] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[ 138.425800][ C1] Call Trace:
+[ 138.425804][ C1] <IRQ>
+[ 138.425808][ C1] dump_stack_lvl (lib/dump_stack.c:122)
+[ 138.425828][ C1] print_report (mm/kasan/report.c:379 mm/kasan/report.c:482)
+[ 138.425839][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425844][ C1] ? __virt_addr_valid (./arch/x86/include/asm/preempt.h:95 (discriminator 1) ./include/linux/rcupdate.h:975 (discriminator 1) ./include/linux/mmzone.h:2207 (discriminator 1) arch/x86/mm/physaddr.c:54 (discriminator 1))
+[ 138.425853][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425859][ C1] kasan_report (mm/kasan/report.c:221 mm/kasan/report.c:597)
+[ 138.425868][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425878][ C1] kasan_check_range (mm/kasan/generic.c:186 (discriminator 1) mm/kasan/generic.c:200 (discriminator 1))
+[ 138.425884][ C1] __asan_memset (mm/kasan/shadow.c:84 (discriminator 2))
+[ 138.425889][ C1] ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425893][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:171)
+[ 138.425898][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425903][ C1] ife_encode_meta_u16 (net/sched/act_ife.c:57)
+[ 138.425910][ C1] ? __pfx_do_raw_spin_lock (kernel/locking/spinlock_debug.c:114)
+[ 138.425916][ C1] ? __asan_memcpy (mm/kasan/shadow.c:105 (discriminator 3))
+[ 138.425921][ C1] ? __pfx_ife_encode_meta_u16 (net/sched/act_ife.c:45)
+[ 138.425927][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425931][ C1] tcf_ife_act (net/sched/act_ife.c:847 net/sched/act_ife.c:879)
+
+To solve this issue, fix the replace behavior by adding the metalist to
+the ife rcu data structure.
+
+Fixes: aa9fd9a325d51 ("sched: act: ife: update parameters via rcu handling")
+Reported-by: Ruitong Liu <cnitlrt@gmail.com>
+Tested-by: Ruitong Liu <cnitlrt@gmail.com>
+Co-developed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20260304140603.76500-1-jhs@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tc_act/tc_ife.h | 4 +-
+ net/sched/act_ife.c | 93 ++++++++++++++++++-------------------
+ 2 files changed, 45 insertions(+), 52 deletions(-)
+
+diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
+index c7f24a2da1cad..24d4d5a62b3c2 100644
+--- a/include/net/tc_act/tc_ife.h
++++ b/include/net/tc_act/tc_ife.h
+@@ -13,15 +13,13 @@ struct tcf_ife_params {
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+-
++ struct list_head metalist;
+ struct rcu_head rcu;
+ };
+
+ struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
+- /* list of metaids allowed */
+- struct list_head metalist;
+ };
+ #define to_ife(a) ((struct tcf_ife_info *)a)
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 8e8f6af731d51..4ad01d4e820db 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -293,8 +293,8 @@ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
+ /* called when adding new meta information
+ */
+ static int __add_metainfo(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool atomic, bool exists)
++ struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len, bool atomic)
+ {
+ struct tcf_meta_info *mi = NULL;
+ int ret = 0;
+@@ -313,45 +313,40 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
+ }
+ }
+
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+- list_add_tail(&mi->metalist, &ife->metalist);
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
++ list_add_tail(&mi->metalist, &p->metalist);
+
+ return ret;
+ }
+
+ static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid,
+- bool exists)
++ struct tcf_ife_params *p, u32 metaid)
+ {
+ int ret;
+
+ if (!try_module_get(ops->owner))
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++ ret = __add_metainfo(ops, p, metaid, NULL, 0, true);
+ if (ret)
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool exists)
++static int add_metainfo(struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len)
+ {
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+
+ if (!ops)
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++ ret = __add_metainfo(ops, p, metaid, metaval, len, false);
+ if (ret)
+ /*put back what find_ife_oplist took */
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
++static int use_all_metadata(struct tcf_ife_params *p)
+ {
+ struct tcf_meta_ops *o;
+ int rc = 0;
+@@ -359,7 +354,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+- rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
++ rc = add_metainfo_and_get_ops(o, p, o->metaid);
+ if (rc == 0)
+ installed += 1;
+ }
+@@ -371,7 +366,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ return -EINVAL;
+ }
+
+-static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int dump_metalist(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+ struct tcf_meta_info *e;
+ struct nlattr *nest;
+@@ -379,14 +374,14 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ int total_encoded = 0;
+
+ /*can only happen on decode */
+- if (list_empty(&ife->metalist))
++ if (list_empty(&p->metalist))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
+ if (!nest)
+ goto out_nlmsg_trim;
+
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry(e, &p->metalist, metalist) {
+ if (!e->ops->get(skb, e))
+ total_encoded += 1;
+ }
+@@ -403,13 +398,11 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ return -1;
+ }
+
+-/* under ife->tcf_lock */
+-static void _tcf_ife_cleanup(struct tc_action *a)
++static void __tcf_ife_cleanup(struct tcf_ife_params *p)
+ {
+- struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_meta_info *e, *n;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_safe(e, n, &p->metalist, metalist) {
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+@@ -422,18 +415,23 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ }
+ }
+
++static void tcf_ife_cleanup_params(struct rcu_head *head)
++{
++ struct tcf_ife_params *p = container_of(head, struct tcf_ife_params,
++ rcu);
++
++ __tcf_ife_cleanup(p);
++ kfree(p);
++}
++
+ static void tcf_ife_cleanup(struct tc_action *a)
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_ife_params *p;
+
+- spin_lock_bh(&ife->tcf_lock);
+- _tcf_ife_cleanup(a);
+- spin_unlock_bh(&ife->tcf_lock);
+-
+ p = rcu_dereference_protected(ife->params, 1);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+ }
+
+ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+@@ -455,8 +453,7 @@ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+ return 0;
+ }
+
+-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+- bool exists, bool rtnl_held)
++static int populate_metalist(struct tcf_ife_params *p, struct nlattr **tb)
+ {
+ int len = 0;
+ int rc = 0;
+@@ -468,7 +465,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+- rc = add_metainfo(ife, i, val, len, exists);
++ rc = add_metainfo(p, i, val, len);
+ if (rc)
+ return rc;
+ }
+@@ -523,6 +520,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
++ INIT_LIST_HEAD(&p->metalist);
+
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+@@ -567,8 +565,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ ife = to_ife(*a);
+- if (ret == ACT_P_CREATED)
+- INIT_LIST_HEAD(&ife->metalist);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+@@ -600,8 +596,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (tb[TCA_IFE_METALST]) {
+- err = populate_metalist(ife, tb2, exists,
+- !(flags & TCA_ACT_FLAGS_NO_RTNL));
++ err = populate_metalist(p, tb2);
+ if (err)
+ goto metadata_parse_err;
+ } else {
+@@ -610,7 +605,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ * as we can. You better have at least one else we are
+ * going to bail out
+ */
+- err = use_all_metadata(ife, exists);
++ err = use_all_metadata(p);
+ if (err)
+ goto metadata_parse_err;
+ }
+@@ -626,13 +621,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+
+ return ret;
+ metadata_parse_err:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ release_idr:
++ __tcf_ife_cleanup(p);
+ kfree(p);
+ tcf_idr_release(*a, bind);
+ return err;
+@@ -679,7 +675,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
+ goto nla_put_failure;
+
+- if (dump_metalist(skb, ife)) {
++ if (dump_metalist(skb, p)) {
+ /*ignore failure to dump metalist */
+ pr_info("Failed to dump metalist\n");
+ }
+@@ -693,13 +689,13 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ return -1;
+ }
+
+-static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
++static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_params *p,
+ u16 metaid, u16 mlen, void *mdata)
+ {
+ struct tcf_meta_info *e;
+
+ /* XXX: use hash to speed up */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (metaid == e->metaid) {
+ if (e->ops) {
+ /* We check for decode presence already */
+@@ -716,10 +712,13 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ int action = ife->tcf_action;
++ struct tcf_ife_params *p;
+ u8 *ifehdr_end;
+ u8 *tlv_data;
+ u16 metalen;
+
++ p = rcu_dereference_bh(ife->params);
++
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ tcf_lastuse_update(&ife->tcf_tm);
+
+@@ -745,7 +744,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ return TC_ACT_SHOT;
+ }
+
+- if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
++ if (find_decode_metaid(skb, p, mtype, dlen, curr_data)) {
+ /* abuse overlimits to count when we receive metadata
+ * but dont have an ops for it
+ */
+@@ -769,12 +768,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ /*XXX: check if we can do this at install time instead of current
+ * send data path
+ **/
+-static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+- struct tcf_meta_info *e, *n;
++ struct tcf_meta_info *e;
+ int tot_run_sz = 0, run_sz = 0;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->check_presence) {
+ run_sz = e->ops->check_presence(skb, e);
+ tot_run_sz += run_sz;
+@@ -795,7 +794,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+ where ORIGDATA = original ethernet header ...
+ */
+- u16 metalen = ife_get_sz(skb, ife);
++ u16 metalen = ife_get_sz(skb, p);
+ int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
+ unsigned int skboff = 0;
+ int new_len = skb->len + hdrm;
+@@ -833,25 +832,21 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ if (!ife_meta)
+ goto drop;
+
+- spin_lock(&ife->tcf_lock);
+-
+ /* XXX: we dont have a clever way of telling encode to
+ * not repeat some of the computations that are done by
+ * ops->presence_check...
+ */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->encode) {
+ err = e->ops->encode(skb, (void *)(ife_meta + skboff),
+ e);
+ }
+ if (err < 0) {
+ /* too corrupt to keep around if overwritten */
+- spin_unlock(&ife->tcf_lock);
+ goto drop;
+ }
+ skboff += err;
+ }
+- spin_unlock(&ife->tcf_lock);
+ oethh = (struct ethhdr *)skb->data;
+
+ if (!is_zero_ether_addr(p->eth_src))
+--
+2.51.0
+
--- /dev/null
+From 81432b109537b5fa81caa6309cbaba4d008b449a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Feb 2026 23:53:07 +0900
+Subject: net: sched: avoid qdisc_reset_all_tx_gt() vs dequeue race for
+ lockless qdiscs
+
+From: Koichiro Den <den@valinux.co.jp>
+
+[ Upstream commit 7f083faf59d14c04e01ec05a7507f036c965acf8 ]
+
+When shrinking the number of real tx queues,
+netif_set_real_num_tx_queues() calls qdisc_reset_all_tx_gt() to flush
+qdiscs for queues which will no longer be used.
+
+qdisc_reset_all_tx_gt() currently serializes qdisc_reset() with
+qdisc_lock(). However, for lockless qdiscs, the dequeue path is
+serialized by qdisc_run_begin/end() using qdisc->seqlock instead, so
+qdisc_reset() can run concurrently with __qdisc_run() and free skbs
+while they are still being dequeued, leading to UAF.
+
+This can easily be reproduced on e.g. virtio-net by imposing heavy
+traffic while frequently changing the number of queue pairs:
+
+ iperf3 -ub0 -c $peer -t 0 &
+ while :; do
+ ethtool -L eth0 combined 1
+ ethtool -L eth0 combined 2
+ done
+
+With KASAN enabled, this leads to reports like:
+
+ BUG: KASAN: slab-use-after-free in __qdisc_run+0x133f/0x1760
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ __qdisc_run+0x133f/0x1760
+ __dev_queue_xmit+0x248f/0x3550
+ ip_finish_output2+0xa42/0x2110
+ ip_output+0x1a7/0x410
+ ip_send_skb+0x2e6/0x480
+ udp_send_skb+0xb0a/0x1590
+ udp_sendmsg+0x13c9/0x1fc0
+ ...
+ </TASK>
+
+ Allocated by task 1270 on cpu 5 at 44.558414s:
+ ...
+ alloc_skb_with_frags+0x84/0x7c0
+ sock_alloc_send_pskb+0x69a/0x830
+ __ip_append_data+0x1b86/0x48c0
+ ip_make_skb+0x1e8/0x2b0
+ udp_sendmsg+0x13a6/0x1fc0
+ ...
+
+ Freed by task 1306 on cpu 3 at 44.558445s:
+ ...
+ kmem_cache_free+0x117/0x5e0
+ pfifo_fast_reset+0x14d/0x580
+ qdisc_reset+0x9e/0x5f0
+ netif_set_real_num_tx_queues+0x303/0x840
+ virtnet_set_channels+0x1bf/0x260 [virtio_net]
+ ethnl_set_channels+0x684/0xae0
+ ethnl_default_set_doit+0x31a/0x890
+ ...
+
+Serialize qdisc_reset_all_tx_gt() against the lockless dequeue path by
+taking qdisc->seqlock for TCQ_F_NOLOCK qdiscs, matching the
+serialization model already used by dev_reset_queue().
+
+Additionally clear QDISC_STATE_NON_EMPTY after reset so the qdisc state
+reflects an empty queue, avoiding needless re-scheduling.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Signed-off-by: Koichiro Den <den@valinux.co.jp>
+Link: https://patch.msgid.link/20260228145307.3955532-1-den@valinux.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 1e002b1dea629..75a0d6095d2eb 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -758,13 +758,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+ struct Qdisc *qdisc;
++ bool nolock;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
++ nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++ if (nolock)
++ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
++ if (nolock) {
++ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
++ spin_unlock_bh(&qdisc->seqlock);
++ }
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 8da9cf5322808b32c4e86b505c8d85a18aafecaf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:25 +0000
+Subject: net: stmmac: Fix error handling in VLAN add and delete paths
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 35dfedce442c4060cfe5b98368bc9643fb995716 ]
+
+stmmac_vlan_rx_add_vid() updates active_vlans and the VLAN hash
+register before writing the HW filter entry. If the filter write
+fails, it leaves a stale VID in active_vlans and the hash register.
+
+stmmac_vlan_rx_kill_vid() has the reverse problem: it clears
+active_vlans before removing the HW filter. On failure, the VID is
+gone from active_vlans but still present in the HW filter table.
+
+To fix this, reorder the operations to update the hash table first,
+then attempt the HW filter operation. If the HW filter fails, roll
+back both the active_vlans bitmap and the hash table by calling
+stmmac_vlan_update() again.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-2-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 112287a6e9ab9..396216633149d 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6679,9 +6679,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ clear_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto err_pm_put;
++ }
+ }
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6702,15 +6706,21 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
++ ret = stmmac_vlan_update(priv, is_double);
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ goto del_vlan_error;
++ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto del_vlan_error;
++ }
+ }
+
+- ret = stmmac_vlan_update(priv, is_double);
+-
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+--
+2.51.0
+
--- /dev/null
+From aae9eb1ac6d9b7c72ca172f01c85f64ce7da9323 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 15:53:56 +0530
+Subject: net: ti: icssg-prueth: Fix ping failure after offload mode setup when
+ link speed is not 1G
+
+From: MD Danish Anwar <danishanwar@ti.com>
+
+[ Upstream commit 147792c395db870756a0dc87ce656c75ae7ab7e8 ]
+
+When both eth interfaces with links up are added to a bridge or hsr
+interface, ping fails if the link speed is not 1Gbps (e.g., 100Mbps).
+
+The issue is seen because when switching to offload (bridge/hsr) mode,
+prueth_emac_restart() restarts the firmware and clears DRAM with
+memset_io(), setting all memory to 0. This includes PORT_LINK_SPEED_OFFSET
+which firmware reads for link speed. The value 0 corresponds to
+FW_LINK_SPEED_1G (0x00), so for 1Gbps links the default value is correct
+and ping works. For 100Mbps links, the firmware needs FW_LINK_SPEED_100M
+(0x01) but gets 0 instead, causing ping to fail. The function
+emac_adjust_link() is called to reconfigure, but it detects no state change
+(emac->link is still 1, speed/duplex match PHY) so new_state remains false
+and icssg_config_set_speed() is never called to correct the firmware speed
+value.
+
+The fix resets emac->link to 0 before calling emac_adjust_link() in
+prueth_emac_common_start(). This forces new_state=true, ensuring
+icssg_config_set_speed() is called to write the correct speed value to
+firmware memory.
+
+Fixes: 06feac15406f ("net: ti: icssg-prueth: Fix emac link speed handling")
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Link: https://patch.msgid.link/20260226102356.2141871-1-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icssg_prueth.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index 055c5765bd861..5e1133c322a7d 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -307,6 +307,14 @@ static int prueth_emac_common_start(struct prueth *prueth)
+ if (ret)
+ goto disable_class;
+
++ /* Reset link state to force reconfiguration in
++ * emac_adjust_link(). Without this, if the link was already up
++ * before restart, emac_adjust_link() won't detect any state
++ * change and will skip critical configuration like writing
++ * speed to firmware.
++ */
++ emac->link = 0;
++
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
+--
+2.51.0
+
--- /dev/null
+From f122d32e023fda8941f63e09369e77f471521454 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index c78451ed06ecc..2dbd7772363be 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2153,6 +2153,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++ /* check if nd_tbl is not initialized due to
++ * ipv6.disable=1 set during boot
++ */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From 7a0096a4cf0040c4a2c21c769b67302d15c9c1c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 01:56:40 +0000
+Subject: net_sched: sch_fq: clear q->band_pkt_count[] in fq_reset()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a4c2b8be2e5329e7fac6e8f64ddcb8958155cfcb ]
+
+When/if a NIC resets, queues are deactivated by dev_deactivate_many(),
+then reactivated when the reset operation completes.
+
+fq_reset() removes all the skbs from various queues.
+
+If we do not clear q->band_pkt_count[], these counters keep growing
+and can eventually reach sch->limit, preventing new packets to be queued.
+
+Many thanks to Praveen for discovering the root cause.
+
+Fixes: 29f834aa326e ("net_sched: sch_fq: add 3 bands and WRR scheduling")
+Diagnosed-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20260304015640.961780-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_fq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 1af9768cd8ff6..682daf79af373 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -815,6 +815,7 @@ static void fq_reset(struct Qdisc *sch)
+ for (idx = 0; idx < FQ_BANDS; idx++) {
+ q->band_flows[idx].new_flows.first = NULL;
+ q->band_flows[idx].old_flows.first = NULL;
++ q->band_pkt_count[idx] = 0;
+ }
+ q->delayed = RB_ROOT;
+ q->flows = 0;
+--
+2.51.0
+
--- /dev/null
+From a9701ccc35ae1e742c1e1cdb3155213196d9786c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 16:31:32 +0100
+Subject: netfilter: nft_set_pipapo: split gc into unlink and reclaim phase
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 9df95785d3d8302f7c066050117b04cd3c2048c2 ]
+
+Yiming Qian reports Use-after-free in the pipapo set type:
+ Under a large number of expired elements, commit-time GC can run for a very
+ long time in a non-preemptible context, triggering soft lockup warnings and
+ RCU stall reports (local denial of service).
+
+We must split GC into an unlink and a reclaim phase.
+
+We cannot queue elements for freeing until pointers have been swapped.
+Expired elements are still exposed to both the packet path and userspace
+dumpers via the live copy of the data structure.
+
+call_rcu() does not protect us: dump operations or element lookups starting
+after call_rcu has fired can still observe the free'd element, unless the
+commit phase has made enough progress to swap the clone and live pointers
+before any new reader has picked up the old version.
+
+This is a similar approach to the one done recently for the rbtree backend in commit
+35f83a75529a ("netfilter: nft_set_rbtree: don't gc elements on insert").
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Reported-by: Yiming Qian <yimingqian591@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 5 +++
+ net/netfilter/nf_tables_api.c | 5 ---
+ net/netfilter/nft_set_pipapo.c | 51 ++++++++++++++++++++++++++-----
+ net/netfilter/nft_set_pipapo.h | 2 ++
+ 4 files changed, 50 insertions(+), 13 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index d440583aa4b24..79296ed87b9b3 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1851,6 +1851,11 @@ struct nft_trans_gc {
+ struct rcu_head rcu;
+ };
+
++static inline int nft_trans_gc_space(const struct nft_trans_gc *trans)
++{
++ return NFT_TRANS_GC_BATCHCOUNT - trans->count;
++}
++
+ static inline void nft_ctx_update(struct nft_ctx *ctx,
+ const struct nft_trans *trans)
+ {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 8dccd3598166b..c1b9b00907bbb 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10151,11 +10151,6 @@ static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
+ schedule_work(&trans_gc_work);
+ }
+
+-static int nft_trans_gc_space(struct nft_trans_gc *trans)
+-{
+- return NFT_TRANS_GC_BATCHCOUNT - trans->count;
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp)
+ {
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 642152e9c3227..ab5045bf3e599 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1667,11 +1667,11 @@ static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
+ }
+
+ /**
+- * pipapo_gc() - Drop expired entries from set, destroy start and end elements
++ * pipapo_gc_scan() - Drop expired entries from set and link them to gc list
+ * @set: nftables API set representation
+ * @m: Matching data
+ */
+-static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
++static void pipapo_gc_scan(struct nft_set *set, struct nft_pipapo_match *m)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct net *net = read_pnet(&set->net);
+@@ -1684,6 +1684,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ if (!gc)
+ return;
+
++ list_add(&gc->list, &priv->gc_head);
++
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ const struct nft_pipapo_field *f;
+@@ -1711,9 +1713,13 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ * NFT_SET_ELEM_DEAD_BIT.
+ */
+ if (__nft_set_elem_expired(&e->ext, tstamp)) {
+- gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
+- if (!gc)
+- return;
++ if (!nft_trans_gc_space(gc)) {
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
++ if (!gc)
++ return;
++
++ list_add(&gc->list, &priv->gc_head);
++ }
+
+ nft_pipapo_gc_deactivate(net, set, e);
+ pipapo_drop(m, rulemap);
+@@ -1727,10 +1733,30 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ }
+ }
+
+- gc = nft_trans_gc_catchall_sync(gc);
++ priv->last_gc = jiffies;
++}
++
++/**
++ * pipapo_gc_queue() - Free expired elements
++ * @set: nftables API set representation
++ */
++static void pipapo_gc_queue(struct nft_set *set)
++{
++ struct nft_pipapo *priv = nft_set_priv(set);
++ struct nft_trans_gc *gc, *next;
++
++ /* always do a catchall cycle: */
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
+ if (gc) {
++ gc = nft_trans_gc_catchall_sync(gc);
++ if (gc)
++ nft_trans_gc_queue_sync_done(gc);
++ }
++
++ /* always purge queued gc elements. */
++ list_for_each_entry_safe(gc, next, &priv->gc_head, list) {
++ list_del(&gc->list);
+ nft_trans_gc_queue_sync_done(gc);
+- priv->last_gc = jiffies;
+ }
+ }
+
+@@ -1784,6 +1810,10 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ *
+ * We also need to create a new working copy for subsequent insertions and
+ * deletions.
++ *
++ * After the live copy has been replaced by the clone, we can safely queue
++ * expired elements that have been collected by pipapo_gc_scan() for
++ * memory reclaim.
+ */
+ static void nft_pipapo_commit(struct nft_set *set)
+ {
+@@ -1794,7 +1824,7 @@ static void nft_pipapo_commit(struct nft_set *set)
+ return;
+
+ if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
+- pipapo_gc(set, priv->clone);
++ pipapo_gc_scan(set, priv->clone);
+
+ old = rcu_replace_pointer(priv->match, priv->clone,
+ nft_pipapo_transaction_mutex_held(set));
+@@ -1802,6 +1832,8 @@ static void nft_pipapo_commit(struct nft_set *set)
+
+ if (old)
+ call_rcu(&old->rcu, pipapo_reclaim_match);
++
++ pipapo_gc_queue(set);
+ }
+
+ static void nft_pipapo_abort(const struct nft_set *set)
+@@ -2259,6 +2291,7 @@ static int nft_pipapo_init(const struct nft_set *set,
+ f->mt = NULL;
+ }
+
++ INIT_LIST_HEAD(&priv->gc_head);
+ rcu_assign_pointer(priv->match, m);
+
+ return 0;
+@@ -2308,6 +2341,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+
++ WARN_ON_ONCE(!list_empty(&priv->gc_head));
++
+ m = rcu_dereference_protected(priv->match, true);
+
+ if (priv->clone) {
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index 4a2ff85ce1c43..49000f5510b28 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -156,12 +156,14 @@ struct nft_pipapo_match {
+ * @clone: Copy where pending insertions and deletions are kept
+ * @width: Total bytes to be matched for one packet, including padding
+ * @last_gc: Timestamp of last garbage collection run, jiffies
++ * @gc_head: list of nft_trans_gc to queue up for mem reclaim
+ */
+ struct nft_pipapo {
+ struct nft_pipapo_match __rcu *match;
+ struct nft_pipapo_match *clone;
+ int width;
+ unsigned long last_gc;
++ struct list_head gc_head;
+ };
+
+ struct nft_pipapo_elem;
+--
+2.51.0
+
--- /dev/null
+From eaa32d2e3318d08a3b9ee326f1209803fd85941d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 3d36ea5701f02..7a3fb2a397a1e 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ del_timer_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From 705935f95a4ef4c53d134fa4ee81001d7c079012 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 3c42b149c729c..18ff1c23769ae 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1024,18 +1024,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From 2091403c9850d4cf6639a8db8a237ab9fa389a87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 5125392bb68eb..028b4daafaf83 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -67,6 +67,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From 22c177aaf488fd9e51cf50f5faefbedb44a8ecd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Jan 2026 22:48:08 +0800
+Subject: nvme: fix admin queue leak on controller reset
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit b84bb7bd913d8ca2f976ee6faf4a174f91c02b8d ]
+
+When nvme_alloc_admin_tag_set() is called during a controller reset,
+a previous admin queue may still exist. Release it properly before
+allocating a new one to avoid orphaning the old queue.
+
+This fixes a regression introduced by commit 03b3bcd319b3 ("nvme: fix
+admin request_queue lifetime").
+
+Cc: Keith Busch <kbusch@kernel.org>
+Fixes: 03b3bcd319b3 ("nvme: fix admin request_queue lifetime")
+Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/linux-block/CAHj4cs9wv3SdPo+N01Fw2SHBYDs9tj2M_e1-GdQOkRy=DsBB1w@mail.gmail.com/
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index a766290b1ee89..de4b9e9db45d4 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4609,6 +4609,13 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ if (ret)
+ return ret;
+
++ /*
++ * If a previous admin queue exists (e.g., from before a reset),
++ * put it now before allocating a new one to avoid orphaning it.
++ */
++ if (ctrl->admin_q)
++ blk_put_queue(ctrl->admin_q);
++
+ ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+--
+2.51.0
+
--- /dev/null
+From ff9c09ac72db67ce85876ac30edbee49bb2ba140 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 19:19:28 -0500
+Subject: nvme: fix memory allocation in nvme_pr_read_keys()
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit c3320153769f05fd7fe9d840cb555dd3080ae424 ]
+
+nvme_pr_read_keys() takes num_keys from userspace and uses it to
+calculate the allocation size for rse via struct_size(). The upper
+limit is PR_KEYS_MAX (64K).
+
+A malicious or buggy userspace can pass a large num_keys value that
+results in a 4MB allocation attempt at most, causing a warning in
+the page allocator when the order exceeds MAX_PAGE_ORDER.
+
+To fix this, use kvzalloc() instead of kzalloc().
+
+This bug has the same reasoning and fix as the patch below:
+https://lore.kernel.org/linux-block/20251212013510.3576091-1-kartikey406@gmail.com/
+
+Warning log:
+WARNING: mm/page_alloc.c:5216 at __alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216, CPU#1: syz-executor117/272
+Modules linked in:
+CPU: 1 UID: 0 PID: 272 Comm: syz-executor117 Not tainted 6.19.0 #1 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+RIP: 0010:__alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216
+Code: ff 83 bd a8 fe ff ff 0a 0f 86 69 fb ff ff 0f b6 1d f9 f9 c4 04 80 fb 01 0f 87 3b 76 30 ff 83 e3 01 75 09 c6 05 e4 f9 c4 04 01 <0f> 0b 48 c7 85 70 fe ff ff 00 00 00 00 e9 8f fd ff ff 31 c0 e9 0d
+RSP: 0018:ffffc90000fcf450 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 1ffff920001f9ea0
+RDX: 0000000000000000 RSI: 000000000000000b RDI: 0000000000040dc0
+RBP: ffffc90000fcf648 R08: ffff88800b6c3380 R09: 0000000000000001
+R10: ffffc90000fcf840 R11: ffff88807ffad280 R12: 0000000000000000
+R13: 0000000000040dc0 R14: 0000000000000001 R15: ffffc90000fcf620
+FS: 0000555565db33c0(0000) GS:ffff8880be26c000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000002000000c CR3: 0000000003b72000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ alloc_pages_mpol+0x236/0x4d0 mm/mempolicy.c:2486
+ alloc_frozen_pages_noprof+0x149/0x180 mm/mempolicy.c:2557
+ ___kmalloc_large_node+0x10c/0x140 mm/slub.c:5598
+ __kmalloc_large_node_noprof+0x25/0xc0 mm/slub.c:5629
+ __do_kmalloc_node mm/slub.c:5645 [inline]
+ __kmalloc_noprof+0x483/0x6f0 mm/slub.c:5669
+ kmalloc_noprof include/linux/slab.h:961 [inline]
+ kzalloc_noprof include/linux/slab.h:1094 [inline]
+ nvme_pr_read_keys+0x8f/0x4c0 drivers/nvme/host/pr.c:245
+ blkdev_pr_read_keys block/ioctl.c:456 [inline]
+ blkdev_common_ioctl+0x1b71/0x29b0 block/ioctl.c:730
+ blkdev_ioctl+0x299/0x700 block/ioctl.c:786
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:597 [inline]
+ __se_sys_ioctl fs/ioctl.c:583 [inline]
+ __x64_sys_ioctl+0x1bf/0x220 fs/ioctl.c:583
+ x64_sys_call+0x1280/0x21b0 mnt/fuzznvme_1/fuzznvme/linux-build/v6.19/./arch/x86/include/generated/asm/syscalls_64.h:17
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x71/0x330 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7fb893d3108d
+Code: 28 c3 e8 46 1e 00 00 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007ffff61f2f38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007ffff61f3138 RCX: 00007fb893d3108d
+RDX: 0000000020000040 RSI: 00000000c01070ce RDI: 0000000000000003
+RBP: 0000000000000001 R08: 0000000000000000 R09: 00007ffff61f3138
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001
+R13: 00007ffff61f3128 R14: 00007fb893dae530 R15: 0000000000000001
+ </TASK>
+
+Fixes: 5fd96a4e15de ("nvme: Add pr_ops read_keys support")
+Acked-by: Chao Shi <cshi008@fiu.edu>
+Acked-by: Weidong Zhu <weizhu@fiu.edu>
+Acked-by: Dave Tian <daveti@purdue.edu>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index d330916a3199d..e1d07f824b13c 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -214,7 +214,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ if (rse_len > U32_MAX)
+ return -EINVAL;
+
+- rse = kzalloc(rse_len, GFP_KERNEL);
++ rse = kvzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+@@ -239,7 +239,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ }
+
+ free_rse:
+- kfree(rse);
++ kvfree(rse);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From eb47db7435cd158307dd19996623a7729dac47a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Dec 2025 16:43:27 -0500
+Subject: nvme: reject invalid pr_read_keys() num_keys values
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+
+[ Upstream commit 38ec8469f39e0e96e7dd9b76f05e0f8eb78be681 ]
+
+The pr_read_keys() interface has a u32 num_keys parameter. The NVMe
+Reservation Report command has a u32 maximum length. Reject num_keys
+values that are too large to fit.
+
+This will become important when pr_read_keys() is exposed to untrusted
+userspace via an <linux/pr.h> ioctl.
+
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c3320153769f ("nvme: fix memory allocation in nvme_pr_read_keys()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index 80dd09aa01a3b..d330916a3199d 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -200,7 +200,8 @@ static int nvme_pr_resv_report(struct block_device *bdev, void *data,
+ static int nvme_pr_read_keys(struct block_device *bdev,
+ struct pr_keys *keys_info)
+ {
+- u32 rse_len, num_keys = keys_info->num_keys;
++ size_t rse_len;
++ u32 num_keys = keys_info->num_keys;
+ struct nvme_reservation_status_ext *rse;
+ int ret, i;
+ bool eds;
+@@ -210,6 +211,9 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ * enough to get enough keys to fill the return keys buffer.
+ */
+ rse_len = struct_size(rse, regctl_eds, num_keys);
++ if (rse_len > U32_MAX)
++ return -EINVAL;
++
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+--
+2.51.0
+
--- /dev/null
+From a8036b20755d21c65185d2b86f3c0e3748a0d46a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:58 +0000
+Subject: octeon_ep: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 43b3160cb639079a15daeb5f080120afbfbfc918 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provide ordering guarantees but don't prevent the compiler
+from performing optimizations like caching values in registers, load tearing, etc.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-3-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 21 +++++++++------
+ .../net/ethernet/marvell/octeon_ep/octep_rx.c | 27 +++++++++++++------
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index b7b1e4fd306d1..fd515964869a2 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -562,17 +562,22 @@ static void octep_clean_irqs(struct octep_device *oct)
+ */
+ static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index f2a7c6a76c742..74de19166488f 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -324,10 +324,16 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
+
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+ * reached half its max value.
+@@ -338,8 +344,9 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -414,7 +421,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ u16 rx_ol_flags;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -499,7 +506,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
+@@ -522,22 +529,26 @@ int octep_oq_process_rx(struct octep_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 716ad30f24ad5959bf0fdb3415e7798c770a874f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:57 +0000
+Subject: octeon_ep: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 18c04a808c436d629d5812ce883e3822a5f5a47f ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion,
+and replace napi_complete with napi_complete_done.
+
+Moving the IQ/OQ counter updates before napi_complete_done:
+1. Ensures counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+   synchronized.
+napi_complete_done (vs napi_complete) allows for better
+interrupt coalescing.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-2-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 449c55c09b4a5..b7b1e4fd306d1 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -555,12 +555,12 @@ static void octep_clean_irqs(struct octep_device *oct)
+ }
+
+ /**
+- * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -576,7 +576,17 @@ static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+- wmb();
++ smp_wmb();
++}
++
++/**
++ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++{
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -602,7 +612,8 @@ static int octep_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+- napi_complete(napi);
++ octep_update_pkt(ioq_vector->iq, ioq_vector->oq);
++ napi_complete_done(napi, rx_done);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+ }
+--
+2.51.0
+
--- /dev/null
+From 2ea7a441886afe5b3a2969366bf28fced9d046df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:14:00 +0000
+Subject: octeon_ep_vf: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 6c73126ecd1080351b468fe43353b2f705487f44 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provide ordering guarantees but don't prevent the compiler
+from performing optimizations like caching values in registers, load tearing, etc.
+
+Fixes: 1cd3b407977c3 ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-5-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeon_ep_vf/octep_vf_main.c | 21 ++++++++------
+ .../marvell/octeon_ep_vf/octep_vf_rx.c | 28 +++++++++++++------
+ 2 files changed, 33 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index a8332965084b9..72c1e9415efaa 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -296,17 +296,22 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+
+ static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+index 6f865dbbba6c6..b579d5b545c46 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -325,9 +325,16 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
++
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+@@ -339,8 +346,9 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -369,7 +377,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct sk_buff *skb;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -463,7 +471,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
+@@ -486,22 +494,26 @@ int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 642ad1837d05f1b6633cfcf8ff3eac56ebf1738b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:59 +0000
+Subject: octeon_ep_vf: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 2ae7d20fb24f598f60faa8f6ecc856dac782261a ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion.
+Moving the IQ/OQ counter updates before napi_complete_done:
+1. Ensures counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+   synchronized.
+
+Fixes: 1cd3b407977c3 ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-4-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeon_ep_vf/octep_vf_main.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index b9430c4a33a32..a8332965084b9 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -288,12 +288,13 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+ }
+
+ /**
+- * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_vf_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
++
++static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -310,6 +311,17 @@ static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *
+
+ /* Flush the previous wrties before writing to RESEND bit */
+ smp_wmb();
++}
++
++/**
++ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq,
++ struct octep_vf_oq *oq)
++{
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -335,6 +347,7 @@ static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
++ octep_vf_update_pkt(ioq_vector->iq, ioq_vector->oq);
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+--
+2.51.0
+
--- /dev/null
+From 89a1630e5c26a56f3c53219c7ccf18db948e5e74 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 17:39:07 +0800
+Subject: pinctrl: cirrus: cs42l43: Fix double-put in cs42l43_pin_probe()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fd5bed798f45eb3a178ad527b43ab92705faaf8a ]
+
+devm_add_action_or_reset() already invokes the action on failure,
+so the explicit put causes a double-put.
+
+Fixes: 9b07cdf86a0b ("pinctrl: cirrus: Fix fwnode leak in cs42l43_pin_probe()")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/cirrus/pinctrl-cs42l43.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+index 8b3f3b945e206..7734dae06a4ac 100644
+--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
++++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+@@ -569,10 +569,9 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
+ if (child) {
+ ret = devm_add_action_or_reset(&pdev->dev,
+ cs42l43_fwnode_put, child);
+- if (ret) {
+- fwnode_handle_put(child);
++ if (ret)
+ return ret;
+- }
++
+ if (!child->dev)
+ child->dev = priv->dev;
+ fwnode = child;
+--
+2.51.0
+
--- /dev/null
+From 6a7668b945ca3d98bd3f7f79e7a35e6373cf0764 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:46 +0100
+Subject: pinctrl: equilibrium: fix warning trace on load
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 3e00b1b332e54ba50cca6691f628b9c06574024f ]
+
+The callback functions 'eqbr_irq_mask()' and 'eqbr_irq_ack()' are also
+called in the callback function 'eqbr_irq_mask_ack()'. This is done to
+avoid source code duplication. The problem, is that in the function
+'eqbr_irq_mask()' also calles the gpiolib function 'gpiochip_disable_irq()'
+
+This generates the following warning trace in the log for every gpio on
+load.
+
+[ 6.088111] ------------[ cut here ]------------
+[ 6.092440] WARNING: CPU: 3 PID: 1 at drivers/gpio/gpiolib.c:3810 gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Modules linked in:
+[ 6.097847] CPU: 3 UID: 0 PID: 1 Comm: swapper/0 Tainted: G W 6.12.59+ #0
+[ 6.097847] Tainted: [W]=WARN
+[ 6.097847] RIP: 0010:gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Code: 39 c6 48 19 c0 21 c6 48 c1 e6 05 48 03 b2 38 03 00 00 48 81 fe 00 f0 ff ff 77 11 48 8b 46 08 f6 c4 02 74 06 f0 80 66 09 fb c3 <0f> 0b 90 0f 1f 40 00 c3 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40
+[ 6.097847] RSP: 0000:ffffc9000000b830 EFLAGS: 00010046
+[ 6.097847] RAX: 0000000000000045 RBX: ffff888001be02a0 RCX: 0000000000000008
+[ 6.097847] RDX: ffff888001be9000 RSI: ffff888001b2dd00 RDI: ffff888001be02a0
+[ 6.097847] RBP: ffffc9000000b860 R08: 0000000000000000 R09: 0000000000000000
+[ 6.097847] R10: 0000000000000001 R11: ffff888001b2a154 R12: ffff888001be0514
+[ 6.097847] R13: ffff888001be02a0 R14: 0000000000000008 R15: 0000000000000000
+[ 6.097847] FS: 0000000000000000(0000) GS:ffff888041d80000(0000) knlGS:0000000000000000
+[ 6.097847] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 6.097847] CR2: 0000000000000000 CR3: 0000000003030000 CR4: 00000000001026b0
+[ 6.097847] Call Trace:
+[ 6.097847] <TASK>
+[ 6.097847] ? eqbr_irq_mask+0x63/0x70
+[ 6.097847] ? no_action+0x10/0x10
+[ 6.097847] eqbr_irq_mask_ack+0x11/0x60
+
+In an other driver (drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c) the
+interrupt is not disabled here.
+
+To fix this, do not call the 'eqbr_irq_mask()' and 'eqbr_irq_ack()'
+function. Implement instead this directly without disabling the interrupts.
+
+Fixes: 52066a53bd11 ("pinctrl: equilibrium: Convert to immutable irq_chip")
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index e8b2efc7b41a0..5204466c6b3e6 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -63,8 +63,15 @@ static void eqbr_irq_ack(struct irq_data *d)
+
+ static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_irq_mask(d);
+- eqbr_irq_ack(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
++ unsigned int offset = irqd_to_hwirq(d);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&gctrl->lock, flags);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
++ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+--
+2.51.0
+
--- /dev/null
+From 4f9604874079cbae7c0d146f061c809e02130856 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:45 +0100
+Subject: pinctrl: equilibrium: rename irq_chip function callbacks
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 1f96b84835eafb3e6f366dc3a66c0e69504cec9d ]
+
+Renaming of the irq_chip callback functions to improve clarity.
+
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Stable-dep-of: 3e00b1b332e5 ("pinctrl: equilibrium: fix warning trace on load")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index c82491da2cc9f..e8b2efc7b41a0 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -22,7 +22,7 @@
+ #define PIN_NAME_LEN 10
+ #define PAD_REG_OFF 0x100
+
+-static void eqbr_gpio_disable_irq(struct irq_data *d)
++static void eqbr_irq_mask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -35,7 +35,7 @@ static void eqbr_gpio_disable_irq(struct irq_data *d)
+ gpiochip_disable_irq(gc, offset);
+ }
+
+-static void eqbr_gpio_enable_irq(struct irq_data *d)
++static void eqbr_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -49,7 +49,7 @@ static void eqbr_gpio_enable_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_ack_irq(struct irq_data *d)
++static void eqbr_irq_ack(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -61,10 +61,10 @@ static void eqbr_gpio_ack_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
++static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_gpio_disable_irq(d);
+- eqbr_gpio_ack_irq(d);
++ eqbr_irq_mask(d);
++ eqbr_irq_ack(d);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+@@ -91,7 +91,7 @@ static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ return 0;
+ }
+
+-static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
++static int eqbr_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -165,11 +165,11 @@ static void eqbr_irq_handler(struct irq_desc *desc)
+
+ static const struct irq_chip eqbr_irq_chip = {
+ .name = "gpio_irq",
+- .irq_mask = eqbr_gpio_disable_irq,
+- .irq_unmask = eqbr_gpio_enable_irq,
+- .irq_ack = eqbr_gpio_ack_irq,
+- .irq_mask_ack = eqbr_gpio_mask_ack_irq,
+- .irq_set_type = eqbr_gpio_set_irq_type,
++ .irq_ack = eqbr_irq_ack,
++ .irq_mask = eqbr_irq_mask,
++ .irq_mask_ack = eqbr_irq_mask_ack,
++ .irq_unmask = eqbr_irq_unmask,
++ .irq_set_type = eqbr_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+--
+2.51.0
+
--- /dev/null
+From 312e5e7ec915d3d44c6374da667a295bf429a59f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 2c67d9758e6b4..e4fe90b70e50e 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9499,14 +9499,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
--- /dev/null
+From 1c02171d15ca14b43befaa4dd086f58e078357d9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 19:37:56 +0900
+Subject: rust: kunit: fix warning when !CONFIG_PRINTK
+
+From: Alexandre Courbot <acourbot@nvidia.com>
+
+[ Upstream commit 7dd34dfc8dfa92a7244242098110388367996ac3 ]
+
+If `CONFIG_PRINTK` is not set, then the following warnings are issued
+during build:
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:16:12
+ |
+ 16 | pub fn err(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+ |
+ = note: `#[warn(unused_variables)]` (part of `#[warn(unused)]`) on by default
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:32:13
+ |
+ 32 | pub fn info(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+
+Fix this by adding a no-op assignment using `args` when `CONFIG_PRINTK`
+is not set.
+
+Fixes: a66d733da801 ("rust: support running Rust documentation tests as KUnit ones")
+Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/kunit.rs | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
+index 824da0e9738a0..7b38fca9f2429 100644
+--- a/rust/kernel/kunit.rs
++++ b/rust/kernel/kunit.rs
+@@ -13,6 +13,10 @@
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn err(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+@@ -29,6 +33,10 @@ pub fn err(args: fmt::Arguments<'_>) {
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn info(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+--
+2.51.0
+
--- /dev/null
+From b5711af0acd01d8835f7cc91307284e40c4ab322 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 09:36:37 +0800
+Subject: selftest/arm64: Fix sve2p1_sigill() to hwcap test
+
+From: Yifan Wu <wuyifan50@huawei.com>
+
+[ Upstream commit d87c828daa7ead9763416f75cc416496969cf1dc ]
+
+FEAT_SVE2p1 is indicated by ID_AA64ZFR0_EL1.SVEver. However,
+BFADD requires FEAT_SVE_B16B16, which is indicated by
+ID_AA64ZFR0_EL1.B16B16. This could cause the test to incorrectly
+fail on a CPU that supports FEAT_SVE2.1 but not FEAT_SVE_B16B16.
+
+LD1Q (gather load quadwords) is decoded from the SVE encodings and
+is implied by FEAT_SVE2p1.
+
+Fixes: c5195b027d29 ("kselftest/arm64: Add SVE 2.1 to hwcap test")
+Signed-off-by: Yifan Wu <wuyifan50@huawei.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/arm64/abi/hwcap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
+index 265654ec48b9f..097bd51e14ca2 100644
+--- a/tools/testing/selftests/arm64/abi/hwcap.c
++++ b/tools/testing/selftests/arm64/abi/hwcap.c
+@@ -349,8 +349,8 @@ static void sve2_sigill(void)
+
+ static void sve2p1_sigill(void)
+ {
+- /* BFADD Z0.H, Z0.H, Z0.H */
+- asm volatile(".inst 0x65000000" : : : "z0");
++ /* LD1Q {Z0.Q}, P0/Z, [Z0.D, X0] */
++ asm volatile(".inst 0xC400A000" : : : "z0");
+ }
+
+ static void sveaes_sigill(void)
+--
+2.51.0
+
--- /dev/null
+From 27ea11cbb1b93956801d2328dd43e7a17b443e9b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 19:14:50 +0800
+Subject: selftests/harness: order TEST_F and XFAIL_ADD constructors
+
+From: Sun Jian <sun.jian.kdev@gmail.com>
+
+[ Upstream commit 6be2681514261324c8ee8a1c6f76cefdf700220f ]
+
+TEST_F() allocates and registers its struct __test_metadata via mmap()
+inside its constructor, and only then assigns the
+_##fixture_name##_##test_name##_object pointer.
+
+XFAIL_ADD() runs in a constructor too and reads
+_##fixture_name##_##test_name##_object to initialize xfail->test. If XFAIL_ADD runs
+first, xfail->test can be NULL and the expected failure will be reported
+as FAIL.
+
+Use constructor priorities to ensure TEST_F registration runs before
+XFAIL_ADD, without adding extra state or runtime lookups.
+
+Fixes: 2709473c9386 ("selftests: kselftest_harness: support using xfail")
+Signed-off-by: Sun Jian <sun.jian.kdev@gmail.com>
+Link: https://patch.msgid.link/20260225111451.347923-1-sun.jian.kdev@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kselftest_harness.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index d67ec4d762db3..a4e5b8613babf 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -75,6 +75,9 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
+ memset(s, c, n);
+ }
+
++#define KSELFTEST_PRIO_TEST_F 20000
++#define KSELFTEST_PRIO_XFAIL 20001
++
+ #define TEST_TIMEOUT_DEFAULT 30
+
+ /* Utilities exposed to the test definitions */
+@@ -465,7 +468,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
+ __test_check_assert(_metadata); \
+ } \
+ static struct __test_metadata *_##fixture_name##_##test_name##_object; \
+- static void __attribute__((constructor)) \
++ static void __attribute__((constructor(KSELFTEST_PRIO_TEST_F))) \
+ _register_##fixture_name##_##test_name(void) \
+ { \
+ struct __test_metadata *object = mmap(NULL, sizeof(*object), \
+@@ -879,7 +882,7 @@ struct __test_xfail {
+ .fixture = &_##fixture_name##_fixture_object, \
+ .variant = &_##fixture_name##_##variant_name##_object, \
+ }; \
+- static void __attribute__((constructor)) \
++ static void __attribute__((constructor(KSELFTEST_PRIO_XFAIL))) \
+ _register_##fixture_name##_##variant_name##_##test_name##_xfail(void) \
+ { \
+ _##fixture_name##_##variant_name##_##test_name##_xfail.test = \
+--
+2.51.0
+
arm-clean-up-the-memset64-c-wrapper.patch
net-stmmac-dwmac-loongson-set-clk_csr_i-to-100-150mhz.patch
btrfs-always-fallback-to-buffered-write-if-the-inode-requires-checksum.patch
+acpi-apei-ghes-disable-kasan-instrumentation-when-co.patch
+nvme-fix-admin-queue-leak-on-controller-reset.patch
+hwmon-aht10-add-support-for-dht20.patch
+hwmon-aht10-fix-initialization-commands-for-aht20.patch
+pinctrl-equilibrium-rename-irq_chip-function-callbac.patch
+pinctrl-equilibrium-fix-warning-trace-on-load.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+hid-multitouch-get-the-contact-id-from-hid_dg_transd.patch
+hid-multitouch-add-device-id-for-apple-touch-bar.patch
+hid-multitouch-add-quirks-for-lenovo-yoga-book-9i.patch
+hid-multitouch-new-class-mt_cls_egalax_p80h84.patch
+pinctrl-cirrus-cs42l43-fix-double-put-in-cs42l43_pin.patch
+hwmon-it87-check-the-it87_lock-return-value.patch
+idpf-change-irq-naming-to-match-netdev-and-ethtool-q.patch
+i40e-fix-preempt-count-leak-in-napi-poll-tracepoint.patch
+e1000e-clear-dpg_en-after-reset-to-avoid-autonomous-.patch
+drm-solomon-fix-page-start-when-updating-rectangle-i.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+xsk-get-rid-of-xdp_buff_xsk-xskb_list_node.patch
+xsk-s-free_list_node-list_node.patch
+xsk-fix-fragment-node-deletion-to-prevent-buffer-lea.patch
+xsk-fix-zero-copy-af_xdp-fragment-drop.patch
+dpaa2-switch-fix-interrupt-storm-after-receiving-bad.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+net-ti-icssg-prueth-fix-ping-failure-after-offload-m.patch
+amd-xgbe-fix-mac_tcr_ss-register-width-for-2.5g-and-.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+wifi-rsi-don-t-default-to-eopnotsupp-in-rsi_mac80211.patch
+drm-xe-do-not-preempt-fence-signaling-cs-instruction.patch
+rust-kunit-fix-warning-when-config_printk.patch
+kunit-tool-copy-caller-args-in-run_kernel-to-prevent.patch
+net-dsa-realtek-rtl8365mb-fix-rtl8365mb_phy_ocp_writ.patch
+bpf-bonding-reject-vlan-srcmac-xmit_hash_policy-chan.patch
+octeon_ep-relocate-counter-updates-before-napi.patch
+octeon_ep-avoid-compiler-and-iq-oq-reordering.patch
+octeon_ep_vf-relocate-counter-updates-before-napi.patch
+octeon_ep_vf-avoid-compiler-and-iq-oq-reordering.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+wifi-mt76-mt7996-fix-possible-oob-access-in-mt7996_m.patch
+wifi-mt76-mt7925-fix-possible-oob-access-in-mt7925_m.patch
+wifi-mt76-fix-possible-oob-access-in-mt76_connac2_ma.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+net-rds-fix-circular-locking-dependency-in-rds_tcp_t.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+iavf-fix-netdev-max_mtu-to-respect-actual-hardware-l.patch
+bpf-fix-a-uaf-issue-in-bpf_trampoline_link_cgroup_sh.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch-8361
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+net-ipv4-fix-arm64-alignment-fault-in-multipath-hash.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+drm-sched-fix-kernel-doc-warning-for-drm_sched_job_d.patch
+i2c-i801-revert-i2c-i801-replace-acpi_lock-with-i2c-.patch
+drm-xe-reg_sr-fix-leak-on-xa_store-failure.patch
+nvme-reject-invalid-pr_read_keys-num_keys-values.patch
+nvme-fix-memory-allocation-in-nvme_pr_read_keys.patch
+hwmon-max6639-configure-based-on-dt-property.patch
+hwmon-max6639-fix-inverted-polarity.patch
+net-sched-avoid-qdisc_reset_all_tx_gt-vs-dequeue-rac.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+net_sched-sch_fq-clear-q-band_pkt_count-in-fq_reset.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-stmmac-fix-error-handling-in-vlan-add-and-delete.patch
+netfilter-nft_set_pipapo-split-gc-into-unlink-and-re.patch
+net-ethernet-mtk_eth_soc-reset-prog-ptr-to-old_prog-.patch
+kselftest-harness-use-helper-to-avoid-zero-size-mems.patch
+selftests-harness-order-test_f-and-xfail_add-constru.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
+net-sched-act_ife-fix-metalist-update-behavior.patch
+xdp-use-modulo-operation-to-calculate-xdp-frag-tailr.patch
+xsk-introduce-helper-to-determine-rxq-frag_size.patch
+i40e-fix-registering-xdp-rxq-info.patch
+i40e-use-xdp.frame_sz-as-xdp-rxq-info-frag_size.patch
+xdp-produce-a-warning-when-calculated-tailroom-is-ne.patch
+selftest-arm64-fix-sve2p1_sigill-to-hwcap-test.patch
+tracing-add-null-pointer-check-to-trigger_data_free.patch
--- /dev/null
+From 91689e38a3ec5000738bddfb38e30bfeb86cbb50 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:11 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ smb2_compound_op()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 12c43a062acb0ac137fc2a4a106d4d084b8c5416 ]
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: 6a5f6592a0b6 ("SMB311: Add support for query info using posix extensions (level 100)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2inode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 1c65787657ddc..cac14c7b3fbc2 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -315,7 +315,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ cfile->fid.volatile_fid,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ } else {
+@@ -325,7 +325,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ }
+--
+2.51.0
+
--- /dev/null
+From 0a3b31e4ee2f90f19f078983524df4318fc33748 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:12 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ SMB311_posix_query_info()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 9621b996e4db1dbc2b3dc5d5910b7d6179397320 ]
+
+SMB311_posix_query_info() is currently unused, but it may still be used in
+some stable versions, so these changes are submitted as a separate patch.
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: b1bc1874b885 ("smb311: Add support for SMB311 query info (non-compounded)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2pdu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 121463b9273bc..b6821815248e7 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3933,7 +3933,7 @@ int
+ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
+ {
+- size_t output_len = sizeof(struct smb311_posix_qinfo *) +
++ size_t output_len = sizeof(struct smb311_posix_qinfo) +
+ (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
+ *plen = 0;
+
+--
+2.51.0
+
--- /dev/null
+From 29aa0487d821e84d861daa81935df9f18eb7182c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:33:39 -0800
+Subject: tracing: Add NULL pointer check to trigger_data_free()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 457965c13f0837a289c9164b842d0860133f6274 ]
+
+If trigger_data_alloc() fails and returns NULL, event_hist_trigger_parse()
+jumps to the out_free error path. While kfree() safely handles a NULL
+pointer, trigger_data_free() does not. This causes a NULL pointer
+dereference in trigger_data_free() when evaluating
+data->cmd_ops->set_filter.
+
+Fix the problem by adding a NULL pointer check to trigger_data_free().
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://patch.msgid.link/20260305193339.2810953-1-linux@roeck-us.net
+Fixes: 0550069cc25f ("tracing: Properly process error handling in event_hist_trigger_parse()")
+Assisted-by: Gemini:gemini-3.1-pro
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events_trigger.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index d5dbda9b0e4b0..1e4e699c25478 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -19,6 +19,9 @@ static DEFINE_MUTEX(trigger_cmd_mutex);
+
+ void trigger_data_free(struct event_trigger_data *data)
+ {
++ if (!data)
++ return;
++
+ if (data->cmd_ops->set_filter)
+ data->cmd_ops->set_filter(NULL, data, NULL);
+
+--
+2.51.0
+
--- /dev/null
+From 70c10ba125872ed3d93e01de3dfc112a6b5d0558 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index a20ab577a3644..212b6f2af8de4 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From e5f97ac69a6c9862a2ebfdb406c271e72cac1db7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:16 +0100
+Subject: wifi: mt76: Fix possible oob access in
+ mt76_connac2_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 4e10a730d1b511ff49723371ed6d694dd1b2c785 ]
+
+Check frame length before accessing the mgmt fields in
+mt76_connac2_mac_write_txwi_80211 in order to avoid a possible oob
+access.
+
+Fixes: 577dbc6c656d ("mt76: mt7915: enable offloading of sequence number assignment")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-3-b0f6d1ad4850@kernel.org
+[fix check to also cover mgmt->u.action.u.addba_req.capab,
+correct Fixes tag]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index a3db65254e37f..268f414f0a023 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -396,6 +396,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + 1 + 2 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+--
+2.51.0
+
--- /dev/null
+From f79c77b38c61834e6a21b6b734b122432e8788bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:15 +0100
+Subject: wifi: mt76: mt7925: Fix possible oob access in
+ mt7925_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit c41a9abd6ae31d130e8f332e7c8800c4c866234b ]
+
+Check frame length before accessing the mgmt fields in
+mt7925_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: c948b5da6bbec ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-2-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+index f1bd0c174acf4..2ab439f28e16f 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+@@ -671,6 +671,7 @@ mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ tid = MT_TX_ADDBA;
+--
+2.51.0
+
--- /dev/null
+From 0e1ee1c1f391053331025703289f32e451d48358 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:14 +0100
+Subject: wifi: mt76: mt7996: Fix possible oob access in
+ mt7996_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 60862846308627e9e15546bb647a00de44deb27b ]
+
+Check frame length before accessing the mgmt fields in
+mt7996_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: 98686cd21624c ("wifi: mt76: mt7996: add driver for MediaTek Wi-Fi 7 (802.11be) devices")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-1-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 0990a3d481f2d..b7a5426c933d0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -759,6 +759,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ tid = MT_TX_ADDBA;
+--
+2.51.0
+
--- /dev/null
+From 0d328988a8baf503390cfa0e4812ba61163f3083 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 17:28:04 +0100
+Subject: wifi: rsi: Don't default to -EOPNOTSUPP in rsi_mac80211_config
+
+From: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
+
+[ Upstream commit d973b1039ccde6b241b438d53297edce4de45b5c ]
+
+This triggers a WARN_ON in ieee80211_hw_conf_init and isn't the expected
+behavior from the driver - other drivers default to 0 too.
+
+Fixes: 0a44dfc07074 ("wifi: mac80211: simplify non-chanctx drivers")
+Signed-off-by: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
+Link: https://patch.msgid.link/20260221-rsi-config-ret-v1-1-9a8f805e2f31@puri.sm
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+index c92bb8815320e..85fd5090e0b8a 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+@@ -666,7 +666,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+- int status = -EOPNOTSUPP;
++ int status = 0;
+
+ mutex_lock(&common->mutex);
+
+--
+2.51.0
+
--- /dev/null
+From ca6725659a27a9ee1657a6fbbf9e9d71d3eba6db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 42805ed7ca120..da6db99b0d575 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1879,6 +1879,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1895,8 +1897,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From ad7b5e6fdd874bbe1c671b8c251a56e8ef066642 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:50 +0100
+Subject: xdp: produce a warning when calculated tailroom is negative
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8821e857759be9db3cde337ad328b71fe5c8a55f ]
+
+Many ethernet drivers report xdp Rx queue frag size as being the same as
+DMA write size. However, the only user of this field, namely
+bpf_xdp_frags_increase_tail(), clearly expects a truesize.
+
+Such difference leads to unspecific memory corruption issues under certain
+circumstances, e.g. in ixgbevf maximum DMA write size is 3 KB, so when
+running xskxceiver's XDP_ADJUST_TAIL_GROW_MULTI_BUFF, 6K packet fully uses
+all DMA-writable space in 2 buffers. This would be fine, if only
+rxq->frag_size was properly set to 4K, but value of 3K results in a
+negative tailroom, because there is a non-zero page offset.
+
+We are supposed to return -EINVAL and be done with it in such case, but due
+to tailroom being stored as an unsigned int, it is reported to be somewhere
+near UINT_MAX, resulting in a tail being grown, even if the requested
+offset is too much (it is around 2K in the abovementioned test). This later
+leads to all kinds of unspecific calltraces.
+
+[ 7340.337579] xskxceiver[1440]: segfault at 1da718 ip 00007f4161aeac9d sp 00007f41615a6a00 error 6
+[ 7340.338040] xskxceiver[1441]: segfault at 7f410000000b ip 00000000004042b5 sp 00007f415bffecf0 error 4
+[ 7340.338179] in libc.so.6[61c9d,7f4161aaf000+160000]
+[ 7340.339230] in xskxceiver[42b5,400000+69000]
+[ 7340.340300] likely on CPU 6 (core 0, socket 6)
+[ 7340.340302] Code: ff ff 01 e9 f4 fe ff ff 0f 1f 44 00 00 4c 39 f0 74 73 31 c0 ba 01 00 00 00 f0 0f b1 17 0f 85 ba 00 00 00 49 8b 87 88 00 00 00 <4c> 89 70 08 eb cc 0f 1f 44 00 00 48 8d bd f0 fe ff ff 89 85 ec fe
+[ 7340.340888] likely on CPU 3 (core 0, socket 3)
+[ 7340.345088] Code: 00 00 00 ba 00 00 00 00 be 00 00 00 00 89 c7 e8 31 ca ff ff 89 45 ec 8b 45 ec 85 c0 78 07 b8 00 00 00 00 eb 46 e8 0b c8 ff ff <8b> 00 83 f8 69 74 24 e8 ff c7 ff ff 8b 00 83 f8 0b 74 18 e8 f3 c7
+[ 7340.404334] Oops: general protection fault, probably for non-canonical address 0x6d255010bdffc: 0000 [#1] SMP NOPTI
+[ 7340.405972] CPU: 7 UID: 0 PID: 1439 Comm: xskxceiver Not tainted 6.19.0-rc1+ #21 PREEMPT(lazy)
+[ 7340.408006] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-5.fc42 04/01/2014
+[ 7340.409716] RIP: 0010:lookup_swap_cgroup_id+0x44/0x80
+[ 7340.410455] Code: 83 f8 1c 73 39 48 ba ff ff ff ff ff ff ff 03 48 8b 04 c5 20 55 fa bd 48 21 d1 48 89 ca 83 e1 01 48 d1 ea c1 e1 04 48 8d 04 90 <8b> 00 48 83 c4 10 d3 e8 c3 cc cc cc cc 31 c0 e9 98 b7 dd 00 48 89
+[ 7340.412787] RSP: 0018:ffffcc5c04f7f6d0 EFLAGS: 00010202
+[ 7340.413494] RAX: 0006d255010bdffc RBX: ffff891f477895a8 RCX: 0000000000000010
+[ 7340.414431] RDX: 0001c17e3fffffff RSI: 00fa070000000000 RDI: 000382fc7fffffff
+[ 7340.415354] RBP: 00fa070000000000 R08: ffffcc5c04f7f8f8 R09: ffffcc5c04f7f7d0
+[ 7340.416283] R10: ffff891f4c1a7000 R11: ffffcc5c04f7f9c8 R12: ffffcc5c04f7f7d0
+[ 7340.417218] R13: 03ffffffffffffff R14: 00fa06fffffffe00 R15: ffff891f47789500
+[ 7340.418229] FS: 0000000000000000(0000) GS:ffff891ffdfaa000(0000) knlGS:0000000000000000
+[ 7340.419489] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 7340.420286] CR2: 00007f415bfffd58 CR3: 0000000103f03002 CR4: 0000000000772ef0
+[ 7340.421237] PKRU: 55555554
+[ 7340.421623] Call Trace:
+[ 7340.421987] <TASK>
+[ 7340.422309] ? softleaf_from_pte+0x77/0xa0
+[ 7340.422855] swap_pte_batch+0xa7/0x290
+[ 7340.423363] zap_nonpresent_ptes.constprop.0.isra.0+0xd1/0x270
+[ 7340.424102] zap_pte_range+0x281/0x580
+[ 7340.424607] zap_pmd_range.isra.0+0xc9/0x240
+[ 7340.425177] unmap_page_range+0x24d/0x420
+[ 7340.425714] unmap_vmas+0xa1/0x180
+[ 7340.426185] exit_mmap+0xe1/0x3b0
+[ 7340.426644] __mmput+0x41/0x150
+[ 7340.427098] exit_mm+0xb1/0x110
+[ 7340.427539] do_exit+0x1b2/0x460
+[ 7340.427992] do_group_exit+0x2d/0xc0
+[ 7340.428477] get_signal+0x79d/0x7e0
+[ 7340.428957] arch_do_signal_or_restart+0x34/0x100
+[ 7340.429571] exit_to_user_mode_loop+0x8e/0x4c0
+[ 7340.430159] do_syscall_64+0x188/0x6b0
+[ 7340.430672] ? __do_sys_clone3+0xd9/0x120
+[ 7340.431212] ? switch_fpu_return+0x4e/0xd0
+[ 7340.431761] ? arch_exit_to_user_mode_prepare.isra.0+0xa1/0xc0
+[ 7340.432498] ? do_syscall_64+0xbb/0x6b0
+[ 7340.433015] ? __handle_mm_fault+0x445/0x690
+[ 7340.433582] ? count_memcg_events+0xd6/0x210
+[ 7340.434151] ? handle_mm_fault+0x212/0x340
+[ 7340.434697] ? do_user_addr_fault+0x2b4/0x7b0
+[ 7340.435271] ? clear_bhb_loop+0x30/0x80
+[ 7340.435788] ? clear_bhb_loop+0x30/0x80
+[ 7340.436299] ? clear_bhb_loop+0x30/0x80
+[ 7340.436812] ? clear_bhb_loop+0x30/0x80
+[ 7340.437323] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 7340.437973] RIP: 0033:0x7f4161b14169
+[ 7340.438468] Code: Unable to access opcode bytes at 0x7f4161b1413f.
+[ 7340.439242] RSP: 002b:00007ffc6ebfa770 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
+[ 7340.440173] RAX: fffffffffffffe00 RBX: 00000000000005a1 RCX: 00007f4161b14169
+[ 7340.441061] RDX: 00000000000005a1 RSI: 0000000000000109 RDI: 00007f415bfff990
+[ 7340.441943] RBP: 00007ffc6ebfa7a0 R08: 0000000000000000 R09: 00000000ffffffff
+[ 7340.442824] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+[ 7340.443707] R13: 0000000000000000 R14: 00007f415bfff990 R15: 00007f415bfff6c0
+[ 7340.444586] </TASK>
+[ 7340.444922] Modules linked in: rfkill intel_rapl_msr intel_rapl_common intel_uncore_frequency_common skx_edac_common nfit libnvdimm kvm_intel vfat fat kvm snd_pcm irqbypass rapl iTCO_wdt snd_timer intel_pmc_bxt iTCO_vendor_support snd ixgbevf virtio_net soundcore i2c_i801 pcspkr libeth_xdp net_failover i2c_smbus lpc_ich failover libeth virtio_balloon joydev 9p fuse loop zram lz4hc_compress lz4_compress 9pnet_virtio 9pnet netfs ghash_clmulni_intel serio_raw qemu_fw_cfg
+[ 7340.449650] ---[ end trace 0000000000000000 ]---
+
+The issue can be fixed in all in-tree drivers, but we cannot just trust OOT
+drivers to not do this. Therefore, make tailroom a signed int and produce a
+warning when it is negative to prevent such mistakes in the future.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-10-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 2482c5d162f5f..1f96c3aa01cad 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4154,13 +4154,14 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+ struct xdp_rxq_info *rxq = xdp->rxq;
+- unsigned int tailroom;
++ int tailroom;
+
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+ tailroom = rxq->frag_size - skb_frag_size(frag) -
+ skb_frag_off(frag) % rxq->frag_size;
++ WARN_ON_ONCE(tailroom < 0);
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From d0c00f29f5f462a9993a945b7a8334565c0ac693 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:42 +0100
+Subject: xdp: use modulo operation to calculate XDP frag tailroom
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 88b6b7f7b216108a09887b074395fa7b751880b1 ]
+
+The current formula for calculating XDP tailroom in mbuf packets works only
+if each frag has its own page (if rxq->frag_size is PAGE_SIZE), this
+defeats the purpose of the parameter overall and without any indication
+leads to negative calculated tailroom on at least half of frags, if shared
+pages are used.
+
+There are not many drivers that set rxq->frag_size. Among them:
+* i40e and enetc always split page uniformly between frags, use shared
+ pages
+* ice uses page_pool frags via libeth, those are power-of-2 and uniformly
+ distributed across page
+* idpf has variable frag_size with XDP on, so current API is not applicable
+* mlx5, mtk and mvneta use PAGE_SIZE or 0 as frag_size for page_pool
+
+As for AF_XDP ZC, only ice, i40e and idpf declare frag_size for it. Modulo
+operation yields good results for aligned chunks, they are all power-of-2,
+between 2K and PAGE_SIZE. Formula without modulo fails when chunk_size is
+2K. Buffers in unaligned mode are not distributed uniformly, so modulo
+operation would not work.
+
+To accommodate unaligned buffers, we could define frag_size as
+data + tailroom, and hence do not subtract offset when calculating
+tailroom, but this would necessitate more changes in the drivers.
+
+Define rxq->frag_size as an even portion of a page that fully belongs to a
+single frag. When calculating tailroom, locate the data start within such
+portion by performing a modulo operation on page offset.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-2-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 182a7388e84f5..2482c5d162f5f 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4159,7 +4159,8 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+- tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
++ tailroom = rxq->frag_size - skb_frag_size(frag) -
++ skb_frag_off(frag) % rxq->frag_size;
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From b2e7edbd0a18d146d35375f1297d07afc048b32f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 2967039398463..520756159d3d3 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -379,11 +379,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From edf1ffeee20e0e0ff0f4473b9540173799acf923 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:26 +0000
+Subject: xsk: Fix fragment node deletion to prevent buffer leak
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit 60abb0ac11dccd6b98fd9182bc5f85b621688861 ]
+
+After commit b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node"),
+the list_node field is reused for both the xskb pool list and the buffer
+free list; this causes a buffer leak as described below.
+
+xp_free() checks if a buffer is already on the free list using
+list_empty(&xskb->list_node). When list_del() is used to remove a node
+from the xskb pool list, it doesn't reinitialize the node pointers.
+This means list_empty() will return false even after the node has been
+removed, causing xp_free() to incorrectly skip adding the buffer to the
+free list.
+
+Fix this by using list_del_init() instead of list_del() in all fragment
+handling paths; this ensures the list node is reinitialized after removal,
+allowing list_empty() to work correctly.
+
+Fixes: b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node")
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-2-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: f7387d6579d6 ("xsk: Fix zero-copy AF_XDP fragment drop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 6 +++---
+ net/xdp/xsk.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 40085afd91607..27d0068d0b704 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -127,7 +127,7 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ goto out;
+
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ xp_free(pos);
+ }
+
+@@ -152,7 +152,7 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+ struct xdp_buff_xsk, list_node);
+ if (frag) {
+- list_del(&frag->list_node);
++ list_del_init(&frag->list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+ {
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->list_node);
++ list_del_init(&xskb->list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index bbf45b68dbf5c..158c92918bc3a 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -178,7 +178,7 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ }
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 6ecf1befb9e32a6c9056ea745a42507c450039ea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:27 +0000
+Subject: xsk: Fix zero-copy AF_XDP fragment drop
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit f7387d6579d65efd490a864254101cb665f2e7a7 ]
+
+AF_XDP should ensure that only a complete packet is sent to application.
+In the zero-copy case, if the Rx queue gets full as fragments are being
+enqueued, the remaining fragments are dropped.
+
+For the multi-buffer case, add a check to ensure that the Rx queue has
+enough space for all fragments of a packet before starting to enqueue
+them.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-3-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xdp/xsk.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 158c92918bc3a..ed1aeaded9be7 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -159,25 +159,31 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ struct xdp_buff_xsk *pos, *tmp;
+ struct list_head *xskb_list;
+ u32 contd = 0;
++ u32 num_desc;
+ int err;
+
+- if (frags)
+- contd = XDP_PKT_CONTD;
++ if (likely(!frags)) {
++ err = __xsk_rcv_zc(xs, xskb, len, contd);
++ if (err)
++ goto err;
++ return 0;
++ }
+
+- err = __xsk_rcv_zc(xs, xskb, len, contd);
+- if (err)
++ contd = XDP_PKT_CONTD;
++ num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
++ if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
++ xs->rx_queue_full++;
++ err = -ENOBUFS;
+ goto err;
+- if (likely(!frags))
+- return 0;
++ }
+
++ __xsk_rcv_zc(xs, xskb, len, contd);
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+- err = __xsk_rcv_zc(xs, pos, len, contd);
+- if (err)
+- goto err;
++ __xsk_rcv_zc(xs, pos, len, contd);
+ list_del_init(&pos->list_node);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7e29783242d255e31892c786eebee681eb04350a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 14:24:53 +0200
+Subject: xsk: Get rid of xdp_buff_xsk::xskb_list_node
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit b692bf9a7543af7ad11a59d182a3757578f0ba53 ]
+
+Let's bring xdp_buff_xsk back to occupying 2 cachelines by removing
+xskb_list_node - for the purpose of gathering the xskb frags
+free_list_node can be used, head of the list (xsk_buff_pool::xskb_list)
+stays as-is, just reuse the node ptr.
+
+It is safe to do as a single xdp_buff_xsk can never reside in two
+pool's lists simultaneously.
+
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Link: https://lore.kernel.org/bpf/20241007122458.282590-2-maciej.fijalkowski@intel.com
+Stable-dep-of: f7387d6579d6 ("xsk: Fix zero-copy AF_XDP fragment drop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 14 +++++++-------
+ include/net/xsk_buff_pool.h | 1 -
+ net/xdp/xsk.c | 4 ++--
+ net/xdp/xsk_buff_pool.c | 1 -
+ 4 files changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 0a5dca2b2b3f6..360bc1244c6af 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -126,8 +126,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+- list_del(&pos->xskb_list_node);
++ list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
++ list_del(&pos->free_list_node);
+ xp_free(pos);
+ }
+
+@@ -140,7 +140,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+ {
+ struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+- list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
++ list_add_tail(&frag->free_list_node, &frag->pool->xskb_list);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+@@ -150,9 +150,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+- struct xdp_buff_xsk, xskb_list_node);
++ struct xdp_buff_xsk, free_list_node);
+ if (frag) {
+- list_del(&frag->xskb_list_node);
++ list_del(&frag->free_list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+ {
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->xskb_list_node);
++ list_del(&xskb->free_list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+@@ -172,7 +172,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+- xskb_list_node);
++ free_list_node);
+ return &frag->xdp;
+ }
+
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index 823fd5c7a3b18..ff3ad172fffc1 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -30,7 +30,6 @@ struct xdp_buff_xsk {
+ struct xsk_buff_pool *pool;
+ u64 orig_addr;
+ struct list_head free_list_node;
+- struct list_head xskb_list_node;
+ };
+
+ #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index f031b07baa57a..c039db447d2e7 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -171,14 +171,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
++ list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->xskb_list_node);
++ list_del(&pos->free_list_node);
+ }
+
+ return 0;
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index b69dbd8615fc4..d1a9f4e9b685a 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -103,7 +103,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ xskb->pool = pool;
+ xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+ INIT_LIST_HEAD(&xskb->free_list_node);
+- INIT_LIST_HEAD(&xskb->xskb_list_node);
+ if (pool->unaligned)
+ pool->free_heads[i] = xskb;
+ else
+--
+2.51.0
+
--- /dev/null
+From f9051d323f6c36dbb4c956434436fef773e122e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:43 +0100
+Subject: xsk: introduce helper to determine rxq->frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 16394d80539937d348dd3b9ea32415c54e67a81b ]
+
+rxq->frag_size is basically a step between consecutive strictly aligned
+frames. In ZC mode, chunk size fits exactly, but if chunks are unaligned,
+there is no safe way to determine accessible space to grow tailroom.
+
+Report frag_size to be zero, if chunks are unaligned, chunk_size otherwise.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-3-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 27d0068d0b704..997e28dd38963 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -47,6 +47,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+@@ -296,6 +301,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return 0;
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return 0;
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+--
+2.51.0
+
--- /dev/null
+From c8cce00bc798e107e921ffd3e50e1b63a1854f88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 14:24:54 +0200
+Subject: xsk: s/free_list_node/list_node/
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 30ec2c1baaead43903ad63ff8e3083949059083c ]
+
+Now that free_list_node's purpose is two-folded, make it just a
+'list_node'.
+
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Link: https://lore.kernel.org/bpf/20241007122458.282590-3-maciej.fijalkowski@intel.com
+Stable-dep-of: f7387d6579d6 ("xsk: Fix zero-copy AF_XDP fragment drop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 14 +++++++-------
+ include/net/xsk_buff_pool.h | 2 +-
+ net/xdp/xsk.c | 4 ++--
+ net/xdp/xsk_buff_pool.c | 14 +++++++-------
+ 4 files changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 360bc1244c6af..40085afd91607 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -126,8 +126,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+- list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
+- list_del(&pos->free_list_node);
++ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
++ list_del(&pos->list_node);
+ xp_free(pos);
+ }
+
+@@ -140,7 +140,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+ {
+ struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+- list_add_tail(&frag->free_list_node, &frag->pool->xskb_list);
++ list_add_tail(&frag->list_node, &frag->pool->xskb_list);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+@@ -150,9 +150,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+- struct xdp_buff_xsk, free_list_node);
++ struct xdp_buff_xsk, list_node);
+ if (frag) {
+- list_del(&frag->free_list_node);
++ list_del(&frag->list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -163,7 +163,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+ {
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->free_list_node);
++ list_del(&xskb->list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+@@ -172,7 +172,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+- free_list_node);
++ list_node);
+ return &frag->xdp;
+ }
+
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index ff3ad172fffc1..e21062cf62294 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -29,7 +29,7 @@ struct xdp_buff_xsk {
+ dma_addr_t frame_dma;
+ struct xsk_buff_pool *pool;
+ u64 orig_addr;
+- struct list_head free_list_node;
++ struct list_head list_node;
+ };
+
+ #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index c039db447d2e7..bbf45b68dbf5c 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -171,14 +171,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+- list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
++ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->free_list_node);
++ list_del(&pos->list_node);
+ }
+
+ return 0;
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index d1a9f4e9b685a..9db08365fcb00 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -102,7 +102,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ xskb = &pool->heads[i];
+ xskb->pool = pool;
+ xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+- INIT_LIST_HEAD(&xskb->free_list_node);
++ INIT_LIST_HEAD(&xskb->list_node);
+ if (pool->unaligned)
+ pool->free_heads[i] = xskb;
+ else
+@@ -549,8 +549,8 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
+ } else {
+ pool->free_list_cnt--;
+ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
+- free_list_node);
+- list_del_init(&xskb->free_list_node);
++ list_node);
++ list_del_init(&xskb->list_node);
+ }
+
+ xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
+@@ -616,8 +616,8 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
+
+ i = nb_entries;
+ while (i--) {
+- xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
+- list_del_init(&xskb->free_list_node);
++ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node);
++ list_del_init(&xskb->list_node);
+
+ *xdp = &xskb->xdp;
+ xdp++;
+@@ -687,11 +687,11 @@ EXPORT_SYMBOL(xp_can_alloc);
+
+ void xp_free(struct xdp_buff_xsk *xskb)
+ {
+- if (!list_empty(&xskb->free_list_node))
++ if (!list_empty(&xskb->list_node))
+ return;
+
+ xskb->pool->free_list_cnt++;
+- list_add(&xskb->free_list_node, &xskb->pool->free_list);
++ list_add(&xskb->list_node, &xskb->pool->free_list);
+ }
+ EXPORT_SYMBOL(xp_free);
+
+--
+2.51.0
+
--- /dev/null
+From 6a64e213fb538c46a95ea84974afdc258e13c565 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 22:37:53 +0530
+Subject: amd-xgbe: fix MAC_TCR_SS register width for 2.5G and 10M speeds
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 9439a661c2e80485406ce2c90b107ca17858382d ]
+
+Extend the MAC_TCR_SS (Speed Select) register field width from 2 bits
+to 3 bits to properly support all speed settings.
+
+The MAC_TCR register's SS field encoding requires 3 bits to represent
+all supported speeds:
+ - 0x00: 10Gbps (XGMII)
+ - 0x02: 2.5Gbps (GMII) / 100Mbps
+ - 0x03: 1Gbps / 10Mbps
+ - 0x06: 2.5Gbps (XGMII) - P100a only
+
+With only 2 bits, values 0x04-0x07 cannot be represented, which breaks
+2.5G XGMII mode on newer platforms and causes incorrect speed select
+values to be programmed.
+
+Fixes: 07445f3c7ca1 ("amd-xgbe: Add support for 10 Mbps speed")
+Co-developed-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260226170753.250312-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 62b01de93db49..826c5caa70d71 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -431,7 +431,7 @@
+ #define MAC_SSIR_SSINC_INDEX 16
+ #define MAC_SSIR_SSINC_WIDTH 8
+ #define MAC_TCR_SS_INDEX 29
+-#define MAC_TCR_SS_WIDTH 2
++#define MAC_TCR_SS_WIDTH 3
+ #define MAC_TCR_TE_INDEX 0
+ #define MAC_TCR_TE_WIDTH 1
+ #define MAC_TCR_VNE_INDEX 24
+--
+2.51.0
+
--- /dev/null
+From f7f8eded9905671436ddc153cb9d8f5938da2955 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary as these
+functions are called from suspend/resume paths which are already serialized
+by the PM core.
+
+Fix this by removing the spinlock. Since the only code that takes this
+lock is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index ba5e728ae6308..89ece3dbd773a 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1089,7 +1089,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1100,8 +1099,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1117,8 +1114,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1128,7 +1123,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1139,8 +1133,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1155,8 +1147,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index d1f0419edb234..7d45ea22a02e2 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -76,7 +76,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index e8bbb68059013..6fec51a065e22 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1003,9 +1003,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From 48d86c4ec074c406239cca0a95e4bf7635bd6fd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 18:48:05 -0800
+Subject: ata: libata-eh: Fix detection of deferred qc timeouts
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit ee0e6e69a772d601e152e5368a1da25d656122a8 ]
+
+If the ata_qc_for_each_raw() loop finishes without finding a matching SCSI
+command for any QC, the variable qc will hold a pointer to the last element
+examined, which has the tag i == ATA_MAX_QUEUE - 1. This qc can match the
+port deferred QC (ap->deferred_qc).
+
+If that happens, the condition qc == ap->deferred_qc evaluates to true
+despite the loop not breaking with a match on the SCSI command for this QC.
+In that case, the error handler mistakenly intercepts a command that has
+not been issued yet and that has not timed out, and thus erroneously
+returning a timeout error.
+
+Fix the problem by checking for i < ATA_MAX_QUEUE in addition to
+qc == ap->deferred_qc.
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Assisted-by: Gemini:gemini-3.1-pro
+Fixes: eddb98ad9364 ("ata: libata-eh: correctly handle deferred qc timeouts")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+[cassel: modified commit log as suggested by Damien]
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-eh.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index b373cceb95d23..44fddfbb76296 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -647,7 +647,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ break;
+ }
+
+- if (qc == ap->deferred_qc) {
++ if (i < ATA_MAX_QUEUE && qc == ap->deferred_qc) {
+ /*
+ * This is a deferred command that timed out while
+ * waiting for the command queue to drain. Since the qc
+--
+2.51.0
+
--- /dev/null
+From ed1fef2fd7017693914a5c4a27d9dfd8bfe20c99 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index afb8d3eb21850..c39dc5d367979 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1260,24 +1260,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 8b1e9cb4174dc14893123631c37efc1f4529fbeb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:15:50 +0800
+Subject: block: use trylock to avoid lockdep circular dependency in sysfs
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit ce8ee8583ed83122405eabaa8fb351be4d9dc65c ]
+
+Use trylock instead of blocking lock acquisition for update_nr_hwq_lock
+in queue_requests_store() and elv_iosched_store() to avoid circular lock
+dependency with kernfs active reference during concurrent disk deletion:
+
+ update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
+ kn->active -> update_nr_hwq_lock (via sysfs write path)
+
+Return -EBUSY when the lock is not immediately available.
+
+Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/linux-block/CAHj4cs-em-4acsHabMdT=jJhXkCzjnprD-aQH1OgrZo4nTnmMw@mail.gmail.com/
+Fixes: 626ff4f8ebcb ("blk-mq: convert to serialize updating nr_requests with update_nr_hwq_lock")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Tested-by: Yi Zhang <yi.zhang@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-sysfs.c | 8 +++++++-
+ block/elevator.c | 12 +++++++++++-
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e0a70d26972b3..af12526d866a9 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -78,8 +78,14 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
+ /*
+ * Serialize updating nr_requests with concurrent queue_requests_store()
+ * and switching elevator.
++ *
++ * Use trylock to avoid circular lock dependency with kernfs active
++ * reference during concurrent disk deletion:
++ * update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
++ * kn->active -> update_nr_hwq_lock (via this sysfs write path)
+ */
+- down_write(&set->update_nr_hwq_lock);
++ if (!down_write_trylock(&set->update_nr_hwq_lock))
++ return -EBUSY;
+
+ if (nr == q->nr_requests)
+ goto unlock;
+diff --git a/block/elevator.c b/block/elevator.c
+index a2f8b2251dc6e..7a97998cd8bd7 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -806,7 +806,16 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ elv_iosched_load_module(ctx.name);
+ ctx.type = elevator_find_get(ctx.name);
+
+- down_read(&set->update_nr_hwq_lock);
++ /*
++ * Use trylock to avoid circular lock dependency with kernfs active
++ * reference during concurrent disk deletion:
++ * update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
++ * kn->active -> update_nr_hwq_lock (via this sysfs write path)
++ */
++ if (!down_read_trylock(&set->update_nr_hwq_lock)) {
++ ret = -EBUSY;
++ goto out;
++ }
+ if (!blk_queue_no_elv_switch(q)) {
+ ret = elevator_change(q, &ctx);
+ if (!ret)
+@@ -816,6 +825,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ }
+ up_read(&set->update_nr_hwq_lock);
+
++out:
+ if (ctx.type)
+ elevator_put(ctx.type);
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From c0422d2506d23fb1f6e370c6269be9155efa19a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:03:01 +0800
+Subject: bpf/bonding: reject vlan+srcmac xmit_hash_policy change when XDP is
+ loaded
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 479d589b40b836442bbdadc3fdb37f001bb67f26 ]
+
+bond_option_mode_set() already rejects mode changes that would make a
+loaded XDP program incompatible via bond_xdp_check(). However,
+bond_option_xmit_hash_policy_set() has no such guard.
+
+For 802.3ad and balance-xor modes, bond_xdp_check() returns false when
+xmit_hash_policy is vlan+srcmac, because the 802.1q payload is usually
+absent due to hardware offload. This means a user can:
+
+1. Attach a native XDP program to a bond in 802.3ad/balance-xor mode
+ with a compatible xmit_hash_policy (e.g. layer2+3).
+2. Change xmit_hash_policy to vlan+srcmac while XDP remains loaded.
+
+This leaves bond->xdp_prog set but bond_xdp_check() now returning false
+for the same device. When the bond is later destroyed, dev_xdp_uninstall()
+calls bond_xdp_set(dev, NULL, NULL) to remove the program, which hits
+the bond_xdp_check() guard and returns -EOPNOTSUPP, triggering:
+
+WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL))
+
+Fix this by rejecting xmit_hash_policy changes to vlan+srcmac when an
+XDP program is loaded on a bond in 802.3ad or balance-xor mode.
+
+commit 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+introduced bond_xdp_check() which returns false for 802.3ad/balance-xor
+modes when xmit_hash_policy is vlan+srcmac. The check was wired into
+bond_xdp_set() to reject XDP attachment with an incompatible policy, but
+the symmetric path -- preventing xmit_hash_policy from being changed to an
+incompatible value after XDP is already loaded -- was left unguarded in
+bond_option_xmit_hash_policy_set().
+
+Note:
+commit 094ee6017ea0 ("bonding: check xdp prog when set bond mode")
+later added a similar guard to bond_option_mode_set(), but
+bond_option_xmit_hash_policy_set() remained unprotected.
+
+Reported-by: syzbot+5a287bcdc08104bc3132@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6995aff6.050a0220.2eeac1.014e.GAE@google.com/T/
+Fixes: 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260226080306.98766-2-jiayuan.chen@linux.dev
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 9 +++++++--
+ drivers/net/bonding/bond_options.c | 2 ++
+ include/net/bonding.h | 1 +
+ 3 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index dba8f68690947..55f98d6254af8 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -324,7 +324,7 @@ static bool bond_sk_check(struct bonding *bond)
+ }
+ }
+
+-bool bond_xdp_check(struct bonding *bond, int mode)
++bool __bond_xdp_check(int mode, int xmit_policy)
+ {
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN:
+@@ -335,7 +335,7 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ /* vlan+srcmac is not supported with XDP as in most cases the 802.1q
+ * payload is not in the packet due to hardware offload.
+ */
+- if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
++ if (xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return true;
+ fallthrough;
+ default:
+@@ -343,6 +343,11 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ }
+ }
+
++bool bond_xdp_check(struct bonding *bond, int mode)
++{
++ return __bond_xdp_check(mode, bond->params.xmit_policy);
++}
++
+ /*---------------------------------- VLAN -----------------------------------*/
+
+ /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index f1c6e9d8f6167..adc216df43459 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1574,6 +1574,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
+ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+ {
++ if (bond->xdp_prog && !__bond_xdp_check(BOND_MODE(bond), newval->value))
++ return -EOPNOTSUPP;
+ netdev_dbg(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.xmit_policy = newval->value;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 4620784035570..99c1bdadcd11a 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -698,6 +698,7 @@ void bond_debug_register(struct bonding *bond);
+ void bond_debug_unregister(struct bonding *bond);
+ void bond_debug_reregister(struct bonding *bond);
+ const char *bond_mode_name(int mode);
++bool __bond_xdp_check(int mode, int xmit_policy);
+ bool bond_xdp_check(struct bonding *bond, int mode);
+ void bond_setup(struct net_device *bond_dev);
+ unsigned int bond_get_num_tx_queues(void);
+--
+2.51.0
+
--- /dev/null
+From 703d2d48fc856d524096d76ff10fe2f0c933de02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2026 16:02:47 -0800
+Subject: bpf: collect only live registers in linked regs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eduard Zingerman <eddyz87@gmail.com>
+
+[ Upstream commit 2658a1720a1944fbaeda937000ad2b3c3dfaf1bb ]
+
+Fix an inconsistency between func_states_equal() and
+collect_linked_regs():
+- regsafe() uses check_ids() to verify that cached and current states
+ have identical register id mapping.
+- func_states_equal() calls regsafe() only for registers computed as
+ live by compute_live_registers().
+- clean_live_states() is supposed to remove dead registers from cached
+ states, but it can skip states belonging to an iterator-based loop.
+- collect_linked_regs() collects all registers sharing the same id,
+ ignoring the marks computed by compute_live_registers().
+ Linked registers are stored in the state's jump history.
+- backtrack_insn() marks all linked registers for an instruction
+ as precise whenever one of the linked registers is precise.
+
+The above might lead to a scenario:
+- There is an instruction I with register rY known to be dead at I.
+- Instruction I is reached via two paths: first A, then B.
+- On path A:
+ - There is an id link between registers rX and rY.
+ - Checkpoint C is created at I.
+ - Linked register set {rX, rY} is saved to the jump history.
+ - rX is marked as precise at I, causing both rX and rY
+ to be marked precise at C.
+- On path B:
+ - There is no id link between registers rX and rY,
+ otherwise register states are sub-states of those in C.
+ - Because rY is dead at I, check_ids() returns true.
+ - Current state is considered equal to checkpoint C,
+ propagate_precision() propagates spurious precision
+ mark for register rY along the path B.
+ - Depending on a program, this might hit verifier_bug()
+ in the backtrack_insn(), e.g. if rY ∈ [r1..r5]
+ and backtrack_insn() spots a function call.
+
+The reproducer program is in the next patch.
+This was hit by sched_ext scx_lavd scheduler code.
+
+Changes in tests:
+- verifier_scalar_ids.c selftests need modification to preserve
+ some registers as live for __msg() checks.
+- exceptions_assert.c adjusted to match changes in the verifier log,
+ R0 is dead after conditional instruction and thus does not get
+ range.
+- precise.c adjusted to match changes in the verifier log, register r9
+  is dead after comparison and its range is not important for test.
+
+Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
+Fixes: 0fb3cf6110a5 ("bpf: use register liveness information for func_states_equal")
+Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/r/20260306-linked-regs-and-propagate-precision-v1-1-18e859be570d@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 13 ++++-
+ .../selftests/bpf/progs/exceptions_assert.c | 34 +++++------
+ .../selftests/bpf/progs/verifier_scalar_ids.c | 56 ++++++++++++++-----
+ .../testing/selftests/bpf/verifier/precise.c | 8 +--
+ 4 files changed, 73 insertions(+), 38 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index e37ff28e3cd9d..74d645add518d 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -16783,17 +16783,24 @@ static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_st
+ * in verifier state, save R in linked_regs if R->id == id.
+ * If there are too many Rs sharing same id, reset id for leftover Rs.
+ */
+-static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id,
++static void collect_linked_regs(struct bpf_verifier_env *env,
++ struct bpf_verifier_state *vstate,
++ u32 id,
+ struct linked_regs *linked_regs)
+ {
++ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ struct bpf_func_state *func;
+ struct bpf_reg_state *reg;
++ u16 live_regs;
+ int i, j;
+
+ id = id & ~BPF_ADD_CONST;
+ for (i = vstate->curframe; i >= 0; i--) {
++ live_regs = aux[frame_insn_idx(vstate, i)].live_regs_before;
+ func = vstate->frame[i];
+ for (j = 0; j < BPF_REG_FP; j++) {
++ if (!(live_regs & BIT(j)))
++ continue;
+ reg = &func->regs[j];
+ __collect_linked_regs(linked_regs, reg, id, i, j, true);
+ }
+@@ -16999,9 +17006,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ * if parent state is created.
+ */
+ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id)
+- collect_linked_regs(this_branch, src_reg->id, &linked_regs);
++ collect_linked_regs(env, this_branch, src_reg->id, &linked_regs);
+ if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
+- collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
++ collect_linked_regs(env, this_branch, dst_reg->id, &linked_regs);
+ if (linked_regs.cnt > 1) {
+ err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
+ if (err)
+diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
+index a01c2736890f9..858af5988a38a 100644
+--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
++++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
+@@ -18,43 +18,43 @@
+ return *(u64 *)num; \
+ }
+
+-__msg(": R0=0xffffffff80000000")
++__msg("R{{.}}=0xffffffff80000000")
+ check_assert(s64, ==, eq_int_min, INT_MIN);
+-__msg(": R0=0x7fffffff")
++__msg("R{{.}}=0x7fffffff")
+ check_assert(s64, ==, eq_int_max, INT_MAX);
+-__msg(": R0=0")
++__msg("R{{.}}=0")
+ check_assert(s64, ==, eq_zero, 0);
+-__msg(": R0=0x8000000000000000 R1=0x8000000000000000")
++__msg("R{{.}}=0x8000000000000000")
+ check_assert(s64, ==, eq_llong_min, LLONG_MIN);
+-__msg(": R0=0x7fffffffffffffff R1=0x7fffffffffffffff")
++__msg("R{{.}}=0x7fffffffffffffff")
+ check_assert(s64, ==, eq_llong_max, LLONG_MAX);
+
+-__msg(": R0=scalar(id=1,smax=0x7ffffffe)")
++__msg("R{{.}}=scalar(id=1,smax=0x7ffffffe)")
+ check_assert(s64, <, lt_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+ check_assert(s64, <, lt_zero, 0);
+-__msg(": R0=scalar(id=1,smax=0xffffffff7fffffff")
++__msg("R{{.}}=scalar(id=1,smax=0xffffffff7fffffff")
+ check_assert(s64, <, lt_neg, INT_MIN);
+
+-__msg(": R0=scalar(id=1,smax=0x7fffffff)")
++__msg("R{{.}}=scalar(id=1,smax=0x7fffffff)")
+ check_assert(s64, <=, le_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smax=0)")
++__msg("R{{.}}=scalar(id=1,smax=0)")
+ check_assert(s64, <=, le_zero, 0);
+-__msg(": R0=scalar(id=1,smax=0xffffffff80000000")
++__msg("R{{.}}=scalar(id=1,smax=0xffffffff80000000")
+ check_assert(s64, <=, le_neg, INT_MIN);
+
+-__msg(": R0=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >, gt_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >, gt_zero, 0);
+-__msg(": R0=scalar(id=1,smin=0xffffffff80000001")
++__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000001")
+ check_assert(s64, >, gt_neg, INT_MIN);
+
+-__msg(": R0=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >=, ge_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >=, ge_zero, 0);
+-__msg(": R0=scalar(id=1,smin=0xffffffff80000000")
++__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000000")
+ check_assert(s64, >=, ge_neg, INT_MIN);
+
+ SEC("?tc")
+diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+index c0ce690ddb68a..1fdd85b4b8443 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
++++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+@@ -40,6 +40,9 @@ __naked void linked_regs_bpf_k(void)
+ */
+ "r3 = r10;"
+ "r3 += r0;"
++ /* Mark r1 and r2 as alive. */
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -73,6 +76,9 @@ __naked void linked_regs_bpf_x_src(void)
+ */
+ "r4 = r10;"
+ "r4 += r0;"
++ /* Mark r1 and r2 as alive. */
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -106,6 +112,10 @@ __naked void linked_regs_bpf_x_dst(void)
+ */
+ "r4 = r10;"
+ "r4 += r3;"
++ /* Mark r1 and r2 as alive. */
++ "r0 = r0;"
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -143,6 +153,9 @@ __naked void linked_regs_broken_link(void)
+ */
+ "r3 = r10;"
+ "r3 += r0;"
++ /* Mark r1 and r2 as alive. */
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -156,16 +169,16 @@ __naked void linked_regs_broken_link(void)
+ */
+ SEC("socket")
+ __success __log_level(2)
+-__msg("12: (0f) r2 += r1")
++__msg("17: (0f) r2 += r1")
+ /* Current state */
+-__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
+-__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
++__msg("frame2: last_idx 17 first_idx 14 subseq_idx -1 ")
++__msg("frame2: regs=r1 stack= before 16: (bf) r2 = r10")
+ __msg("frame2: parent state regs=r1 stack=")
+ __msg("frame1: parent state regs= stack=")
+ __msg("frame0: parent state regs= stack=")
+ /* Parent state */
+-__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
+-__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
++__msg("frame2: last_idx 13 first_idx 13 subseq_idx 14 ")
++__msg("frame2: regs=r1 stack= before 13: (25) if r1 > 0x7 goto pc+0")
+ __msg("frame2: parent state regs=r1 stack=")
+ /* frame1.r{6,7} are marked because mark_precise_scalar_ids()
+ * looks for all registers with frame2.r1.id in the current state
+@@ -173,20 +186,20 @@ __msg("frame2: parent state regs=r1 stack=")
+ __msg("frame1: parent state regs=r6,r7 stack=")
+ __msg("frame0: parent state regs=r6 stack=")
+ /* Parent state */
+-__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
+-__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
++__msg("frame2: last_idx 9 first_idx 9 subseq_idx 13")
++__msg("frame2: regs=r1 stack= before 9: (85) call pc+3")
+ /* frame1.r1 is marked because of backtracking of call instruction */
+ __msg("frame1: parent state regs=r1,r6,r7 stack=")
+ __msg("frame0: parent state regs=r6 stack=")
+ /* Parent state */
+-__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
+-__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
+-__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
++__msg("frame1: last_idx 8 first_idx 7 subseq_idx 9")
++__msg("frame1: regs=r1,r6,r7 stack= before 8: (bf) r7 = r1")
++__msg("frame1: regs=r1,r6 stack= before 7: (bf) r6 = r1")
+ __msg("frame1: parent state regs=r1 stack=")
+ __msg("frame0: parent state regs=r6 stack=")
+ /* Parent state */
+-__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
+-__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
++__msg("frame1: last_idx 4 first_idx 4 subseq_idx 7")
++__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
+ __msg("frame0: parent state regs=r1,r6 stack=")
+ /* Parent state */
+ __msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
+@@ -204,6 +217,7 @@ __naked void precision_many_frames(void)
+ "r1 = r0;"
+ "r6 = r0;"
+ "call precision_many_frames__foo;"
++ "r6 = r6;" /* mark r6 as live */
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+@@ -220,6 +234,8 @@ void precision_many_frames__foo(void)
+ "r6 = r1;"
+ "r7 = r1;"
+ "call precision_many_frames__bar;"
++ "r6 = r6;" /* mark r6 as live */
++ "r7 = r7;" /* mark r7 as live */
+ "exit"
+ ::: __clobber_all);
+ }
+@@ -229,6 +245,8 @@ void precision_many_frames__bar(void)
+ {
+ asm volatile (
+ "if r1 > 7 goto +0;"
++ "r6 = 0;" /* mark r6 as live */
++ "r7 = 0;" /* mark r7 as live */
+ /* force r1 to be precise, this eventually marks:
+ * - bar frame r1
+ * - foo frame r{1,6,7}
+@@ -340,6 +358,8 @@ __naked void precision_two_ids(void)
+ "r3 += r7;"
+ /* force r9 to be precise, this also marks r8 */
+ "r3 += r9;"
++ "r6 = r6;" /* mark r6 as live */
++ "r8 = r8;" /* mark r8 as live */
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+@@ -353,7 +373,7 @@ __flag(BPF_F_TEST_STATE_FREQ)
+ * collect_linked_regs() can't tie more than 6 registers for a single insn.
+ */
+ __msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
+-__msg("9: (bf) r6 = r6 ; R6=scalar(id=2")
++__msg("14: (bf) r6 = r6 ; R6=scalar(id=2")
+ /* check that r{0-5} are marked precise after 'if' */
+ __msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
+ __msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
+@@ -372,6 +392,12 @@ __naked void linked_regs_too_many_regs(void)
+ "r6 = r0;"
+ /* propagate range for r{0-6} */
+ "if r0 > 7 goto +0;"
++ /* keep r{1-5} live */
++ "r1 = r1;"
++ "r2 = r2;"
++ "r3 = r3;"
++ "r4 = r4;"
++ "r5 = r5;"
+ /* make r6 appear in the log */
+ "r6 = r6;"
+ /* force r0 to be precise,
+@@ -517,7 +543,7 @@ __naked void check_ids_in_regsafe_2(void)
+ "*(u64*)(r10 - 8) = r1;"
+ /* r9 = pointer to stack */
+ "r9 = r10;"
+- "r9 += -8;"
++ "r9 += -16;"
+ /* r8 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r8 = r0;"
+@@ -538,6 +564,8 @@ __naked void check_ids_in_regsafe_2(void)
+ "if r7 > 4 goto l2_%=;"
+ /* Access memory at r9[r6] */
+ "r9 += r6;"
++ "r9 += r7;"
++ "r9 += r8;"
+ "r0 = *(u8*)(r9 + 0);"
+ "l2_%=:"
+ "r0 = 0;"
+diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
+index 59a020c356474..ef3ec56672c22 100644
+--- a/tools/testing/selftests/bpf/verifier/precise.c
++++ b/tools/testing/selftests/bpf/verifier/precise.c
+@@ -44,9 +44,9 @@
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: regs=r2 stack= before 20\
+- mark_precise: frame0: parent state regs=r2,r9 stack=:\
++ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 10\
+- mark_precise: frame0: regs=r2,r9 stack= before 19\
++ mark_precise: frame0: regs=r2 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: regs=r0,r9 stack= before 15\
+@@ -107,9 +107,9 @@
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 20 first_idx 20\
+ mark_precise: frame0: regs=r2 stack= before 20\
+- mark_precise: frame0: parent state regs=r2,r9 stack=:\
++ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 17\
+- mark_precise: frame0: regs=r2,r9 stack= before 19\
++ mark_precise: frame0: regs=r2 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: parent state regs= stack=:",
+--
+2.51.0
+
--- /dev/null
+From 640f58371d1a043630b303bde82051f393e82381 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 17:52:17 +0800
+Subject: bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
+
+From: Lang Xu <xulang@uniontech.com>
+
+[ Upstream commit 56145d237385ca0e7ca9ff7b226aaf2eb8ef368b ]
+
+The root cause of this bug is that when 'bpf_link_put' reduces the
+refcount of 'shim_link->link.link' to zero, the resource is considered
+released but may still be referenced via 'tr->progs_hlist' in
+'cgroup_shim_find'. The actual cleanup of 'tr->progs_hlist' in
+'bpf_shim_tramp_link_release' is deferred. During this window, another
+process can cause a use-after-free via 'bpf_trampoline_link_cgroup_shim'.
+
+Based on Martin KaFai Lau's suggestions, I have created a simple patch.
+
+To fix this:
+ Add an atomic non-zero check in 'bpf_trampoline_link_cgroup_shim'.
+ Only increment the refcount if it is not already zero.
+
+Testing:
+ I verified the fix by adding a delay in
+ 'bpf_shim_tramp_link_release' to make the bug easier to trigger:
+
+static void bpf_shim_tramp_link_release(struct bpf_link *link)
+{
+ /* ... */
+ if (!shim_link->trampoline)
+ return;
+
++ msleep(100);
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link,
+ shim_link->trampoline, NULL));
+ bpf_trampoline_put(shim_link->trampoline);
+}
+
+Before the patch, running a PoC easily reproduced the crash(almost 100%)
+with a call trace similar to KaiyanM's report.
+After the patch, the bug no longer occurs even after millions of
+iterations.
+
+Fixes: 69fd337a975c ("bpf: per-cgroup lsm flavor")
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Closes: https://lore.kernel.org/bpf/3c4ebb0b.46ff8.19abab8abe2.Coremail.kaiyanm@hust.edu.cn/
+Signed-off-by: Lang Xu <xulang@uniontech.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/279EEE1BA1DDB49D+20260303095217.34436-1-xulang@uniontech.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/trampoline.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index 04104397c432e..40f19147f227e 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -751,10 +751,8 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ mutex_lock(&tr->mutex);
+
+ shim_link = cgroup_shim_find(tr, bpf_func);
+- if (shim_link) {
++ if (shim_link && !IS_ERR(bpf_link_inc_not_zero(&shim_link->link.link))) {
+ /* Reusing existing shim attached by the other program. */
+- bpf_link_inc(&shim_link->link.link);
+-
+ mutex_unlock(&tr->mutex);
+ bpf_trampoline_put(tr); /* bpf_trampoline_get above */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 7f55b9b30d9b43decff87d09d6edf7e2408226f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added a locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 5e690a2377e48..756acfd20c6c2 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1170,6 +1170,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From 13a0c562083fee07c2d21fa0e5c2a73a65ca75a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function call free_irq() in its error path with the
+mpc_lock mutex held. But if an interrupt already occurred the
+interrupt handler will be waiting for the mpc_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensures that the IRQ
+handler will exit right away once it has acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index b797e08499d70..b46262e791301 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1214,6 +1214,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1257,12 +1258,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 6c40cb6b3f564ac6eaff88bc00fae7cb2a9663a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:58:12 -0800
+Subject: dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ
+ handler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 74badb9c20b1a9c02a95c735c6d3cd6121679c93 ]
+
+Commit 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ
+handler") introduces a range check for if_id to avoid an out-of-bounds
+access. If an out-of-bounds if_id is detected, the interrupt status is
+not cleared. This may result in an interrupt storm.
+
+Clear the interrupt status after detecting an out-of-bounds if_id to avoid
+the problem.
+
+Found by an experimental AI code review agent at Google.
+
+Fixes: 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ handler")
+Cc: Junrui Luo <moonafterrain@outlook.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20260227055812.1777915-1-linux@roeck-us.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 78e21b46a5ba8..e212a014c8d41 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1533,7 +1533,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+- goto out;
++ goto out_clear;
+ }
+ port_priv = ethsw->ports[if_id];
+
+@@ -1553,6 +1553,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ dpaa2_switch_port_connect_mac(port_priv);
+ }
+
++out_clear:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From cd60906c844b5de9633bdbe9044d9d8779bc869d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 16:24:52 +0800
+Subject: drm/sched: Fix kernel-doc warning for drm_sched_job_done()
+
+From: Yujie Liu <yujie.liu@intel.com>
+
+[ Upstream commit 61ded1083b264ff67ca8c2de822c66b6febaf9a8 ]
+
+There is a kernel-doc warning for the scheduler:
+
+Warning: drivers/gpu/drm/scheduler/sched_main.c:367 function parameter 'result' not described in 'drm_sched_job_done'
+
+Fix the warning by describing the undocumented error code.
+
+Fixes: 539f9ee4b52a ("drm/scheduler: properly forward fence errors")
+Signed-off-by: Yujie Liu <yujie.liu@intel.com>
+[phasta: Flesh out commit message]
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://patch.msgid.link/20260227082452.1802922-1-yujie.liu@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/scheduler/sched_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index c39f0245e3a97..3f138776d35fb 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -361,6 +361,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+ /**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
++ * @result: 0 on success, -ERRNO on error
+ *
+ * Finish the job's fence and resubmit the work items.
+ */
+--
+2.51.0
+
--- /dev/null
+From 9c9fa3502394c6d1f77c3f313bf9e736cddfb67e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:09:32 +0100
+Subject: drm/solomon: Fix page start when updating rectangle in page
+ addressing mode
+
+From: Francesco Lavra <flavra@baylibre.com>
+
+[ Upstream commit 36d9579fed6c9429aa172f77bd28c58696ce8e2b ]
+
+In page addressing mode, the pixel values of a dirty rectangle must be sent
+to the display controller one page at a time. The range of pages
+corresponding to a given rectangle is being incorrectly calculated as if
+the Y value of the top left coordinate of the rectangle was 0. This can
+result in rectangle updates being displayed on wrong parts of the screen.
+
+Fix the above issue by consolidating the start page calculation in a single
+place at the beginning of the update_rect function, and using the
+calculated value for all addressing modes.
+
+Fixes: b0daaa5cfaa5 ("drm/ssd130x: Support page addressing mode")
+Signed-off-by: Francesco Lavra <flavra@baylibre.com>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Link: https://patch.msgid.link/20260210180932.736502-1-flavra@baylibre.com
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index eec43d1a55951..18d2294c526de 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -736,6 +736,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+ unsigned int page_height = SSD130X_PAGE_HEIGHT;
++ u8 page_start = ssd130x->page_offset + y / page_height;
+ unsigned int pages = DIV_ROUND_UP(height, page_height);
+ struct drm_device *drm = &ssd130x->drm;
+ u32 array_idx = 0;
+@@ -773,14 +774,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+
+ if (!ssd130x->page_address_mode) {
+- u8 page_start;
+-
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ return ret;
+
+- page_start = ssd130x->page_offset + y / page_height;
+ ret = ssd130x_set_page_range(ssd130x, page_start, pages);
+ if (ret < 0)
+ return ret;
+@@ -812,7 +810,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+ if (ssd130x->page_address_mode) {
+ ret = ssd130x_set_page_pos(ssd130x,
+- ssd130x->page_offset + i,
++ page_start + i,
+ ssd130x->col_offset + x);
+ if (ret < 0)
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From b5e9a59e55baec137aa271a45e0c834a4c70ee5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 13:34:42 +0100
+Subject: drm/syncobj: Fix handle <-> fd ioctls with dirty stack
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Julian Orth <ju.orth@gmail.com>
+
+[ Upstream commit 2e3649e237237258a08d75afef96648dd2b379f7 ]
+
+Consider the following application:
+
+ #include <fcntl.h>
+ #include <string.h>
+ #include <drm/drm.h>
+ #include <sys/ioctl.h>
+
+ int main(void) {
+ int fd = open("/dev/dri/renderD128", O_RDWR);
+ struct drm_syncobj_create arg1;
+ ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &arg1);
+ struct drm_syncobj_handle arg2;
+ memset(&arg2, 1, sizeof(arg2)); // simulate dirty stack
+ arg2.handle = arg1.handle;
+ arg2.flags = 0;
+ arg2.fd = 0;
+ arg2.pad = 0;
+ // arg2.point = 0; // userspace is required to set point to 0
+ ioctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &arg2);
+ }
+
+The last ioctl returns EINVAL because args->point is not 0. However,
+userspace developed against older kernel versions is not aware of the
+new point field and might therefore not initialize it.
+
+The correct check would be
+
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ return -EINVAL;
+
+However, there might already be userspace that relies on this not
+returning an error as long as point == 0. Therefore use the more lenient
+check.
+
+Fixes: c2d3a7300695 ("drm/syncobj: Extend EXPORT_SYNC_FILE for timeline syncobjs")
+Signed-off-by: Julian Orth <ju.orth@gmail.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://lore.kernel.org/r/20260301-point-v1-1-21fc5fd98614@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_syncobj.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index e1b0fa4000cdd..7eb2cdbc574a0 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -900,7 +900,7 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ return drm_syncobj_export_sync_file(file_private, args->handle,
+ point, &args->fd);
+
+- if (args->point)
++ if (point)
+ return -EINVAL;
+
+ return drm_syncobj_handle_to_fd(file_private, args->handle,
+@@ -934,7 +934,7 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ args->handle,
+ point);
+
+- if (args->point)
++ if (point)
+ return -EINVAL;
+
+ return drm_syncobj_fd_to_handle(file_private, args->fd,
+--
+2.51.0
+
--- /dev/null
+From 1f079cf4ef72745a4ce07d7e3cdb792a98299ff4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 01:34:49 +0000
+Subject: drm/xe/configfs: Free ctx_restore_mid_bb in release
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit e377182f0266f46f02d01838e6bde67b9dac0d66 ]
+
+ctx_restore_mid_bb memory is allocated in wa_bb_store(), but
+xe_config_device_release() only frees ctx_restore_post_bb.
+
+Free ctx_restore_mid_bb[0].cs as well to avoid leaking the allocation
+when the configfs device is removed.
+
+Fixes: b30d5de3d40c ("drm/xe/configfs: Add mid context restore bb")
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Nitin Gote <nitin.r.gote@intel.com>
+Link: https://patch.msgid.link/20260225013448.3547687-2-shuicheng.lin@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit a235e7d0098337c3f2d1e8f3610c719a589e115f)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_configfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
+index 6688b2954d20b..08f379cb5321f 100644
+--- a/drivers/gpu/drm/xe/xe_configfs.c
++++ b/drivers/gpu/drm/xe/xe_configfs.c
+@@ -688,6 +688,7 @@ static void xe_config_device_release(struct config_item *item)
+
+ mutex_destroy(&dev->lock);
+
++ kfree(dev->config.ctx_restore_mid_bb[0].cs);
+ kfree(dev->config.ctx_restore_post_bb[0].cs);
+ kfree(dev);
+ }
+--
+2.51.0
+
--- /dev/null
+From 21f4f56056467eaf42f6315bb317911c1d5ad69d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 16:45:46 -0800
+Subject: drm/xe: Do not preempt fence signaling CS instructions
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit cdc8a1e11f4d5b480ec750e28010c357185b95a6 ]
+
+If a batch buffer is complete, it makes little sense to preempt the
+fence signaling instructions in the ring, as the largest portion of the
+work (the batch buffer) is already done and fence signaling consists of
+only a few instructions. If these instructions are preempted, the GuC
+would need to perform a context switch just to signal the fence, which
+is costly and delays fence signaling. Avoid this scenario by disabling
+preemption immediately after the BB start instruction and re-enabling it
+after executing the fence signaling instructions.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: Carlos Santa <carlos.santa@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Link: https://patch.msgid.link/20260115004546.58060-1-matthew.brost@intel.com
+(cherry picked from commit 2bcbf2dcde0c839a73af664a3c77d4e77d58a3eb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_ring_ops.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
+index d71837773d6c6..e0082b55e2162 100644
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -265,6 +265,9 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+@@ -328,6 +331,9 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+@@ -377,6 +383,9 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ i = emit_render_cache_flush(job, dw, i);
+
+ if (job->user_fence.used)
+--
+2.51.0
+
--- /dev/null
+From 66d5bffcadeaab91c9f48454701393d672a71c52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 17:53:08 -0500
+Subject: drm/xe/gsc: Fix GSC proxy cleanup on early initialization failure
+
+From: Zhanjun Dong <zhanjun.dong@intel.com>
+
+[ Upstream commit b3368ecca9538b88ddf982ea99064860fd5add97 ]
+
+xe_gsc_proxy_remove undoes what is done in both xe_gsc_proxy_init and
+xe_gsc_proxy_start; however, if we fail between those 2 calls, it is
+possible that the HW forcewake access hasn't been initialized yet and so
+we hit errors when the cleanup code tries to write GSC register. To
+avoid that, split the cleanup in 2 functions so that the HW cleanup is
+only called if the HW setup was completed successfully.
+
+Since the HW cleanup (interrupt disabling) is now removed from
+xe_gsc_proxy_remove, the cleanup on error paths in xe_gsc_proxy_start
+must be updated to disable interrupts before returning.
+
+Fixes: ff6cd29b690b ("drm/xe: Cleanup unwind of gt initialization")
+Signed-off-by: Zhanjun Dong <zhanjun.dong@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Link: https://patch.msgid.link/20260220225308.101469-1-zhanjun.dong@intel.com
+(cherry picked from commit 2b37c401b265c07b46408b5cb36a4b757c9b5060)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gsc_proxy.c | 43 +++++++++++++++++++++++++------
+ drivers/gpu/drm/xe/xe_gsc_types.h | 2 ++
+ 2 files changed, 37 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
+index 464282a89eef3..a6f6f0ea56526 100644
+--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
++++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
+@@ -435,16 +435,12 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
+ return 0;
+ }
+
+-static void xe_gsc_proxy_remove(void *arg)
++static void xe_gsc_proxy_stop(struct xe_gsc *gsc)
+ {
+- struct xe_gsc *gsc = arg;
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref = 0;
+
+- if (!gsc->proxy.component_added)
+- return;
+-
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+@@ -458,6 +454,30 @@ static void xe_gsc_proxy_remove(void *arg)
+ xe_pm_runtime_put(xe);
+
+ xe_gsc_wait_for_worker_completion(gsc);
++ gsc->proxy.started = false;
++}
++
++static void xe_gsc_proxy_remove(void *arg)
++{
++ struct xe_gsc *gsc = arg;
++ struct xe_gt *gt = gsc_to_gt(gsc);
++ struct xe_device *xe = gt_to_xe(gt);
++
++ if (!gsc->proxy.component_added)
++ return;
++
++ /*
++ * GSC proxy start is an async process that can be ongoing during
++ * Xe module load/unload. Using devm managed action to register
++ * xe_gsc_proxy_stop could cause issues if Xe module unload has
++ * already started when the action is registered, potentially leading
++ * to the cleanup being called at the wrong time. Therefore, instead
++ * of registering a separate devm action to undo what is done in
++ * proxy start, we call it from here, but only if the start has
++ * completed successfully (tracked with the 'started' flag).
++ */
++ if (gsc->proxy.started)
++ xe_gsc_proxy_stop(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
+@@ -513,6 +533,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
+ */
+ int xe_gsc_proxy_start(struct xe_gsc *gsc)
+ {
++ struct xe_gt *gt = gsc_to_gt(gsc);
+ int err;
+
+ /* enable the proxy interrupt in the GSC shim layer */
+@@ -524,12 +545,18 @@ int xe_gsc_proxy_start(struct xe_gsc *gsc)
+ */
+ err = xe_gsc_proxy_request_handler(gsc);
+ if (err)
+- return err;
++ goto err_irq_disable;
+
+ if (!xe_gsc_proxy_init_done(gsc)) {
+- xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
+- return -EIO;
++ xe_gt_err(gt, "GSC FW reports proxy init not completed\n");
++ err = -EIO;
++ goto err_irq_disable;
+ }
+
++ gsc->proxy.started = true;
+ return 0;
++
++err_irq_disable:
++ gsc_proxy_irq_toggle(gsc, false);
++ return err;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
+index 97c056656df05..5aaa2a75861fd 100644
+--- a/drivers/gpu/drm/xe/xe_gsc_types.h
++++ b/drivers/gpu/drm/xe/xe_gsc_types.h
+@@ -58,6 +58,8 @@ struct xe_gsc {
+ struct mutex mutex;
+ /** @proxy.component_added: whether the component has been added */
+ bool component_added;
++ /** @proxy.started: whether the proxy has been started */
++ bool started;
+ /** @proxy.bo: object to store message to and from the GSC */
+ struct xe_bo *bo;
+ /** @proxy.to_gsc: map of the memory used to send messages to the GSC */
+--
+2.51.0
+
--- /dev/null
+From 653048d18cd5dd90cc7a4d3e00f61143d95a9b5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 17:28:11 +0000
+Subject: drm/xe/reg_sr: Fix leak on xa_store failure
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 3091723785def05ebfe6a50866f87a044ae314ba ]
+
+Free the newly allocated entry when xa_store() fails to avoid a memory
+leak on the error path.
+
+v2: use goto fail_free. (Bala)
+
+Fixes: e5283bd4dfec ("drm/xe/reg_sr: Remove register pool")
+Cc: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patch.msgid.link/20260204172810.1486719-2-shuicheng.lin@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 6bc6fec71ac45f52db609af4e62bdb96b9f5fadb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_reg_sr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
+index fc8447a838c4f..6b9edc7ca4115 100644
+--- a/drivers/gpu/drm/xe/xe_reg_sr.c
++++ b/drivers/gpu/drm/xe/xe_reg_sr.c
+@@ -101,10 +101,12 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
+ *pentry = *e;
+ ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
+ if (ret)
+- goto fail;
++ goto fail_free;
+
+ return 0;
+
++fail_free:
++ kfree(pentry);
+ fail:
+ xe_gt_err(gt,
+ "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n",
+--
+2.51.0
+
--- /dev/null
+From c94f73f52e1556fc21afee2e332ae75545c50489 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 16:14:20 +0200
+Subject: e1000e: clear DPG_EN after reset to avoid autonomous power-gating
+
+From: Vitaly Lifshits <vitaly.lifshits@intel.com>
+
+[ Upstream commit 0942fc6d324eb9c6b16187b2aa994c0823557f06 ]
+
+Panther Lake systems introduced an autonomous power gating feature for
+the integrated Gigabit Ethernet in shutdown state (S5) state. As part of
+it, the reset value of DPG_EN bit was changed to 1. Clear this bit after
+performing hardware reset to avoid errors such as Tx/Rx hangs, or packet
+loss/corruption.
+
+Fixes: 0c9183ce61bc ("e1000e: Add support for the next LOM generation")
+Signed-off-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Avigail Dahan <avigailx.dahan@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e1000e/defines.h | 1 +
+ drivers/net/ethernet/intel/e1000e/ich8lan.c | 9 +++++++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
+index ba331899d1861..d4a1041e456dc 100644
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -33,6 +33,7 @@
+
+ /* Extended Device Control */
+ #define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
++#define E1000_CTRL_EXT_DPG_EN 0x00000008 /* Dynamic Power Gating Enable */
+ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+ #define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index df4e7d781cb1c..f9328caefe44b 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -4925,6 +4925,15 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, reg);
+
++ /* The hardware reset value of the DPG_EN bit is 1.
++ * Clear DPG_EN to prevent unexpected autonomous power gating.
++ */
++ if (hw->mac.type >= e1000_pch_ptp) {
++ reg = er32(CTRL_EXT);
++ reg &= ~E1000_CTRL_EXT_DPG_EN;
++ ew32(CTRL_EXT, reg);
++ }
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 67a7595db874fa3d625195d7f71f0fe00ebed94c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 13:51:51 +0200
+Subject: HID: multitouch: new class MT_CLS_EGALAX_P80H84
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit a2e70a89fa58133521b2deae4427d35776bda935 ]
+
+Fixes: f9e82295eec1 ("HID: multitouch: add eGalaxTouch P80H84 support")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-multitouch.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 1f8accb7ff435..af19e089b0122 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -233,6 +233,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_SMART_TECH 0x0113
+ #define MT_CLS_APPLE_TOUCHBAR 0x0114
+ #define MT_CLS_YOGABOOK9I 0x0115
++#define MT_CLS_EGALAX_P80H84 0x0116
+ #define MT_CLS_SIS 0x0457
+
+ #define MT_DEFAULT_MAXCONTACT 10
+@@ -447,6 +448,11 @@ static const struct mt_class mt_classes[] = {
+ MT_QUIRK_YOGABOOK9I,
+ .export_all_inputs = true
+ },
++ { .name = MT_CLS_EGALAX_P80H84,
++ .quirks = MT_QUIRK_ALWAYS_VALID |
++ MT_QUIRK_IGNORE_DUPLICATES |
++ MT_QUIRK_CONTACT_CNT_ACCURATE,
++ },
+ { }
+ };
+
+@@ -2223,8 +2229,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C000) },
+- { .driver_data = MT_CLS_EGALAX,
+- MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ { .driver_data = MT_CLS_EGALAX_P80H84,
++ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+
+ /* Elan devices */
+--
+2.51.0
+
--- /dev/null
+From 395a710342140a8a295ed41d0de4acefa05f5eae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 15:13:20 +0530
+Subject: hwmon: (aht10) Add support for dht20
+
+From: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+
+[ Upstream commit 3eaf1b631506e8de2cb37c278d5bc042521e82c1 ]
+
+Add support for dht20 temperature and humidity sensor from Aosong.
+Modify aht10 driver to handle different init command for dht20 sensor by
+adding init_cmd entry in the driver data. dht20 sensor is compatible with
+aht10 hwmon driver with this change.
+
+Tested on TI am62x SK board with dht20 sensor connected at i2c-2 port.
+
+Signed-off-by: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+Link: https://lore.kernel.org/r/2025112-94320-906858@bhairav-test.ee.iitb.ac.in
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Stable-dep-of: b7497b5a99f5 ("hwmon: (aht10) Fix initialization commands for AHT20")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/hwmon/aht10.rst | 10 +++++++++-
+ drivers/hwmon/Kconfig | 6 +++---
+ drivers/hwmon/aht10.c | 19 ++++++++++++++++---
+ 3 files changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/Documentation/hwmon/aht10.rst b/Documentation/hwmon/aht10.rst
+index 213644b4ecba6..7903b6434326d 100644
+--- a/Documentation/hwmon/aht10.rst
++++ b/Documentation/hwmon/aht10.rst
+@@ -20,6 +20,14 @@ Supported chips:
+
+ English: http://www.aosong.com/userfiles/files/media/Data%20Sheet%20AHT20.pdf
+
++ * Aosong DHT20
++
++ Prefix: 'dht20'
++
++ Addresses scanned: None
++
++ Datasheet: https://www.digikey.co.nz/en/htmldatasheets/production/9184855/0/0/1/101020932
++
+ Author: Johannes Cornelis Draaijer <jcdra1@gmail.com>
+
+
+@@ -33,7 +41,7 @@ The address of this i2c device may only be 0x38
+ Special Features
+ ----------------
+
+-AHT20 has additional CRC8 support which is sent as the last byte of the sensor
++AHT20, DHT20 has additional CRC8 support which is sent as the last byte of the sensor
+ values.
+
+ Usage Notes
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index 2760feb9f83b5..2a71b6e834b08 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -245,12 +245,12 @@ config SENSORS_ADT7475
+ will be called adt7475.
+
+ config SENSORS_AHT10
+- tristate "Aosong AHT10, AHT20"
++ tristate "Aosong AHT10, AHT20, DHT20"
+ depends on I2C
+ select CRC8
+ help
+- If you say yes here, you get support for the Aosong AHT10 and AHT20
+- temperature and humidity sensors
++ If you say yes here, you get support for the Aosong AHT10, AHT20 and
++ DHT20 temperature and humidity sensors
+
+ This driver can also be built as a module. If so, the module
+ will be called aht10.
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index d1c55e2eb4799..a153282eef6a2 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,6 +37,8 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
++#define DHT20_CMD_INIT 0x71
++
+ /*
+ * Flags in the answer byte/command
+ */
+@@ -48,11 +50,12 @@
+
+ #define AHT10_MAX_POLL_INTERVAL_LEN 30
+
+-enum aht10_variant { aht10, aht20 };
++enum aht10_variant { aht10, aht20, dht20};
+
+ static const struct i2c_device_id aht10_id[] = {
+ { "aht10", aht10 },
+ { "aht20", aht20 },
++ { "dht20", dht20 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(i2c, aht10_id);
+@@ -77,6 +80,7 @@ MODULE_DEVICE_TABLE(i2c, aht10_id);
+ * AHT10/AHT20
+ * @crc8: crc8 support flag
+ * @meas_size: measurements data size
++ * @init_cmd: Initialization command
+ */
+
+ struct aht10_data {
+@@ -92,6 +96,7 @@ struct aht10_data {
+ int humidity;
+ bool crc8;
+ unsigned int meas_size;
++ u8 init_cmd;
+ };
+
+ /*
+@@ -101,13 +106,13 @@ struct aht10_data {
+ */
+ static int aht10_init(struct aht10_data *data)
+ {
+- const u8 cmd_init[] = {AHT10_CMD_INIT, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
++ const u8 cmd_init[] = {data->init_cmd, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
+ 0x00};
+ int res;
+ u8 status;
+ struct i2c_client *client = data->client;
+
+- res = i2c_master_send(client, cmd_init, 3);
++ res = i2c_master_send(client, cmd_init, sizeof(cmd_init));
+ if (res < 0)
+ return res;
+
+@@ -352,9 +357,17 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
++ data->init_cmd = AHT10_CMD_INIT;
++ break;
++ case dht20:
++ data->meas_size = AHT20_MEAS_SIZE;
++ data->crc8 = true;
++ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
++ data->init_cmd = DHT20_CMD_INIT;
+ break;
+ default:
+ data->meas_size = AHT10_MEAS_SIZE;
++ data->init_cmd = AHT10_CMD_INIT;
+ break;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From dc056c1a423363200cc7305d4e504f8fe16f0ab1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 01:03:31 +0800
+Subject: hwmon: (aht10) Fix initialization commands for AHT20
+
+From: Hao Yu <haoyufine@gmail.com>
+
+[ Upstream commit b7497b5a99f54ab8dcda5b14a308385b2fb03d8d ]
+
+According to the AHT20 datasheet (updated to V1.0 after the 2023.09
+version), the initialization command for AHT20 is 0b10111110 (0xBE).
+The previous sequence (0xE1) used in earlier versions is no longer
+compatible with newer AHT20 sensors. Update the initialization
+command to ensure the sensor is properly initialized.
+
+While at it, use binary notation for DHT20_CMD_INIT to match the notation
+used in the datasheet.
+
+Fixes: d2abcb5cc885 ("hwmon: (aht10) Add support for compatible aht20")
+Signed-off-by: Hao Yu <haoyufine@gmail.com>
+Link: https://lore.kernel.org/r/20260222170332.1616-3-haoyufine@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/aht10.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index a153282eef6a2..7ba00bafb57d6 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,7 +37,9 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
+-#define DHT20_CMD_INIT 0x71
++#define AHT20_CMD_INIT 0b10111110
++
++#define DHT20_CMD_INIT 0b01110001
+
+ /*
+ * Flags in the answer byte/command
+@@ -357,7 +359,7 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
+- data->init_cmd = AHT10_CMD_INIT;
++ data->init_cmd = AHT20_CMD_INIT;
+ break;
+ case dht20:
+ data->meas_size = AHT20_MEAS_SIZE;
+--
+2.51.0
+
--- /dev/null
+From 3f546066d9d7a535d1029fc7142a03f975328600 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:14 -0800
+Subject: hwmon: (it87) Check the it87_lock() return value
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 07ed4f05bbfd2bc014974dcc4297fd3aa1cb88c0 ]
+
+Return early in it87_resume() if it87_lock() fails instead of ignoring the
+return value of that function. This patch suppresses a Clang thread-safety
+warning.
+
+Cc: Frank Crawford <frank@crawford.emu.id.au>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Jean Delvare <jdelvare@suse.com>
+Cc: linux-hwmon@vger.kernel.org
+Fixes: 376e1a937b30 ("hwmon: (it87) Add calls to smbus_enable/smbus_disable as required")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20260223220102.2158611-15-bart.vanassche@linux.dev
+[groeck: Declare 'ret' at the beginning of it87_resume()]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/it87.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index e233aafa8856c..5cfb98a0512f0 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -3590,10 +3590,13 @@ static int it87_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct it87_data *data = dev_get_drvdata(dev);
++ int err;
+
+ it87_resume_sio(pdev);
+
+- it87_lock(data);
++ err = it87_lock(data);
++ if (err)
++ return err;
+
+ it87_check_pwm(dev);
+ it87_check_limit_regs(data);
+--
+2.51.0
+
--- /dev/null
+From 9cbb82b5aa3c1e6cc2ff34c544a551196670c1f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 22:20:39 +0100
+Subject: hwmon: (max6639) fix inverted polarity
+
+From: Olivier Sobrie <olivier@sobrie.be>
+
+[ Upstream commit 170a4b21f49b3dcff3115b4c90758f0a0d77375a ]
+
+According to MAX6639 documentation:
+
+ D1: PWM Output Polarity. PWM output is low at
+ 100% duty cycle when this bit is set to zero. PWM
+ output is high at 100% duty cycle when this bit is set
+ to 1.
+
+Up to commit 0f33272b60ed ("hwmon: (max6639) : Update hwmon init using
+info structure"), the polarity was set to high (0x2) when no platform
+data was set. After the patch, the polarity register wasn't set anymore
+if no platform data was specified. Nowadays, since commit 7506ebcd662b
+("hwmon: (max6639) : Configure based on DT property"), it is always set
+to low which doesn't match with the comment above and change the
+behavior compared to versions prior 0f33272b60ed.
+
+Fixes: 0f33272b60ed ("hwmon: (max6639) : Update hwmon init using info structure")
+Signed-off-by: Olivier Sobrie <olivier@sobrie.be>
+Link: https://lore.kernel.org/r/20260304212039.570274-1-olivier@sobrie.be
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max6639.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
+index a06346496e1d9..1fc12e1463b58 100644
+--- a/drivers/hwmon/max6639.c
++++ b/drivers/hwmon/max6639.c
+@@ -623,7 +623,7 @@ static int max6639_init_client(struct i2c_client *client,
+ return err;
+
+ /* Fans PWM polarity high by default */
+- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
++ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ return err;
+
+--
+2.51.0
+
--- /dev/null
+From 68aba67aaf94fda522cbeefb3539f211ea9e0e87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 18:41:15 -0700
+Subject: i2c: i801: Revert "i2c: i801: replace acpi_lock with I2C bus lock"
+
+From: Charles Haithcock <chaithco@redhat.com>
+
+[ Upstream commit cfc69c2e6c699c96949f7b0455195b0bfb7dc715 ]
+
+This reverts commit f707d6b9e7c18f669adfdb443906d46cfbaaa0c1.
+
+Under rare circumstances, multiple udev threads can collect i801 device
+info on boot and walk i801_acpi_io_handler somewhat concurrently. The
+first will note the area is reserved by acpi to prevent further touches.
+This ultimately causes the area to be deregistered. The second will
+enter i801_acpi_io_handler after the area is unregistered but before a
+check can be made that the area is unregistered. i2c_lock_bus relies on
+the now unregistered area containing lock_ops to lock the bus. The end
+result is a kernel panic on boot with the following backtrace;
+
+[ 14.971872] ioatdma 0000:09:00.2: enabling device (0100 -> 0102)
+[ 14.971873] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[ 14.971880] #PF: supervisor read access in kernel mode
+[ 14.971884] #PF: error_code(0x0000) - not-present page
+[ 14.971887] PGD 0 P4D 0
+[ 14.971894] Oops: 0000 [#1] PREEMPT SMP PTI
+[ 14.971900] CPU: 5 PID: 956 Comm: systemd-udevd Not tainted 5.14.0-611.5.1.el9_7.x86_64 #1
+[ 14.971905] Hardware name: XXXXXXXXXXXXXXXXXXXXXXX BIOS 1.20.10.SV91 01/30/2023
+[ 14.971908] RIP: 0010:i801_acpi_io_handler+0x2d/0xb0 [i2c_i801]
+[ 14.971929] Code: 00 00 49 8b 40 20 41 57 41 56 4d 8b b8 30 04 00 00 49 89 ce 41 55 41 89 d5 41 54 49 89 f4 be 02 00 00 00 55 4c 89 c5 53 89 fb <48> 8b 00 4c 89 c7 e8 18 61 54 e9 80 bd 80 04 00 00 00 75 09 4c 3b
+[ 14.971933] RSP: 0018:ffffbaa841483838 EFLAGS: 00010282
+[ 14.971938] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff9685e01ba568
+[ 14.971941] RDX: 0000000000000008 RSI: 0000000000000002 RDI: 0000000000000000
+[ 14.971944] RBP: ffff9685ca22f028 R08: ffff9685ca22f028 R09: ffff9685ca22f028
+[ 14.971948] R10: 000000000000000b R11: 0000000000000580 R12: 0000000000000580
+[ 14.971951] R13: 0000000000000008 R14: ffff9685e01ba568 R15: ffff9685c222f000
+[ 14.971954] FS: 00007f8287c0ab40(0000) GS:ffff96a47f940000(0000) knlGS:0000000000000000
+[ 14.971959] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 14.971963] CR2: 0000000000000000 CR3: 0000000168090001 CR4: 00000000003706f0
+[ 14.971966] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 14.971968] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 14.971972] Call Trace:
+[ 14.971977] <TASK>
+[ 14.971981] ? show_trace_log_lvl+0x1c4/0x2df
+[ 14.971994] ? show_trace_log_lvl+0x1c4/0x2df
+[ 14.972003] ? acpi_ev_address_space_dispatch+0x16e/0x3c0
+[ 14.972014] ? __die_body.cold+0x8/0xd
+[ 14.972021] ? page_fault_oops+0x132/0x170
+[ 14.972028] ? exc_page_fault+0x61/0x150
+[ 14.972036] ? asm_exc_page_fault+0x22/0x30
+[ 14.972045] ? i801_acpi_io_handler+0x2d/0xb0 [i2c_i801]
+[ 14.972061] acpi_ev_address_space_dispatch+0x16e/0x3c0
+[ 14.972069] ? __pfx_i801_acpi_io_handler+0x10/0x10 [i2c_i801]
+[ 14.972085] acpi_ex_access_region+0x5b/0xd0
+[ 14.972093] acpi_ex_field_datum_io+0x73/0x2e0
+[ 14.972100] acpi_ex_read_data_from_field+0x8e/0x230
+[ 14.972106] acpi_ex_resolve_node_to_value+0x23d/0x310
+[ 14.972114] acpi_ds_evaluate_name_path+0xad/0x110
+[ 14.972121] acpi_ds_exec_end_op+0x321/0x510
+[ 14.972127] acpi_ps_parse_loop+0xf7/0x680
+[ 14.972136] acpi_ps_parse_aml+0x17a/0x3d0
+[ 14.972143] acpi_ps_execute_method+0x137/0x270
+[ 14.972150] acpi_ns_evaluate+0x1f4/0x2e0
+[ 14.972158] acpi_evaluate_object+0x134/0x2f0
+[ 14.972164] acpi_evaluate_integer+0x50/0xe0
+[ 14.972173] ? vsnprintf+0x24b/0x570
+[ 14.972181] acpi_ac_get_state.part.0+0x23/0x70
+[ 14.972189] get_ac_property+0x4e/0x60
+[ 14.972195] power_supply_show_property+0x90/0x1f0
+[ 14.972205] add_prop_uevent+0x29/0x90
+[ 14.972213] power_supply_uevent+0x109/0x1d0
+[ 14.972222] dev_uevent+0x10e/0x2f0
+[ 14.972228] uevent_show+0x8e/0x100
+[ 14.972236] dev_attr_show+0x19/0x40
+[ 14.972246] sysfs_kf_seq_show+0x9b/0x100
+[ 14.972253] seq_read_iter+0x120/0x4b0
+[ 14.972262] ? selinux_file_permission+0x106/0x150
+[ 14.972273] vfs_read+0x24f/0x3a0
+[ 14.972284] ksys_read+0x5f/0xe0
+[ 14.972291] do_syscall_64+0x5f/0xe0
+...
+
+The kernel panic is mitigated by limiting the count of udev
+children to 1. Revert to using the acpi_lock to continue protecting
+marking the area as owned by firmware without relying on a lock in
+a potentially unmapped region of memory.
+
+Fixes: f707d6b9e7c1 ("i2c: i801: replace acpi_lock with I2C bus lock")
+Signed-off-by: Charles Haithcock <chaithco@redhat.com>
+[wsa: added Fixes-tag and updated comment stating the importance of the lock]
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-i801.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 57fbec1259bea..506d69b156f7c 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -306,9 +306,10 @@ struct i801_priv {
+
+ /*
+ * If set to true the host controller registers are reserved for
+- * ACPI AML use.
++ * ACPI AML use. Needs extra protection by acpi_lock.
+ */
+ bool acpi_reserved;
++ struct mutex acpi_lock;
+ };
+
+ #define FEATURE_SMBUS_PEC BIT(0)
+@@ -890,8 +891,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ int hwpec, ret;
+ struct i801_priv *priv = i2c_get_adapdata(adap);
+
+- if (priv->acpi_reserved)
++ mutex_lock(&priv->acpi_lock);
++ if (priv->acpi_reserved) {
++ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
++ }
+
+ pm_runtime_get_sync(&priv->pci_dev->dev);
+
+@@ -931,6 +935,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ iowrite8(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
+
+ pm_runtime_put_autosuspend(&priv->pci_dev->dev);
++ mutex_unlock(&priv->acpi_lock);
+ return ret;
+ }
+
+@@ -1459,7 +1464,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+- i2c_lock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
++ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
+ priv->acpi_reserved = true;
+@@ -1479,7 +1484,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+- i2c_unlock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
++ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+ }
+@@ -1539,6 +1544,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ priv->adapter.dev.parent = &dev->dev;
+ acpi_use_parent_companion(&priv->adapter.dev);
+ priv->adapter.retries = 3;
++ mutex_init(&priv->acpi_lock);
+
+ priv->pci_dev = dev;
+ priv->features = id->driver_data;
+--
+2.51.0
+
--- /dev/null
+From fae84cc0dfb07a1f462f8714c07a7dff2da7aa0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 11:50:23 +0100
+Subject: i40e: Fix preempt count leak in napi poll tracepoint
+
+From: Thomas Gleixner <tglx@kernel.org>
+
+[ Upstream commit 4b3d54a85bd37ebf2d9836f0d0de775c0ff21af9 ]
+
+Using get_cpu() in the tracepoint assignment causes an obvious preempt
+count leak because nothing invokes put_cpu() to undo it:
+
+ softirq: huh, entered softirq 3 NET_RX with preempt_count 00000100, exited with 00000101?
+
+This clearly has seen a lot of testing in the last 3+ years...
+
+Use smp_processor_id() instead.
+
+Fixes: 6d4d584a7ea8 ("i40e: Add i40e_napi_poll tracepoint")
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Cc: Tony Nguyen <anthony.l.nguyen@intel.com>
+Cc: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Cc: intel-wired-lan@lists.osuosl.org
+Cc: netdev@vger.kernel.org
+Reviewed-by: Joe Damato <joe@dama.to>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
+index 759f3d1c4c8f0..dde0ccd789ed1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
+@@ -88,7 +88,7 @@ TRACE_EVENT(i40e_napi_poll,
+ __entry->rx_clean_complete = rx_clean_complete;
+ __entry->tx_clean_complete = tx_clean_complete;
+ __entry->irq_num = q->irq_num;
+- __entry->curr_cpu = get_cpu();
++ __entry->curr_cpu = smp_processor_id();
+ __assign_str(qname);
+ __assign_str(dev_name);
+ __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+--
+2.51.0
+
--- /dev/null
+From f3a5af640678f5bde7955279dbb192bb6a05a265 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:46 +0100
+Subject: i40e: fix registering XDP RxQ info
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8f497dc8a61429cc004720aa8e713743355d80cf ]
+
+Current way of handling XDP RxQ info in i40e has a problem, where frag_size
+is not updated when xsk_buff_pool is detached or when MTU is changed, this
+leads to growing tail always failing for multi-buffer packets.
+
+Couple XDP RxQ info registering with buffer allocations and unregistering
+with cleaning the ring.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-6-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 34 ++++++++++++---------
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +--
+ 2 files changed, 22 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 02de186dcc8f5..bc00bd4f439be 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3583,18 +3583,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+- err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->queue_index,
+- ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
+- if (err)
+- return err;
+- }
+-
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+@@ -3606,17 +3596,23 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ }
+
+ skip:
+@@ -3654,7 +3650,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* set the context in the HMC */
+@@ -3663,7 +3660,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* configure Rx buffer alignment */
+@@ -3671,7 +3669,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_info(&vsi->back->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto unreg_xdp;
+ }
+ clear_ring_build_skb_enabled(ring);
+ } else {
+@@ -3701,6 +3700,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ return 0;
++unreg_xdp:
++ if (ring->vsi->type == I40E_VSI_MAIN)
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
++
++ return err;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index cc0b9efc2637a..816179c7e2712 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1470,6 +1470,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ if (!rx_ring->rx_bi)
+ return;
+
++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++
+ if (rx_ring->xsk_pool) {
+ i40e_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+@@ -1527,8 +1530,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+ {
+ i40e_clean_rx_ring(rx_ring);
+- if (rx_ring->vsi->type == I40E_VSI_MAIN)
+- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+--
+2.51.0
+
--- /dev/null
+From 59a3cc74fff7359e22bd35fbb4d170d7ef08e8e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:47 +0100
+Subject: i40e: use xdp.frame_sz as XDP RxQ info frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit c69d22c6c46a1d792ba8af3d8d6356fdc0e6f538 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects whole buffer size instead
+of DMA write size. Different assumptions in i40e driver configuration lead
+to negative tailroom.
+
+Set frag_size to the same value as frame_sz in shared pages mode, use new
+helper to set frag_size when AF_XDP ZC is active.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-7-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index bc00bd4f439be..598739220dfb9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3569,6 +3569,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
++ u32 xdp_frame_sz;
+ int err = 0;
+ bool ok;
+
+@@ -3578,6 +3579,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
++ xdp_frame_sz = i40e_rx_pg_size(ring) / 2;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+@@ -3585,11 +3587,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
++ xdp_frame_sz = xsk_pool_get_rx_frag_step(ring->xsk_pool);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3605,7 +3608,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3616,7 +3619,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ skip:
+- xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
++ xdp_init_buff(&ring->xdp, xdp_frame_sz, &ring->xdp_rxq);
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+--
+2.51.0
+
--- /dev/null
+From e996f4e2da35b655dd49375e874e9a7b613bdc5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 15:57:14 +0000
+Subject: iavf: fix netdev->max_mtu to respect actual hardware limit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b84852170153671bb0fa6737a6e48370addd8e1a ]
+
+iavf sets LIBIE_MAX_MTU as netdev->max_mtu, ignoring vf_res->max_mtu
+from PF [1]. This allows setting an MTU beyond the actual hardware
+limit, causing TX queue timeouts [2].
+
+Set correct netdev->max_mtu using vf_res->max_mtu from the PF.
+
+Note that currently PF drivers such as ice/i40e set the frame size in
+vf_res->max_mtu, not MTU. Convert vf_res->max_mtu to MTU before setting
+netdev->max_mtu.
+
+[1]
+ # ip -j -d link show $DEV | jq '.[0].max_mtu'
+ 16356
+
+[2]
+ iavf 0000:00:05.0 enp0s5: NETDEV WATCHDOG: CPU: 1: transmit queue 0 timed out 5692 ms
+ iavf 0000:00:05.0 enp0s5: NIC Link is Up Speed is 10 Gbps Full Duplex
+ iavf 0000:00:05.0 enp0s5: NETDEV WATCHDOG: CPU: 6: transmit queue 3 timed out 5312 ms
+ iavf 0000:00:05.0 enp0s5: NIC Link is Up Speed is 10 Gbps Full Duplex
+ ...
+
+Fixes: 5fa4caff59f2 ("iavf: switch to Page Pool")
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_main.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 4b0fc8f354bc9..53a0366fbf998 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2797,7 +2797,22 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+- netdev->max_mtu = LIBIE_MAX_MTU;
++
++ /* PF/VF API: vf_res->max_mtu is max frame size (not MTU).
++ * Convert to MTU.
++ */
++ if (!adapter->vf_res->max_mtu) {
++ netdev->max_mtu = LIBIE_MAX_MTU;
++ } else if (adapter->vf_res->max_mtu < LIBETH_RX_LL_LEN + ETH_MIN_MTU ||
++ adapter->vf_res->max_mtu >
++ LIBETH_RX_LL_LEN + LIBIE_MAX_MTU) {
++ netdev_warn_once(adapter->netdev,
++ "invalid max frame size %d from PF, using default MTU %d",
++ adapter->vf_res->max_mtu, LIBIE_MAX_MTU);
++ netdev->max_mtu = LIBIE_MAX_MTU;
++ } else {
++ netdev->max_mtu = adapter->vf_res->max_mtu - LIBETH_RX_LL_LEN;
++ }
+
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+--
+2.51.0
+
--- /dev/null
+From 18a25919d0ff17b97141db5bcbe493b0469f0204 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Dec 2025 14:29:48 +0100
+Subject: ice: fix adding AQ LLDP filter for VF
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit eef33aa44935d001747ca97703c08dd6f9031162 ]
+
+The referenced commit came from a misunderstanding of the FW LLDP filter
+AQ (Admin Queue) command due to the error in the internal documentation.
+Contrary to the assumptions in the original commit, VFs can be added and
+deleted from this filter without any problems. Introduced dev_info message
+proved to be useful, so reverting the whole commit does not make sense.
+
+Without this fix, trusted VFs do not receive LLDP traffic, if there is an
+AQ LLDP filter on PF. When trusted VF attempts to add an LLDP multicast
+MAC address, the following message can be seen in dmesg on host:
+
+ice 0000:33:00.0: Failed to add Rx LLDP rule on VSI 20 error: -95
+
+Revert checking VSI type when adding LLDP filter through AQ.
+
+Fixes: 4d5a1c4e6d49 ("ice: do not add LLDP-specific filter if not necessary")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index eb148c8d9e083..95160c8dc1bba 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -6428,7 +6428,7 @@ int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
+ struct ice_aqc_lldp_filter_ctrl *cmd;
+ struct libie_aq_desc desc;
+
+- if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
++ if (!ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
+ cmd = libie_aq_raw(&desc);
+--
+2.51.0
+
--- /dev/null
+From 228f3fb68f12d02cc5af6e86b914bef953604e61 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jan 2026 12:00:26 +0800
+Subject: ice: recap the VSI and QoS info after rebuild
+
+From: Aaron Ma <aaron.ma@canonical.com>
+
+[ Upstream commit 6aa07e23dd3ccd35a0100c06fcb6b6c3b01e7965 ]
+
+Fix IRDMA hardware initialization timeout (-110) after resume by
+separating VSI-dependent configuration from RDMA resource allocation,
+ensuring VSI is rebuilt before IRDMA accesses it.
+
+After resume from suspend, IRDMA hardware initialization fails:
+ ice: IRDMA hardware initialization FAILED init_state=4 status=-110
+
+Separate RDMA initialization into two phases:
+1. ice_init_rdma() - Allocate resources only (no VSI/QoS access, no plug)
+2. ice_rdma_finalize_setup() - Assign VSI/QoS info and plug device
+
+This allows:
+- ice_init_rdma() to stay in ice_resume() (mirrors ice_deinit_rdma()
+ in ice_suspend())
+- VSI assignment deferred until after ice_vsi_rebuild() completes
+- QoS info updated after ice_dcb_rebuild() completes
+- Device plugged only when control queues, VSI, and DCB are all ready
+
+Fixes: bc69ad74867db ("ice: avoid IRQ collision to fix init failure on ACPI S3 resume")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Aaron Ma <aaron.ma@canonical.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 1 +
+ drivers/net/ethernet/intel/ice/ice_idc.c | 44 +++++++++++++++++------
+ drivers/net/ethernet/intel/ice/ice_main.c | 7 +++-
+ 3 files changed, 41 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index a23ccd4ba08d2..6886188043764 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -988,6 +988,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
+ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+ int ice_plug_aux_dev(struct ice_pf *pf);
+ void ice_unplug_aux_dev(struct ice_pf *pf);
++void ice_rdma_finalize_setup(struct ice_pf *pf);
+ int ice_init_rdma(struct ice_pf *pf);
+ void ice_deinit_rdma(struct ice_pf *pf);
+ bool ice_is_wol_supported(struct ice_hw *hw);
+diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
+index 420d45c2558b6..ded029aa71d7d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_idc.c
++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
+@@ -360,6 +360,39 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
+ auxiliary_device_uninit(adev);
+ }
+
++/**
++ * ice_rdma_finalize_setup - Complete RDMA setup after VSI is ready
++ * @pf: ptr to ice_pf
++ *
++ * Sets VSI-dependent information and plugs aux device.
++ * Must be called after ice_init_rdma(), ice_vsi_rebuild(), and
++ * ice_dcb_rebuild() complete.
++ */
++void ice_rdma_finalize_setup(struct ice_pf *pf)
++{
++ struct device *dev = ice_pf_to_dev(pf);
++ struct iidc_rdma_priv_dev_info *privd;
++ int ret;
++
++ if (!ice_is_rdma_ena(pf) || !pf->cdev_info)
++ return;
++
++ privd = pf->cdev_info->iidc_priv;
++ if (!privd || !pf->vsi || !pf->vsi[0] || !pf->vsi[0]->netdev)
++ return;
++
++ /* Assign VSI info now that VSI is valid */
++ privd->netdev = pf->vsi[0]->netdev;
++ privd->vport_id = pf->vsi[0]->vsi_num;
++
++ /* Update QoS info after DCB has been rebuilt */
++ ice_setup_dcb_qos_info(pf, &privd->qos_info);
++
++ ret = ice_plug_aux_dev(pf);
++ if (ret)
++ dev_warn(dev, "Failed to plug RDMA aux device: %d\n", ret);
++}
++
+ /**
+ * ice_init_rdma - initializes PF for RDMA use
+ * @pf: ptr to ice_pf
+@@ -398,22 +431,14 @@ int ice_init_rdma(struct ice_pf *pf)
+ }
+
+ cdev->iidc_priv = privd;
+- privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+- privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+- ice_setup_dcb_qos_info(pf, &privd->qos_info);
+- ret = ice_plug_aux_dev(pf);
+- if (ret)
+- goto err_plug_aux_dev;
++
+ return 0;
+
+-err_plug_aux_dev:
+- pf->cdev_info->adev = NULL;
+- xa_erase(&ice_aux_id, pf->aux_idx);
+ err_alloc_xa:
+ kfree(privd);
+ err_privd_alloc:
+@@ -432,7 +457,6 @@ void ice_deinit_rdma(struct ice_pf *pf)
+ if (!ice_is_rdma_ena(pf))
+ return;
+
+- ice_unplug_aux_dev(pf);
+ xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index f2b91f7f87861..a4ae032f2161b 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5147,6 +5147,9 @@ int ice_load(struct ice_pf *pf)
+ if (err)
+ goto err_init_rdma;
+
++ /* Finalize RDMA: VSI already created, assign info and plug device */
++ ice_rdma_finalize_setup(pf);
++
+ ice_service_task_restart(pf);
+
+ clear_bit(ICE_DOWN, pf->state);
+@@ -5178,6 +5181,7 @@ void ice_unload(struct ice_pf *pf)
+
+ devl_assert_locked(priv_to_devlink(pf));
+
++ ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
+ ice_tc_indir_block_unregister(vsi);
+@@ -5604,6 +5608,7 @@ static int ice_suspend(struct device *dev)
+ */
+ disabled = ice_service_task_stop(pf);
+
++ ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
+
+ /* Already suspended?, then there is nothing to do */
+@@ -7809,7 +7814,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+
+ ice_health_clear(pf);
+
+- ice_plug_aux_dev(pf);
++ ice_rdma_finalize_setup(pf);
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ ice_lag_rebuild(pf);
+
+--
+2.51.0
+
--- /dev/null
+From 1639ade679e9fdfc860082a6f62d090bef4f47ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 21:55:59 +0000
+Subject: idpf: change IRQ naming to match netdev and ethtool queue numbering
+
+From: Brian Vazquez <brianvv@google.com>
+
+[ Upstream commit 1500a8662d2d41d6bb03e034de45ddfe6d7d362d ]
+
+The code uses the vidx for the IRQ name but that doesn't match ethtool
+reporting nor netdev naming, this makes it hard to tune the device and
+associate queues with IRQs. Sequentially requesting irqs starting from
+'0' makes the output consistent.
+
+This commit changes the interrupt numbering but preserves the name
+format, maintaining ABI compatibility. Existing tools relying on the old
+numbering are already non-functional, as they lack a useful correlation
+to the interrupts.
+
+Before:
+
+ethtool -L eth1 tx 1 combined 3
+
+grep . /proc/irq/*/*idpf*/../smp_affinity_list
+/proc/irq/67/idpf-Mailbox-0/../smp_affinity_list:0-55,112-167
+/proc/irq/68/idpf-eth1-TxRx-1/../smp_affinity_list:0
+/proc/irq/70/idpf-eth1-TxRx-3/../smp_affinity_list:1
+/proc/irq/71/idpf-eth1-TxRx-4/../smp_affinity_list:2
+/proc/irq/72/idpf-eth1-Tx-5/../smp_affinity_list:3
+
+ethtool -S eth1 | grep -v ': 0'
+NIC statistics:
+ tx_q-0_pkts: 1002
+ tx_q-1_pkts: 2679
+ tx_q-2_pkts: 1113
+ tx_q-3_pkts: 1192 <----- tx_q-3 vs idpf-eth1-Tx-5
+ rx_q-0_pkts: 1143
+ rx_q-1_pkts: 3172
+ rx_q-2_pkts: 1074
+
+After:
+
+ethtool -L eth1 tx 1 combined 3
+
+grep . /proc/irq/*/*idpf*/../smp_affinity_list
+
+/proc/irq/67/idpf-Mailbox-0/../smp_affinity_list:0-55,112-167
+/proc/irq/68/idpf-eth1-TxRx-0/../smp_affinity_list:0
+/proc/irq/70/idpf-eth1-TxRx-1/../smp_affinity_list:1
+/proc/irq/71/idpf-eth1-TxRx-2/../smp_affinity_list:2
+/proc/irq/72/idpf-eth1-Tx-3/../smp_affinity_list:3
+
+ethtool -S eth1 | grep -v ': 0'
+NIC statistics:
+ tx_q-0_pkts: 118
+ tx_q-1_pkts: 134
+ tx_q-2_pkts: 228
+ tx_q-3_pkts: 138 <--- tx_q-3 matches idpf-eth1-Tx-3
+ rx_q-0_pkts: 111
+ rx_q-1_pkts: 366
+ rx_q-2_pkts: 120
+
+Fixes: d4d558718266 ("idpf: initialize interrupts and enable vport")
+Signed-off-by: Brian Vazquez <brianvv@google.com>
+Reviewed-by: Brett Creeley <brett.creeley@amd.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index 83cc9504e7e1a..c859665b2dc89 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -4038,7 +4038,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+ continue;
+
+ name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
+- vec_name, vidx);
++ vec_name, vector);
+
+ err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
+ name, q_vector);
+--
+2.51.0
+
--- /dev/null
+From 743650f42653e5ff40191241934624688d287cd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 12:01:13 -0600
+Subject: idpf: Fix flow rule delete failure due to invalid validation
+
+From: Sreedevi Joshi <sreedevi.joshi@intel.com>
+
+[ Upstream commit 2c31557336a8e4d209ed8d4513cef2c0f15e7ef4 ]
+
+When deleting a flow rule using "ethtool -N <dev> delete <location>",
+idpf_sideband_action_ena() incorrectly validates fsp->ring_cookie even
+though ethtool doesn't populate this field for delete operations. The
+uninitialized ring_cookie may randomly match RX_CLS_FLOW_DISC or
+RX_CLS_FLOW_WAKE, causing validation to fail and preventing legitimate
+rule deletions. Remove the unnecessary sideband action enable check and
+ring_cookie validation during delete operations since action validation
+is not required when removing existing rules.
+
+Fixes: ada3e24b84a0 ("idpf: add flow steering support")
+Signed-off-by: Sreedevi Joshi <sreedevi.joshi@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+index 3e191cf528b69..6c0a9296eccc8 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+@@ -291,9 +291,6 @@ static int idpf_del_flow_steer(struct net_device *netdev,
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+
+- if (!idpf_sideband_action_ena(vport, fsp))
+- return -EOPNOTSUPP;
+-
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+--
+2.51.0
+
--- /dev/null
+From a5966e3cc7ba2b20f9cd9fab4e20595d0c5b6134 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 06:47:28 +0000
+Subject: idpf: increment completion queue next_to_clean in sw marker wait
+ routine
+
+From: Li Li <boolli@google.com>
+
+[ Upstream commit 712896ac4bce38a965a1c175f6e7804ed0381334 ]
+
+Currently, in idpf_wait_for_sw_marker_completion(), when an
+IDPF_TXD_COMPLT_SW_MARKER packet is found, the routine breaks out of
+the for loop and does not increment the next_to_clean counter. This
+causes the subsequent NAPI polls to run into the same
+IDPF_TXD_COMPLT_SW_MARKER packet again and print out the following:
+
+ [ 23.261341] idpf 0000:05:00.0 eth1: Unknown TX completion type: 5
+
+Instead, we should increment next_to_clean regardless when an
+IDPF_TXD_COMPLT_SW_MARKER packet is found.
+
+Tested: with the patch applied, we do not see the errors above from NAPI
+polls anymore.
+
+Fixes: 9d39447051a0 ("idpf: remove SW marker handling from NAPI")
+Signed-off-by: Li Li <boolli@google.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index a48088eb9b822..83cc9504e7e1a 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -2326,7 +2326,7 @@ void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+
+ do {
+ struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+- struct idpf_tx_queue *target;
++ struct idpf_tx_queue *target = NULL;
+ u32 ctype_gen, id;
+
+ tx_desc = flow ? &complq->comp[ntc].common :
+@@ -2346,14 +2346,14 @@ void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+ target = complq->txq_grp->txqs[id];
+
+ idpf_queue_clear(SW_MARKER, target);
+- if (target == txq)
+- break;
+
+ next:
+ if (unlikely(++ntc == complq->desc_count)) {
+ ntc = 0;
+ gen_flag = !gen_flag;
+ }
++ if (target == txq)
++ break;
+ } while (time_before(jiffies, timeout));
+
+ idpf_queue_assign(GEN_CHK, complq, gen_flag);
+--
+2.51.0
+
--- /dev/null
+From 03ca7d50b845bd7cc1fe716a8b38cc07b032ffc1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jan 2026 15:16:52 +0100
+Subject: igb: Fix trigger of incorrect irq in igb_xsk_wakeup
+
+From: Vivek Behera <vivek.behera@siemens.com>
+
+[ Upstream commit d4c13ab36273a8c318ba06799793cc1f5d9c6fa1 ]
+
+The current implementation in the igb_xsk_wakeup expects
+the Rx and Tx queues to share the same irq. This would lead
+to triggering of incorrect irq in split irq configuration.
+This patch addresses this issue which could impact environments
+with 2 active cpu cores
+or when the number of queues is reduced to 2 or less
+
+cat /proc/interrupts | grep eno2
+ 167: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 0-edge eno2
+ 168: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 1-edge eno2-rx-0
+ 169: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 2-edge eno2-rx-1
+ 170: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 3-edge eno2-tx-0
+ 171: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 4-edge eno2-tx-1
+
+Furthermore it uses the flags input argument to trigger either rx, tx or
+both rx and tx irqs as specified in the ndo_xsk_wakeup api documentation
+
+Fixes: 80f6ccf9f116 ("igb: Introduce XSK data structures and helpers")
+Signed-off-by: Vivek Behera <vivek.behera@siemens.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: Saritha Sanigani <sarithax.sanigani@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_xsk.c | 38 +++++++++++++++++++-----
+ 1 file changed, 30 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
+index 30ce5fbb5b776..ce4a7b58cad2f 100644
+--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
++++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
+@@ -524,6 +524,16 @@ bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+ return nb_pkts < budget;
+ }
+
++static u32 igb_sw_irq_prep(struct igb_q_vector *q_vector)
++{
++ u32 eics = 0;
++
++ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
++ eics = q_vector->eims_value;
++
++ return eics;
++}
++
+ int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ {
+ struct igb_adapter *adapter = netdev_priv(dev);
+@@ -542,20 +552,32 @@ int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+
+ ring = adapter->tx_ring[qid];
+
+- if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+- return -ENETDOWN;
+-
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+- if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
++ if (flags & XDP_WAKEUP_TX) {
++ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
++ return -ENETDOWN;
++
++ eics |= igb_sw_irq_prep(ring->q_vector);
++ }
++
++ if (flags & XDP_WAKEUP_RX) {
++ /* If IGB_FLAG_QUEUE_PAIRS is active, the q_vector
++ * and NAPI is shared between RX and TX.
++ * If NAPI is already running it would be marked as missed
++ * from the TX path, making this RX call a NOP
++ */
++ ring = adapter->rx_ring[qid];
++ eics |= igb_sw_irq_prep(ring->q_vector);
++ }
++
++ if (eics) {
+ /* Cause software interrupt */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+- eics |= ring->q_vector->eims_value;
++ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ wr32(E1000_EICS, eics);
+- } else {
++ else
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+- }
+ }
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From e579364909ea39792122563eb9bf88a58fa28680 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index 35227d47cfc98..dc272b514a01b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From 82444bab3518272158394455e51513a90c41914c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:35:45 +0000
+Subject: inet: annotate data-races around isk->inet_num
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 29252397bcc1e0a1f85e5c3bee59c325f5c26341 ]
+
+UDP/TCP lookups are using RCU, thus isk->inet_num accesses
+should use READ_ONCE() and WRITE_ONCE() where needed.
+
+Fixes: 3ab5aee7fe84 ("net: Convert TCP & DCCP hash tables to use RCU / hlist_nulls")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260225203545.1512417-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/inet6_hashtables.h | 2 +-
+ include/net/inet_hashtables.h | 2 +-
+ include/net/ip.h | 2 +-
+ net/ipv4/inet_hashtables.c | 8 ++++----
+ net/ipv4/tcp_diag.c | 2 +-
+ net/ipv6/inet6_hashtables.c | 3 ++-
+ 6 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index 282e29237d936..c16de5b7963fd 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -175,7 +175,7 @@ static inline bool inet6_match(const struct net *net, const struct sock *sk,
+ {
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_family != AF_INET6 ||
+- sk->sk_portpair != ports ||
++ READ_ONCE(sk->sk_portpair) != ports ||
+ !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return false;
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index ac05a52d9e138..5a979dcab5383 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -345,7 +345,7 @@ static inline bool inet_match(const struct net *net, const struct sock *sk,
+ int dif, int sdif)
+ {
+ if (!net_eq(sock_net(sk), net) ||
+- sk->sk_portpair != ports ||
++ READ_ONCE(sk->sk_portpair) != ports ||
+ sk->sk_addrpair != cookie)
+ return false;
+
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 380afb691c419..1ce79e62a76fb 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -101,7 +101,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+
+ ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ ipcm->addr = inet->inet_saddr;
+- ipcm->protocol = inet->inet_num;
++ ipcm->protocol = READ_ONCE(inet->inet_num);
+ }
+
+ #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index b7024e3d9ac3d..a57e33ff92d77 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -200,7 +200,7 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
+ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2, unsigned short port)
+ {
+- inet_sk(sk)->inet_num = port;
++ WRITE_ONCE(inet_sk(sk)->inet_num, port);
+ inet_csk(sk)->icsk_bind_hash = tb;
+ inet_csk(sk)->icsk_bind2_hash = tb2;
+ sk_add_bind_node(sk, &tb2->owners);
+@@ -224,7 +224,7 @@ static void __inet_put_port(struct sock *sk)
+ spin_lock(&head->lock);
+ tb = inet_csk(sk)->icsk_bind_hash;
+ inet_csk(sk)->icsk_bind_hash = NULL;
+- inet_sk(sk)->inet_num = 0;
++ WRITE_ONCE(inet_sk(sk)->inet_num, 0);
+ sk->sk_userlocks &= ~SOCK_CONNECT_BIND;
+
+ spin_lock(&head2->lock);
+@@ -352,7 +352,7 @@ static inline int compute_score(struct sock *sk, const struct net *net,
+ {
+ int score = -1;
+
+- if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
++ if (net_eq(sock_net(sk), net) && READ_ONCE(sk->sk_num) == hnum &&
+ !ipv6_only_sock(sk)) {
+ if (sk->sk_rcv_saddr != daddr)
+ return -1;
+@@ -1202,7 +1202,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+
+ sk->sk_hash = 0;
+ inet_sk(sk)->inet_sport = 0;
+- inet_sk(sk)->inet_num = 0;
++ WRITE_ONCE(inet_sk(sk)->inet_num, 0);
+
+ if (tw)
+ inet_twsk_bind_unhash(tw, hinfo);
+diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
+index d83efd91f461c..7935702e394b2 100644
+--- a/net/ipv4/tcp_diag.c
++++ b/net/ipv4/tcp_diag.c
+@@ -509,7 +509,7 @@ static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_normal;
+- if (r->id.idiag_sport != htons(sk->sk_num) &&
++ if (r->id.idiag_sport != htons(READ_ONCE(sk->sk_num)) &&
+ r->id.idiag_sport)
+ goto next_normal;
+ if (r->id.idiag_dport != sk->sk_dport &&
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 5e1da088d8e11..182d38e6d6d8d 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -95,7 +95,8 @@ static inline int compute_score(struct sock *sk, const struct net *net,
+ {
+ int score = -1;
+
+- if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
++ if (net_eq(sock_net(sk), net) &&
++ READ_ONCE(inet_sk(sk)->inet_num) == hnum &&
+ sk->sk_family == PF_INET6) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return -1;
+--
+2.51.0
+
--- /dev/null
+From cf136ace4b3f731626be1251f37ef3a36f22907f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index cd229974b7974..e7d90a28948a4 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1063,7 +1063,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From 3e9781374d703f21a10d85653a472bf10e20302f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Dec 2025 16:41:20 +0800
+Subject: kselftest/harness: Use helper to avoid zero-size memset warning
+
+From: Wake Liu <wakel@google.com>
+
+[ Upstream commit 19b8a76cd99bde6d299e60490f3e62b8d3df3997 ]
+
+When building kselftests with a toolchain that enables source
+fortification (e.g., Android's build environment, which uses
+-D_FORTIFY_SOURCE=3), a build failure occurs in tests that use an
+empty FIXTURE().
+
+The root cause is that an empty fixture struct results in
+`sizeof(self_private)` evaluating to 0. The compiler's fortification
+checks then detect the `memset()` call with a compile-time constant size
+of 0, issuing a `-Wuser-defined-warnings` which is promoted to an error
+by `-Werror`.
+
+An initial attempt to guard the call with `if (sizeof(self_private) > 0)`
+was insufficient. The compiler's static analysis is aggressive enough
+to flag the `memset(..., 0)` pattern before evaluating the conditional,
+thus still triggering the error.
+
+To resolve this robustly, this change introduces a `static inline`
+helper function, `__kselftest_memset_safe()`. This function wraps the
+size check and the `memset()` call. By replacing the direct `memset()`
+in the `__TEST_F_IMPL` macro with a call to this helper, we create an
+abstraction boundary. This prevents the compiler's static analyzer from
+"seeing" the problematic pattern at the macro expansion site, resolving
+the build failure.
+
+Build Context:
+Compiler: Android (14488419, +pgo, +bolt, +lto, +mlgo, based on r584948) clang version 22.0.0 (https://android.googlesource.com/toolchain/llvm-project 2d65e4108033380e6fe8e08b1f1826cd2bfb0c99)
+Relevant Options: -O2 -Wall -Werror -D_FORTIFY_SOURCE=3 -target i686-linux-android10000
+
+Test: m kselftest_futex_futex_requeue_pi
+
+Removed Gerrit Change-Id
+Shuah Khan <skhan@linuxfoundation.org>
+
+Link: https://lore.kernel.org/r/20251224084120.249417-1-wakel@google.com
+Signed-off-by: Wake Liu <wakel@google.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Stable-dep-of: 6be268151426 ("selftests/harness: order TEST_F and XFAIL_ADD constructors")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kselftest_harness.h | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 3f66e862e83eb..159cd6729af33 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -70,6 +70,12 @@
+
+ #include "kselftest.h"
+
++static inline void __kselftest_memset_safe(void *s, int c, size_t n)
++{
++ if (n > 0)
++ memset(s, c, n);
++}
++
+ #define TEST_TIMEOUT_DEFAULT 30
+
+ /* Utilities exposed to the test definitions */
+@@ -416,7 +422,7 @@
+ self = mmap(NULL, sizeof(*self), PROT_READ | PROT_WRITE, \
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0); \
+ } else { \
+- memset(&self_private, 0, sizeof(self_private)); \
++ __kselftest_memset_safe(&self_private, 0, sizeof(self_private)); \
+ self = &self_private; \
+ } \
+ } \
+--
+2.51.0
+
--- /dev/null
+From 6e1a35dcc56184889f68c136f42ef356ce524670 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:14:10 +0545
+Subject: kunit: tool: copy caller args in run_kernel to prevent mutation
+
+From: Shuvam Pandey <shuvampandey1@gmail.com>
+
+[ Upstream commit 40804c4974b8df2adab72f6475d343eaff72b7f6 ]
+
+run_kernel() appended KUnit flags directly to the caller-provided args
+list. When exec_tests() calls run_kernel() repeatedly (e.g. with
+--run_isolated), each call mutated the same list, causing later runs
+to inherit stale filter_glob values and duplicate kunit.enable flags.
+
+Fix this by copying args at the start of run_kernel(). Add a regression
+test that calls run_kernel() twice with the same list and verifies the
+original remains unchanged.
+
+Fixes: ff9e09a3762f ("kunit: tool: support running each suite/test separately")
+Signed-off-by: Shuvam Pandey <shuvampandey1@gmail.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_kernel.py | 6 ++++--
+ tools/testing/kunit/kunit_tool_test.py | 26 ++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index 260d8d9aa1db4..2998e1bc088b2 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -346,8 +346,10 @@ class LinuxSourceTree:
+ return self.validate_config(build_dir)
+
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+- if not args:
+- args = []
++ # Copy to avoid mutating the caller-supplied list. exec_tests() reuses
++ # the same args across repeated run_kernel() calls (e.g. --run_isolated),
++ # so appending to the original would accumulate stale flags on each call.
++ args = list(args) if args else []
+ if filter_glob:
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index bbba921e0eacb..ed45bac1548d9 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -489,6 +489,32 @@ class LinuxSourceTreeTest(unittest.TestCase):
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
++ def test_run_kernel_args_not_mutated(self):
++ """Verify run_kernel() copies args so callers can reuse them."""
++ start_calls = []
++
++ def fake_start(start_args, unused_build_dir):
++ start_calls.append(list(start_args))
++ return subprocess.Popen(['printf', 'KTAP version 1\n'],
++ text=True, stdout=subprocess.PIPE)
++
++ with tempfile.TemporaryDirectory('') as build_dir:
++ tree = kunit_kernel.LinuxSourceTree(build_dir,
++ kunitconfig_paths=[os.devnull])
++ with mock.patch.object(tree._ops, 'start', side_effect=fake_start), \
++ mock.patch.object(kunit_kernel.subprocess, 'call'):
++ kernel_args = ['mem=1G']
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test1'):
++ pass
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test2'):
++ pass
++ self.assertEqual(kernel_args, ['mem=1G'],
++ 'run_kernel() should not modify caller args')
++ self.assertIn('kunit.filter_glob=suite.test1', start_calls[0])
++ self.assertIn('kunit.filter_glob=suite.test2', start_calls[1])
++
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+--
+2.51.0
+
--- /dev/null
+From b54ad8c12dded8277fa55d19b1ee505ed15cc7cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 10:11:40 +0100
+Subject: libie: don't unroll if fwlog isn't supported
+
+From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+
+[ Upstream commit 636cc3bd12f499c74eaf5dc9a7d5b832f1bb24ed ]
+
+The libie_fwlog_deinit() function can be called during driver unload
+even when firmware logging was never properly initialized. This led to call
+trace:
+
+[ 148.576156] Oops: Oops: 0000 [#1] SMP NOPTI
+[ 148.576167] CPU: 80 UID: 0 PID: 12843 Comm: rmmod Kdump: loaded Not tainted 6.17.0-rc7next-queue-3oct-01915-g06d79d51cf51 #1 PREEMPT(full)
+[ 148.576177] Hardware name: HPE ProLiant DL385 Gen10 Plus/ProLiant DL385 Gen10 Plus, BIOS A42 07/18/2020
+[ 148.576182] RIP: 0010:__dev_printk+0x16/0x70
+[ 148.576196] Code: 1f 44 00 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 0f 1f 44 00 00 41 55 41 54 49 89 d4 55 48 89 fd 53 48 85 f6 74 3c <4c> 8b 6e 50 48 89 f3 4d 85 ed 75 03 4c 8b 2e 48 89 df e8 f3 27 98
+[ 148.576204] RSP: 0018:ffffd2fd7ea17a48 EFLAGS: 00010202
+[ 148.576211] RAX: ffffd2fd7ea17aa0 RBX: ffff8eb288ae2000 RCX: 0000000000000000
+[ 148.576217] RDX: ffffd2fd7ea17a70 RSI: 00000000000000c8 RDI: ffffffffb68d3d88
+[ 148.576222] RBP: ffffffffb68d3d88 R08: 0000000000000000 R09: 0000000000000000
+[ 148.576227] R10: 00000000000000c8 R11: ffff8eb2b1a49400 R12: ffffd2fd7ea17a70
+[ 148.576231] R13: ffff8eb3141fb000 R14: ffffffffc1215b48 R15: ffffffffc1215bd8
+[ 148.576236] FS: 00007f5666ba6740(0000) GS:ffff8eb2472b9000(0000) knlGS:0000000000000000
+[ 148.576242] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 148.576247] CR2: 0000000000000118 CR3: 000000011ad17000 CR4: 0000000000350ef0
+[ 148.576252] Call Trace:
+[ 148.576258] <TASK>
+[ 148.576269] _dev_warn+0x7c/0x96
+[ 148.576290] libie_fwlog_deinit+0x112/0x117 [libie_fwlog]
+[ 148.576303] ixgbe_remove+0x63/0x290 [ixgbe]
+[ 148.576342] pci_device_remove+0x42/0xb0
+[ 148.576354] device_release_driver_internal+0x19c/0x200
+[ 148.576365] driver_detach+0x48/0x90
+[ 148.576372] bus_remove_driver+0x6d/0xf0
+[ 148.576383] pci_unregister_driver+0x2e/0xb0
+[ 148.576393] ixgbe_exit_module+0x1c/0xd50 [ixgbe]
+[ 148.576430] __do_sys_delete_module.isra.0+0x1bc/0x2e0
+[ 148.576446] do_syscall_64+0x7f/0x980
+
+It can be reproduced by trying to unload ixgbe driver in recovery mode.
+
+Fix that by checking if fwlog is supported before doing unroll.
+
+Fixes: 641585bc978e ("ixgbe: fwlog support for e610")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/libie/fwlog.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c
+index f39cc11cb7c56..5d890d9d3c4d5 100644
+--- a/drivers/net/ethernet/intel/libie/fwlog.c
++++ b/drivers/net/ethernet/intel/libie/fwlog.c
+@@ -1051,6 +1051,10 @@ void libie_fwlog_deinit(struct libie_fwlog *fwlog)
+ {
+ int status;
+
++ /* if FW logging isn't supported it means no configuration was done */
++ if (!libie_fwlog_supported(fwlog))
++ return;
++
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+--
+2.51.0
+
--- /dev/null
+From 7f3554d31383ae038e66b7041448012d26c345cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 13:22:57 +0100
+Subject: module: Remove duplicate freeing of lockdep classes
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+[ Upstream commit a7b4bc094fbaa7dc7b7b91ae33549bbd7eefaac1 ]
+
+In the error path of load_module(), under the free_module label, the
+code calls lockdep_free_key_range() to release lock classes associated
+with the MOD_DATA, MOD_RODATA and MOD_RO_AFTER_INIT module regions, and
+subsequently invokes module_deallocate().
+
+Since commit ac3b43283923 ("module: replace module_layout with
+module_memory"), the module_deallocate() function calls free_mod_mem(),
+which releases the lock classes as well and considers all module
+regions.
+
+Attempting to free these classes twice is unnecessary. Remove the
+redundant code in load_module().
+
+Fixes: ac3b43283923 ("module: replace module_layout with module_memory")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Reviewed-by: Daniel Gomez <da.gomez@samsung.com>
+Reviewed-by: Aaron Tomlin <atomlin@atomlin.com>
+Acked-by: Song Liu <song@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/module/main.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index c66b261849362..a2c798d06e3f5 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -3544,12 +3544,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ mutex_unlock(&module_mutex);
+ free_module:
+ mod_stat_bump_invalid(info, flags);
+- /* Free lock-classes; relies on the preceding sync_rcu() */
+- for_class_mod_mem_type(type, core_data) {
+- lockdep_free_key_range(mod->mem[type].base,
+- mod->mem[type].size);
+- }
+-
+ module_memory_restore_rox(mod);
+ module_deallocate(mod, info);
+ free_copy:
+--
+2.51.0
+
--- /dev/null
+From ea21bb0a9350749574dbfa776179205c3c752e10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 13:15:47 +0000
+Subject: net: annotate data-races around sk->sk_{data_ready,write_space}
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2ef2b20cf4e04ac8a6ba68493f8780776ff84300 ]
+
+skmsg (and probably other layers) are changing these pointers
+while other cpus might read them concurrently.
+
+Add corresponding READ_ONCE()/WRITE_ONCE() annotations
+for UDP, TCP and AF_UNIX.
+
+Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface")
+Reported-by: syzbot+87f770387a9e5dc6b79b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/699ee9fc.050a0220.1cd54b.0009.GAE@google.com/
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Jakub Sitnicki <jakub@cloudflare.com>
+Cc: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260225131547.1085509-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 14 +++++++-------
+ net/ipv4/tcp.c | 4 ++--
+ net/ipv4/tcp_bpf.c | 2 +-
+ net/ipv4/tcp_input.c | 14 ++++++++------
+ net/ipv4/tcp_minisocks.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ net/ipv4/udp_bpf.c | 2 +-
+ net/unix/af_unix.c | 8 ++++----
+ 8 files changed, 25 insertions(+), 23 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index ddde93dd8bc6d..12fbb0545c712 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1205,8 +1205,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+ return;
+
+ psock->saved_data_ready = sk->sk_data_ready;
+- sk->sk_data_ready = sk_psock_strp_data_ready;
+- sk->sk_write_space = sk_psock_write_space;
++ WRITE_ONCE(sk->sk_data_ready, sk_psock_strp_data_ready);
++ WRITE_ONCE(sk->sk_write_space, sk_psock_write_space);
+ }
+
+ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+@@ -1216,8 +1216,8 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+ if (!psock->saved_data_ready)
+ return;
+
+- sk->sk_data_ready = psock->saved_data_ready;
+- psock->saved_data_ready = NULL;
++ WRITE_ONCE(sk->sk_data_ready, psock->saved_data_ready);
++ WRITE_ONCE(psock->saved_data_ready, NULL);
+ strp_stop(&psock->strp);
+ }
+
+@@ -1296,8 +1296,8 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+ return;
+
+ psock->saved_data_ready = sk->sk_data_ready;
+- sk->sk_data_ready = sk_psock_verdict_data_ready;
+- sk->sk_write_space = sk_psock_write_space;
++ WRITE_ONCE(sk->sk_data_ready, sk_psock_verdict_data_ready);
++ WRITE_ONCE(sk->sk_write_space, sk_psock_write_space);
+ }
+
+ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
+@@ -1308,6 +1308,6 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
+ if (!psock->saved_data_ready)
+ return;
+
+- sk->sk_data_ready = psock->saved_data_ready;
++ WRITE_ONCE(sk->sk_data_ready, psock->saved_data_ready);
+ psock->saved_data_ready = NULL;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index e35825656e6ea..f665c87edc0f7 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1398,7 +1398,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+ err = sk_stream_error(sk, flags, err);
+ /* make sure we wake any epoll edge trigger waiter */
+ if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
+- sk->sk_write_space(sk);
++ READ_ONCE(sk->sk_write_space)(sk);
+ tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
+ }
+ if (binding)
+@@ -4111,7 +4111,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ break;
+ case TCP_NOTSENT_LOWAT:
+ WRITE_ONCE(tp->notsent_lowat, val);
+- sk->sk_write_space(sk);
++ READ_ONCE(sk->sk_write_space)(sk);
+ break;
+ case TCP_INQ:
+ if (val > 1 || val < 0)
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index ca8a5cb8e569d..d3d6a47af5270 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -725,7 +725,7 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
+ tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
+ } else {
+- sk->sk_write_space = psock->saved_write_space;
++ WRITE_ONCE(sk->sk_write_space, psock->saved_write_space);
+ /* Pairs with lockless read in sk_clone_lock() */
+ sock_replace_proto(sk, psock->sk_proto);
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index abd0d5c5a5e3f..834cd37276d59 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5151,7 +5151,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+
+ if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM);
+ return;
+ }
+@@ -5361,7 +5361,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+ void tcp_data_ready(struct sock *sk)
+ {
+ if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE))
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ }
+
+ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+@@ -5417,7 +5417,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+ inet_csk(sk)->icsk_ack.pending |=
+ (ICSK_ACK_NOMEM | ICSK_ACK_NOW);
+ inet_csk_schedule_ack(sk);
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+
+ if (skb_queue_len(&sk->sk_receive_queue) && skb->len) {
+ reason = SKB_DROP_REASON_PROTO_MEM;
+@@ -5859,7 +5859,9 @@ static void tcp_new_space(struct sock *sk)
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+
+- INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
++ INDIRECT_CALL_1(READ_ONCE(sk->sk_write_space),
++ sk_stream_write_space,
++ sk);
+ }
+
+ /* Caller made space either from:
+@@ -6065,7 +6067,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
+ BUG();
+ WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
+ if (!sock_flag(sk, SOCK_DEAD))
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ }
+ }
+ }
+@@ -7531,7 +7533,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ sock_put(fastopen_sk);
+ goto drop_and_free;
+ }
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ bh_unlock_sock(fastopen_sk);
+ sock_put(fastopen_sk);
+ } else {
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 95c30b6ec44cd..c70c29a3a0905 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -990,7 +990,7 @@ enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ reason = tcp_rcv_state_process(child, skb);
+ /* Wakeup parent, send SIGIO */
+ if (state == TCP_SYN_RECV && child->sk_state != state)
+- parent->sk_data_ready(parent);
++ READ_ONCE(parent->sk_data_ready)(parent);
+ } else {
+ /* Alas, it is possible again, because we do lookup
+ * in main socket hash table and lock on listening
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 860bd61ff047f..777199fa9502f 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1786,7 +1786,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ * using prepare_to_wait_exclusive().
+ */
+ while (nb) {
+- INDIRECT_CALL_1(sk->sk_data_ready,
++ INDIRECT_CALL_1(READ_ONCE(sk->sk_data_ready),
+ sock_def_readable, sk);
+ nb--;
+ }
+diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
+index 91233e37cd97a..779a3a03762f1 100644
+--- a/net/ipv4/udp_bpf.c
++++ b/net/ipv4/udp_bpf.c
+@@ -158,7 +158,7 @@ int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6;
+
+ if (restore) {
+- sk->sk_write_space = psock->saved_write_space;
++ WRITE_ONCE(sk->sk_write_space, psock->saved_write_space);
+ sock_replace_proto(sk, psock->sk_proto);
+ return 0;
+ }
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 9dad3af700af3..79943fb348064 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1806,7 +1806,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr *uaddr,
+ __skb_queue_tail(&other->sk_receive_queue, skb);
+ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+ sock_put(other);
+ return 0;
+
+@@ -2301,7 +2301,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ scm_stat_add(other, skb);
+ skb_queue_tail(&other->sk_receive_queue, skb);
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+ sock_put(other);
+ scm_destroy(&scm);
+ return len;
+@@ -2374,7 +2374,7 @@ static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
+
+ sk_send_sigurg(other);
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+
+ return 0;
+ out_unlock:
+@@ -2502,7 +2502,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ spin_unlock(&other->sk_receive_queue.lock);
+
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+ sent += size;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 00f7902e083789d8f758bf1d9dc8b4f4f13f10f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index a818fdc22da9a..525d4eccd194a 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -74,7 +74,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 1405f1061a549..2cbae0f9ae1f0 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -170,7 +170,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From 51d59e966d141e9397d96869a0e07fcbcd4e6204 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 16:32:56 -0800
+Subject: net: devmem: use READ_ONCE/WRITE_ONCE on binding->dev
+
+From: Bobby Eshleman <bobbyeshleman@meta.com>
+
+[ Upstream commit 40bf00ec2ee271df5ba67593991760adf8b5d0ed ]
+
+binding->dev is protected on the write-side in
+mp_dmabuf_devmem_uninstall() against concurrent writes, but due to the
+concurrent bare reads in net_devmem_get_binding() and
+validate_xmit_unreadable_skb() it should be wrapped in a
+READ_ONCE/WRITE_ONCE pair to make sure no compiler optimizations play
+with the underlying register in unforeseen ways.
+
+Doesn't present a critical bug because the known compiler optimizations
+don't result in bad behavior. There is no tearing on u64, and load
+omissions/invented loads would only break if additional binding->dev
+references were inlined together (they aren't right now).
+
+This just more strictly follows the linux memory model (i.e.,
+"Lock-Protected Writes With Lockless Reads" in
+tools/memory-model/Documentation/access-marking.txt).
+
+Fixes: bd61848900bf ("net: devmem: Implement TX path")
+Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
+Link: https://patch.msgid.link/20260302-devmem-membar-fix-v2-1-5b33c9cbc28b@meta.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 2 +-
+ net/core/devmem.c | 6 ++++--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 9b57a5b63919c..f937b8ba08222 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3975,7 +3975,7 @@ static struct sk_buff *validate_xmit_unreadable_skb(struct sk_buff *skb,
+ if (shinfo->nr_frags > 0) {
+ niov = netmem_to_net_iov(skb_frag_netmem(&shinfo->frags[0]));
+ if (net_is_devmem_iov(niov) &&
+- net_devmem_iov_binding(niov)->dev != dev)
++ READ_ONCE(net_devmem_iov_binding(niov)->dev) != dev)
+ goto out_free;
+ }
+
+diff --git a/net/core/devmem.c b/net/core/devmem.c
+index 1d04754bc756d..448f6582ac1ae 100644
+--- a/net/core/devmem.c
++++ b/net/core/devmem.c
+@@ -387,7 +387,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
+ * net_device.
+ */
+ dst_dev = dst_dev_rcu(dst);
+- if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
++ if (unlikely(!dst_dev) ||
++ unlikely(dst_dev != READ_ONCE(binding->dev))) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+@@ -504,7 +505,8 @@ static void mp_dmabuf_devmem_uninstall(void *mp_priv,
+ xa_erase(&binding->bound_rxqs, xa_idx);
+ if (xa_empty(&binding->bound_rxqs)) {
+ mutex_lock(&binding->lock);
+- binding->dev = NULL;
++ ASSERT_EXCLUSIVE_WRITER(binding->dev);
++ WRITE_ONCE(binding->dev, NULL);
+ mutex_unlock(&binding->lock);
+ }
+ break;
+--
+2.51.0
+
--- /dev/null
+From 8c9e7e7756f001aed423efb167afbe9f3b343a06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 18:13:14 -0300
+Subject: net: dsa: realtek: rtl8365mb: fix rtl8365mb_phy_ocp_write return
+ value
+
+From: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+
+[ Upstream commit 7cbe98f7bef965241a5908d50d557008cf998aee ]
+
+Function rtl8365mb_phy_ocp_write() always returns 0, even when an error
+occurs during register access. This patch fixes the return value to
+propagate the actual error code from regmap operations.
+
+Link: https://lore.kernel.org/netdev/a2dfde3c-d46f-434b-9d16-1e251e449068@yahoo.com/
+Fixes: 2796728460b8 ("net: dsa: realtek: rtl8365mb: serialize indirect PHY register access")
+Signed-off-by: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Link: https://patch.msgid.link/20260301-realtek_namiltd_fix1-v1-1-43a6bb707f9c@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/rtl8365mb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index 964a56ee16cc9..d06b384d47643 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -769,7 +769,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
+ out:
+ rtl83xx_unlock(priv);
+
+- return 0;
++ return ret;
+ }
+
+ static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+--
+2.51.0
+
--- /dev/null
+From 2f85b6e059605b3e7efc919c4ec97cf652216c2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:49 +0100
+Subject: net: enetc: use truesize as XDP RxQ info frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit f8e18abf183dbd636a8725532c7f5aa58957de84 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects truesize instead of DMA
+write size. Different assumptions in enetc driver configuration lead to
+negative tailroom.
+
+Set frag_size to the same value as frame_sz.
+
+Fixes: 2768b2e2f7d2 ("net: enetc: register XDP RX queues with frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-9-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index b6e3fb0401619..d97a76718dd89 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -3458,7 +3458,7 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ priv->rx_ring[i] = bdr;
+
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+- ENETC_RXB_DMA_SIZE_XDP);
++ ENETC_RXB_TRUESIZE);
+ if (err)
+ goto free_vector;
+
+--
+2.51.0
+
--- /dev/null
+From 53636ebbbc1f15c815866660d317c38996bffea0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 18:56:39 +0100
+Subject: net: ethernet: mtk_eth_soc: Reset prog ptr to old_prog in case of
+ error in mtk_xdp_setup()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 0abc73c8a40fd64ac1739c90bb4f42c418d27a5e ]
+
+Reset eBPF program pointer to old_prog and do not decrease its ref-count
+if mtk_open routine in mtk_xdp_setup() fails.
+
+Fixes: 7c26c20da5d42 ("net: ethernet: mtk_eth_soc: add basic XDP support")
+Suggested-by: Paolo Valerio <pvalerio@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260303-mtk-xdp-prog-ptr-fix-v2-1-97b6dbbe240f@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index e68997a29191b..8d3e15bc867d2 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3749,12 +3749,21 @@ static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
++
++ if (netif_running(dev) && need_update) {
++ int err;
++
++ err = mtk_open(dev);
++ if (err) {
++ rcu_assign_pointer(eth->prog, old_prog);
++
++ return err;
++ }
++ }
++
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+- if (netif_running(dev) && need_update)
+- return mtk_open(dev);
+-
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 5b4db3bbb6791b79445a3f64274ccd334c3fefb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 110eb2da8dbc1..77c2cf61c1fb4 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -391,7 +391,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index fbe35af615a6f..9632ad3741de1 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -455,14 +455,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From 48cc1953b5b8b9081fad728b33c9556a5723e05e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 14:02:47 +0800
+Subject: net: ipv4: fix ARM64 alignment fault in multipath hash seed
+
+From: Yung Chih Su <yuuchihsu@gmail.com>
+
+[ Upstream commit 4ee7fa6cf78ff26d783d39e2949d14c4c1cd5e7f ]
+
+`struct sysctl_fib_multipath_hash_seed` contains two u32 fields
+(user_seed and mp_seed), making it an 8-byte structure with a 4-byte
+alignment requirement.
+
+In `fib_multipath_hash_from_keys()`, the code evaluates the entire
+struct atomically via `READ_ONCE()`:
+
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+
+While this silently works on GCC by falling back to unaligned regular
+loads which the ARM64 kernel tolerates, it causes a fatal kernel panic
+when compiled with Clang and LTO enabled.
+
+Commit e35123d83ee3 ("arm64: lto: Strengthen READ_ONCE() to acquire
+when CONFIG_LTO=y") strengthens `READ_ONCE()` to use Load-Acquire
+instructions (`ldar` / `ldapr`) to prevent compiler reordering bugs
+under Clang LTO. Since the macro evaluates the full 8-byte struct,
+Clang emits a 64-bit `ldar` instruction. ARM64 architecture strictly
+requires `ldar` to be naturally aligned, thus executing it on a 4-byte
+aligned address triggers a strict Alignment Fault (FSC = 0x21).
+
+Fix the read side by moving the `READ_ONCE()` directly to the `u32`
+member, which emits a safe 32-bit `ldar Wn`.
+
+Furthermore, Eric Dumazet pointed out that `WRITE_ONCE()` on the entire
+struct in `proc_fib_multipath_hash_set_seed()` is also flawed. Analysis
+shows that Clang splits this 8-byte write into two separate 32-bit
+`str` instructions. While this avoids an alignment fault, it destroys
+atomicity and exposes a tear-write vulnerability. Fix this by
+explicitly splitting the write into two 32-bit `WRITE_ONCE()`
+operations.
+
+Finally, add the missing `READ_ONCE()` when reading `user_seed` in
+`proc_fib_multipath_hash_seed()` to ensure proper pairing and
+concurrency safety.
+
+Fixes: 4ee2a8cace3f ("net: ipv4: Add a sysctl to set multipath hash seed")
+Signed-off-by: Yung Chih Su <yuuchihsu@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260302060247.7066-1-yuuchihsu@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/ip_fib.h | 2 +-
+ net/ipv4/sysctl_net_ipv4.c | 5 +++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index b4495c38e0a01..318593743b6e1 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -559,7 +559,7 @@ static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ siphash_aligned_key_t hash_key;
+ u32 mp_seed;
+
+- mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
++ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed);
+ fib_multipath_hash_construct_key(&hash_key, mp_seed);
+
+ return flow_hash_from_keys_seed(keys, &hash_key);
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index 24dbc603cc44d..0f1dd75dbf37b 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -484,7 +484,8 @@ static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+ proc_fib_multipath_hash_rand_seed),
+ };
+
+- WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new);
++ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.user_seed, new.user_seed);
++ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed, new.mp_seed);
+ }
+
+ static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write,
+@@ -498,7 +499,7 @@ static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write
+ int ret;
+
+ mphs = &net->ipv4.sysctl_fib_multipath_hash_seed;
+- user_seed = mphs->user_seed;
++ user_seed = READ_ONCE(mphs->user_seed);
+
+ tmp = *table;
+ tmp.data = &user_seed;
+--
+2.51.0
+
--- /dev/null
+From 5aabdad831de44ccae4818fc23c74444847e9ea7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index e7d90a28948a4..e01331d965313 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3584,7 +3584,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3626,11 +3625,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 568fa7fc67b5243910cd512602f49e406b840cb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index e419e020a70a3..46681bdaeabff 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1482,10 +1482,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From acb9270e841410b85fa7f3ab03dd818fbdbe78ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 17:26:31 +0100
+Subject: net: Provide a PREEMPT_RT specific check for netdev_queue::_xmit_lock
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit b824c3e16c1904bf80df489e293d1e3cbf98896d ]
+
+After acquiring netdev_queue::_xmit_lock the number of the CPU owning
+the lock is recorded in netdev_queue::xmit_lock_owner. This works as
+long as the BH context is not preemptible.
+
+On PREEMPT_RT the softirq context is preemptible and without the
+softirq-lock it is possible to have multiple user in __dev_queue_xmit()
+submitting a skb on the same CPU. This is fine in general but this means
+also that the current CPU is recorded as netdev_queue::xmit_lock_owner.
+This in turn leads to the recursion alert and the skb is dropped.
+
+Instead of checking for the CPU number that owns the lock, PREEMPT_RT can
+check if the lock owner matches the current task.
+
+Add netif_tx_owned() which returns true if the current context owns the
+lock by comparing the provided CPU number with the recorded number. This
+resembles the current check by negating the condition (the current check
+returns true if the lock is not owned).
+On PREEMPT_RT use rt_mutex_owner() to return the lock owner and compare
+the current task against it.
+Use the new helper in __dev_queue_xmit() and netif_local_xmit_active()
+which provides a similar check.
+Update comments regarding pairing READ_ONCE().
+
+Reported-by: Bert Karwatzki <spasswolf@web.de>
+Closes: https://lore.kernel.org/all/20260216134333.412332-1-spasswolf@web.de
+Fixes: 3253cb49cbad4 ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Bert Karwatzki <spasswolf@web.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://patch.msgid.link/20260302162631.uGUyIqDT@linutronix.de
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 27 ++++++++++++++++++++++-----
+ net/core/dev.c | 5 +----
+ net/core/netpoll.c | 2 +-
+ 3 files changed, 24 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index 3d9f21274dc32..8bb7b0e2c5438 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4684,7 +4684,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
+ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+ {
+ spin_lock(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
+ }
+
+@@ -4702,7 +4702,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
+ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+ {
+ spin_lock_bh(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
+
+@@ -4711,7 +4711,7 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+ bool ok = spin_trylock(&txq->_xmit_lock);
+
+ if (likely(ok)) {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
+ return ok;
+@@ -4719,14 +4719,14 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+
+ static inline void __netif_tx_unlock(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
+ spin_unlock(&txq->_xmit_lock);
+ }
+
+ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
+ spin_unlock_bh(&txq->_xmit_lock);
+ }
+@@ -4819,6 +4819,23 @@ static inline void netif_tx_disable(struct net_device *dev)
+ local_bh_enable();
+ }
+
++#ifndef CONFIG_PREEMPT_RT
++static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
++{
++ /* Other cpus might concurrently change txq->xmit_lock_owner
++ * to -1 or to their cpu id, but not to our id.
++ */
++ return READ_ONCE(txq->xmit_lock_owner) == cpu;
++}
++
++#else
++static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
++{
++ return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
++}
++
++#endif
++
+ static inline void netif_addr_lock(struct net_device *dev)
+ {
+ unsigned char nest_level = 0;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index f937b8ba08222..c8e49eef45198 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4758,10 +4758,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
+ if (dev->flags & IFF_UP) {
+ int cpu = smp_processor_id(); /* ok because BHs are off */
+
+- /* Other cpus might concurrently change txq->xmit_lock_owner
+- * to -1 or to their cpu id, but not to our id.
+- */
+- if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
++ if (!netif_tx_owned(txq, cpu)) {
+ bool is_list = false;
+
+ if (dev_xmit_recursion())
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 09f72f10813cc..5af14f14a3623 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -132,7 +132,7 @@ static int netif_local_xmit_active(struct net_device *dev)
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+- if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
++ if (netif_tx_owned(txq, smp_processor_id()))
+ return 1;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From b911f8471ebdcac01985f285880ca6b29f69e1b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 13:23:36 -0700
+Subject: net/rds: Fix circular locking dependency in rds_tcp_tune
+
+From: Allison Henderson <achender@kernel.org>
+
+[ Upstream commit 6a877ececd6daa002a9a0002cd0fbca6592a9244 ]
+
+syzbot reported a circular locking dependency in rds_tcp_tune() where
+sk_net_refcnt_upgrade() is called while holding the socket lock:
+
+======================================================
+WARNING: possible circular locking dependency detected
+======================================================
+kworker/u10:8/15040 is trying to acquire lock:
+ffffffff8e9aaf80 (fs_reclaim){+.+.}-{0:0},
+at: __kmalloc_cache_noprof+0x4b/0x6f0
+
+but task is already holding lock:
+ffff88805a3c1ce0 (k-sk_lock-AF_INET6){+.+.}-{0:0},
+at: rds_tcp_tune+0xd7/0x930
+
+The issue occurs because sk_net_refcnt_upgrade() performs memory
+allocation (via get_net_track() -> ref_tracker_alloc()) while the
+socket lock is held, creating a circular dependency with fs_reclaim.
+
+Fix this by moving sk_net_refcnt_upgrade() outside the socket lock
+critical section. This is safe because the fields modified by the
+sk_net_refcnt_upgrade() call (sk_net_refcnt, ns_tracker) are not
+accessed by any concurrent code path at this point.
+
+v2:
+ - Corrected fixes tag
+ - check patch line wrap nits
+ - ai commentary nits
+
+Reported-by: syzbot+2e2cf5331207053b8106@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2e2cf5331207053b8106
+Fixes: 3a58f13a881e ("net: rds: acquire refcount on TCP sockets")
+Signed-off-by: Allison Henderson <achender@kernel.org>
+Link: https://patch.msgid.link/20260227202336.167757-1-achender@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rds/tcp.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 3cc2f303bf786..b66dfcc3efaa0 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -495,18 +495,24 @@ bool rds_tcp_tune(struct socket *sock)
+ struct rds_tcp_net *rtn;
+
+ tcp_sock_set_nodelay(sock->sk);
+- lock_sock(sk);
+ /* TCP timer functions might access net namespace even after
+ * a process which created this net namespace terminated.
+ */
+ if (!sk->sk_net_refcnt) {
+- if (!maybe_get_net(net)) {
+- release_sock(sk);
++ if (!maybe_get_net(net))
+ return false;
+- }
++ /*
++ * sk_net_refcnt_upgrade() must be called before lock_sock()
++ * because it does a GFP_KERNEL allocation, which can trigger
++ * fs_reclaim and create a circular lock dependency with the
++ * socket lock. The fields it modifies (sk_net_refcnt,
++ * ns_tracker) are not accessed by any concurrent code path
++ * at this point.
++ */
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
+ }
++ lock_sock(sk);
+ rtn = net_generic(net, rds_tcp_netid);
+ if (rtn->sndbuf_size > 0) {
+ sk->sk_sndbuf = rtn->sndbuf_size;
+--
+2.51.0
+
--- /dev/null
+From 7c6e2c32f566cd20357200d2a73fd9360ecd1488 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 09:06:02 -0500
+Subject: net/sched: act_ife: Fix metalist update behavior
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit e2cedd400c3ec0302ffca2490e8751772906ac23 ]
+
+Whenever an ife action replace changes the metalist, instead of
+replacing the old data on the metalist, the current ife code is appending
+the new metadata. Aside from being inappropriate behavior, this may lead
+to an unbounded addition of metadata to the metalist which might cause an
+out of bounds error when running the encode op:
+
+[ 138.423369][ C1] ==================================================================
+[ 138.424317][ C1] BUG: KASAN: slab-out-of-bounds in ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.424906][ C1] Write of size 4 at addr ffff8880077f4ffe by task ife_out_out_bou/255
+[ 138.425778][ C1] CPU: 1 UID: 0 PID: 255 Comm: ife_out_out_bou Not tainted 7.0.0-rc1-00169-gfbdfa8da05b6 #624 PREEMPT(full)
+[ 138.425795][ C1] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[ 138.425800][ C1] Call Trace:
+[ 138.425804][ C1] <IRQ>
+[ 138.425808][ C1] dump_stack_lvl (lib/dump_stack.c:122)
+[ 138.425828][ C1] print_report (mm/kasan/report.c:379 mm/kasan/report.c:482)
+[ 138.425839][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425844][ C1] ? __virt_addr_valid (./arch/x86/include/asm/preempt.h:95 (discriminator 1) ./include/linux/rcupdate.h:975 (discriminator 1) ./include/linux/mmzone.h:2207 (discriminator 1) arch/x86/mm/physaddr.c:54 (discriminator 1))
+[ 138.425853][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425859][ C1] kasan_report (mm/kasan/report.c:221 mm/kasan/report.c:597)
+[ 138.425868][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425878][ C1] kasan_check_range (mm/kasan/generic.c:186 (discriminator 1) mm/kasan/generic.c:200 (discriminator 1))
+[ 138.425884][ C1] __asan_memset (mm/kasan/shadow.c:84 (discriminator 2))
+[ 138.425889][ C1] ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425893][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:171)
+[ 138.425898][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425903][ C1] ife_encode_meta_u16 (net/sched/act_ife.c:57)
+[ 138.425910][ C1] ? __pfx_do_raw_spin_lock (kernel/locking/spinlock_debug.c:114)
+[ 138.425916][ C1] ? __asan_memcpy (mm/kasan/shadow.c:105 (discriminator 3))
+[ 138.425921][ C1] ? __pfx_ife_encode_meta_u16 (net/sched/act_ife.c:45)
+[ 138.425927][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425931][ C1] tcf_ife_act (net/sched/act_ife.c:847 net/sched/act_ife.c:879)
+
+To solve this issue, fix the replace behavior by adding the metalist to
+the ife rcu data structure.
+
+Fixes: aa9fd9a325d51 ("sched: act: ife: update parameters via rcu handling")
+Reported-by: Ruitong Liu <cnitlrt@gmail.com>
+Tested-by: Ruitong Liu <cnitlrt@gmail.com>
+Co-developed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20260304140603.76500-1-jhs@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tc_act/tc_ife.h | 4 +-
+ net/sched/act_ife.c | 93 ++++++++++++++++++-------------------
+ 2 files changed, 45 insertions(+), 52 deletions(-)
+
+diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
+index c7f24a2da1cad..24d4d5a62b3c2 100644
+--- a/include/net/tc_act/tc_ife.h
++++ b/include/net/tc_act/tc_ife.h
+@@ -13,15 +13,13 @@ struct tcf_ife_params {
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+-
++ struct list_head metalist;
+ struct rcu_head rcu;
+ };
+
+ struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
+- /* list of metaids allowed */
+- struct list_head metalist;
+ };
+ #define to_ife(a) ((struct tcf_ife_info *)a)
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 8e8f6af731d51..4ad01d4e820db 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -293,8 +293,8 @@ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
+ /* called when adding new meta information
+ */
+ static int __add_metainfo(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool atomic, bool exists)
++ struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len, bool atomic)
+ {
+ struct tcf_meta_info *mi = NULL;
+ int ret = 0;
+@@ -313,45 +313,40 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
+ }
+ }
+
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+- list_add_tail(&mi->metalist, &ife->metalist);
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
++ list_add_tail(&mi->metalist, &p->metalist);
+
+ return ret;
+ }
+
+ static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid,
+- bool exists)
++ struct tcf_ife_params *p, u32 metaid)
+ {
+ int ret;
+
+ if (!try_module_get(ops->owner))
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++ ret = __add_metainfo(ops, p, metaid, NULL, 0, true);
+ if (ret)
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool exists)
++static int add_metainfo(struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len)
+ {
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+
+ if (!ops)
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++ ret = __add_metainfo(ops, p, metaid, metaval, len, false);
+ if (ret)
+ /*put back what find_ife_oplist took */
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
++static int use_all_metadata(struct tcf_ife_params *p)
+ {
+ struct tcf_meta_ops *o;
+ int rc = 0;
+@@ -359,7 +354,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+- rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
++ rc = add_metainfo_and_get_ops(o, p, o->metaid);
+ if (rc == 0)
+ installed += 1;
+ }
+@@ -371,7 +366,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ return -EINVAL;
+ }
+
+-static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int dump_metalist(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+ struct tcf_meta_info *e;
+ struct nlattr *nest;
+@@ -379,14 +374,14 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ int total_encoded = 0;
+
+ /*can only happen on decode */
+- if (list_empty(&ife->metalist))
++ if (list_empty(&p->metalist))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
+ if (!nest)
+ goto out_nlmsg_trim;
+
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry(e, &p->metalist, metalist) {
+ if (!e->ops->get(skb, e))
+ total_encoded += 1;
+ }
+@@ -403,13 +398,11 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ return -1;
+ }
+
+-/* under ife->tcf_lock */
+-static void _tcf_ife_cleanup(struct tc_action *a)
++static void __tcf_ife_cleanup(struct tcf_ife_params *p)
+ {
+- struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_meta_info *e, *n;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_safe(e, n, &p->metalist, metalist) {
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+@@ -422,18 +415,23 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ }
+ }
+
++static void tcf_ife_cleanup_params(struct rcu_head *head)
++{
++ struct tcf_ife_params *p = container_of(head, struct tcf_ife_params,
++ rcu);
++
++ __tcf_ife_cleanup(p);
++ kfree(p);
++}
++
+ static void tcf_ife_cleanup(struct tc_action *a)
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_ife_params *p;
+
+- spin_lock_bh(&ife->tcf_lock);
+- _tcf_ife_cleanup(a);
+- spin_unlock_bh(&ife->tcf_lock);
+-
+ p = rcu_dereference_protected(ife->params, 1);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+ }
+
+ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+@@ -455,8 +453,7 @@ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+ return 0;
+ }
+
+-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+- bool exists, bool rtnl_held)
++static int populate_metalist(struct tcf_ife_params *p, struct nlattr **tb)
+ {
+ int len = 0;
+ int rc = 0;
+@@ -468,7 +465,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+- rc = add_metainfo(ife, i, val, len, exists);
++ rc = add_metainfo(p, i, val, len);
+ if (rc)
+ return rc;
+ }
+@@ -523,6 +520,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
++ INIT_LIST_HEAD(&p->metalist);
+
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+@@ -567,8 +565,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ ife = to_ife(*a);
+- if (ret == ACT_P_CREATED)
+- INIT_LIST_HEAD(&ife->metalist);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+@@ -600,8 +596,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (tb[TCA_IFE_METALST]) {
+- err = populate_metalist(ife, tb2, exists,
+- !(flags & TCA_ACT_FLAGS_NO_RTNL));
++ err = populate_metalist(p, tb2);
+ if (err)
+ goto metadata_parse_err;
+ } else {
+@@ -610,7 +605,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ * as we can. You better have at least one else we are
+ * going to bail out
+ */
+- err = use_all_metadata(ife, exists);
++ err = use_all_metadata(p);
+ if (err)
+ goto metadata_parse_err;
+ }
+@@ -626,13 +621,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+
+ return ret;
+ metadata_parse_err:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ release_idr:
++ __tcf_ife_cleanup(p);
+ kfree(p);
+ tcf_idr_release(*a, bind);
+ return err;
+@@ -679,7 +675,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
+ goto nla_put_failure;
+
+- if (dump_metalist(skb, ife)) {
++ if (dump_metalist(skb, p)) {
+ /*ignore failure to dump metalist */
+ pr_info("Failed to dump metalist\n");
+ }
+@@ -693,13 +689,13 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ return -1;
+ }
+
+-static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
++static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_params *p,
+ u16 metaid, u16 mlen, void *mdata)
+ {
+ struct tcf_meta_info *e;
+
+ /* XXX: use hash to speed up */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (metaid == e->metaid) {
+ if (e->ops) {
+ /* We check for decode presence already */
+@@ -716,10 +712,13 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ int action = ife->tcf_action;
++ struct tcf_ife_params *p;
+ u8 *ifehdr_end;
+ u8 *tlv_data;
+ u16 metalen;
+
++ p = rcu_dereference_bh(ife->params);
++
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ tcf_lastuse_update(&ife->tcf_tm);
+
+@@ -745,7 +744,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ return TC_ACT_SHOT;
+ }
+
+- if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
++ if (find_decode_metaid(skb, p, mtype, dlen, curr_data)) {
+ /* abuse overlimits to count when we receive metadata
+ * but dont have an ops for it
+ */
+@@ -769,12 +768,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ /*XXX: check if we can do this at install time instead of current
+ * send data path
+ **/
+-static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+- struct tcf_meta_info *e, *n;
++ struct tcf_meta_info *e;
+ int tot_run_sz = 0, run_sz = 0;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->check_presence) {
+ run_sz = e->ops->check_presence(skb, e);
+ tot_run_sz += run_sz;
+@@ -795,7 +794,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+ where ORIGDATA = original ethernet header ...
+ */
+- u16 metalen = ife_get_sz(skb, ife);
++ u16 metalen = ife_get_sz(skb, p);
+ int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
+ unsigned int skboff = 0;
+ int new_len = skb->len + hdrm;
+@@ -833,25 +832,21 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ if (!ife_meta)
+ goto drop;
+
+- spin_lock(&ife->tcf_lock);
+-
+ /* XXX: we dont have a clever way of telling encode to
+ * not repeat some of the computations that are done by
+ * ops->presence_check...
+ */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->encode) {
+ err = e->ops->encode(skb, (void *)(ife_meta + skboff),
+ e);
+ }
+ if (err < 0) {
+ /* too corrupt to keep around if overwritten */
+- spin_unlock(&ife->tcf_lock);
+ goto drop;
+ }
+ skboff += err;
+ }
+- spin_unlock(&ife->tcf_lock);
+ oethh = (struct ethhdr *)skb->data;
+
+ if (!is_zero_ether_addr(p->eth_src))
+--
+2.51.0
+
--- /dev/null
+From 99642fa347e89f10ea0a68919a440e119b3f7ce2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Feb 2026 23:53:07 +0900
+Subject: net: sched: avoid qdisc_reset_all_tx_gt() vs dequeue race for
+ lockless qdiscs
+
+From: Koichiro Den <den@valinux.co.jp>
+
+[ Upstream commit 7f083faf59d14c04e01ec05a7507f036c965acf8 ]
+
+When shrinking the number of real tx queues,
+netif_set_real_num_tx_queues() calls qdisc_reset_all_tx_gt() to flush
+qdiscs for queues which will no longer be used.
+
+qdisc_reset_all_tx_gt() currently serializes qdisc_reset() with
+qdisc_lock(). However, for lockless qdiscs, the dequeue path is
+serialized by qdisc_run_begin/end() using qdisc->seqlock instead, so
+qdisc_reset() can run concurrently with __qdisc_run() and free skbs
+while they are still being dequeued, leading to UAF.
+
+This can easily be reproduced on e.g. virtio-net by imposing heavy
+traffic while frequently changing the number of queue pairs:
+
+ iperf3 -ub0 -c $peer -t 0 &
+ while :; do
+ ethtool -L eth0 combined 1
+ ethtool -L eth0 combined 2
+ done
+
+With KASAN enabled, this leads to reports like:
+
+ BUG: KASAN: slab-use-after-free in __qdisc_run+0x133f/0x1760
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ __qdisc_run+0x133f/0x1760
+ __dev_queue_xmit+0x248f/0x3550
+ ip_finish_output2+0xa42/0x2110
+ ip_output+0x1a7/0x410
+ ip_send_skb+0x2e6/0x480
+ udp_send_skb+0xb0a/0x1590
+ udp_sendmsg+0x13c9/0x1fc0
+ ...
+ </TASK>
+
+ Allocated by task 1270 on cpu 5 at 44.558414s:
+ ...
+ alloc_skb_with_frags+0x84/0x7c0
+ sock_alloc_send_pskb+0x69a/0x830
+ __ip_append_data+0x1b86/0x48c0
+ ip_make_skb+0x1e8/0x2b0
+ udp_sendmsg+0x13a6/0x1fc0
+ ...
+
+ Freed by task 1306 on cpu 3 at 44.558445s:
+ ...
+ kmem_cache_free+0x117/0x5e0
+ pfifo_fast_reset+0x14d/0x580
+ qdisc_reset+0x9e/0x5f0
+ netif_set_real_num_tx_queues+0x303/0x840
+ virtnet_set_channels+0x1bf/0x260 [virtio_net]
+ ethnl_set_channels+0x684/0xae0
+ ethnl_default_set_doit+0x31a/0x890
+ ...
+
+Serialize qdisc_reset_all_tx_gt() against the lockless dequeue path by
+taking qdisc->seqlock for TCQ_F_NOLOCK qdiscs, matching the
+serialization model already used by dev_reset_queue().
+
+Additionally clear QDISC_STATE_NON_EMPTY after reset so the qdisc state
+reflects an empty queue, avoiding needless re-scheduling.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Signed-off-by: Koichiro Den <den@valinux.co.jp>
+Link: https://patch.msgid.link/20260228145307.3955532-1-den@valinux.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 738cd5b13c62f..1518454c906e1 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -758,13 +758,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+ struct Qdisc *qdisc;
++ bool nolock;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
++ nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++ if (nolock)
++ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
++ if (nolock) {
++ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
++ spin_unlock_bh(&qdisc->seqlock);
++ }
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From a822db7cb24b614f96d037d9597f6336890815c1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:28 +0000
+Subject: net: stmmac: Defer VLAN HW configuration when interface is down
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 2cd70e3968f505996d5fefdf7ca684f0f4575734 ]
+
+VLAN register accesses on the MAC side require the PHY RX clock to be
+active. When the network interface is down, the PHY is suspended and
+the RX clock is unavailable, causing VLAN operations to fail with
+timeouts.
+
+The VLAN core automatically removes VID 0 after the interface goes down
+and re-adds it when it comes back up, so these timeouts happen during
+normal interface down/up:
+
+ # ip link set end1 down
+ renesas-gbeth 15c40000.ethernet end1: Timeout accessing MAC_VLAN_Tag_Filter
+ renesas-gbeth 15c40000.ethernet end1: failed to kill vid 0081/0
+
+Adding VLANs while the interface is down also fails:
+
+ # ip link add link end1 name end1.10 type vlan id 10
+ renesas-gbeth 15c40000.ethernet end1: Timeout accessing MAC_VLAN_Tag_Filter
+ RTNETLINK answers: Device or resource busy
+
+To fix this, check if the interface is up before accessing VLAN registers.
+The software state is always kept up to date regardless of interface state.
+
+When the interface is brought up, stmmac_vlan_restore() is called
+to write the VLAN state to hardware.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-5-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 3 ++
+ .../net/ethernet/stmicro/stmmac/stmmac_vlan.c | 42 ++++++++++---------
+ 2 files changed, 26 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index d423ee979bd09..30064e4a33938 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6613,6 +6613,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ hash = 0;
+ }
+
++ if (!netif_running(priv->dev))
++ return 0;
++
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+index fcc34867405ed..e24efe3bfedbe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+@@ -76,7 +76,9 @@ static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ }
+
+ hw->vlan_filter[0] = vid;
+- vlan_write_single(dev, vid);
++
++ if (netif_running(dev))
++ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+@@ -97,12 +99,15 @@ static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ return -EPERM;
+ }
+
+- ret = vlan_write_filter(dev, hw, index, val);
++ if (netif_running(dev)) {
++ ret = vlan_write_filter(dev, hw, index, val);
++ if (ret)
++ return ret;
++ }
+
+- if (!ret)
+- hw->vlan_filter[index] = val;
++ hw->vlan_filter[index] = val;
+
+- return ret;
++ return 0;
+ }
+
+ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+@@ -115,7 +120,9 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+- vlan_write_single(dev, 0);
++
++ if (netif_running(dev))
++ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+@@ -124,22 +131,23 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) &&
+ ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) {
+- ret = vlan_write_filter(dev, hw, i, 0);
+
+- if (!ret)
+- hw->vlan_filter[i] = 0;
+- else
+- return ret;
++ if (netif_running(dev)) {
++ ret = vlan_write_filter(dev, hw, i, 0);
++ if (ret)
++ return ret;
++ }
++
++ hw->vlan_filter[i] = 0;
+ }
+ }
+
+- return ret;
++ return 0;
+ }
+
+ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+ {
+- u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+@@ -149,12 +157,8 @@ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+- for (i = 0; i < hw->num_vlan; i++) {
+- if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+- val = hw->vlan_filter[i];
+- vlan_write_filter(dev, hw, i, val);
+- }
+- }
++ for (i = 0; i < hw->num_vlan; i++)
++ vlan_write_filter(dev, hw, i, hw->vlan_filter[i]);
+ }
+
+ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+--
+2.51.0
+
--- /dev/null
+From ea948791f95cc6a75adf35aef4261a9ba1a63946 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:25 +0000
+Subject: net: stmmac: Fix error handling in VLAN add and delete paths
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 35dfedce442c4060cfe5b98368bc9643fb995716 ]
+
+stmmac_vlan_rx_add_vid() updates active_vlans and the VLAN hash
+register before writing the HW filter entry. If the filter write
+fails, it leaves a stale VID in active_vlans and the hash register.
+
+stmmac_vlan_rx_kill_vid() has the reverse problem: it clears
+active_vlans before removing the HW filter. On failure, the VID is
+gone from active_vlans but still present in the HW filter table.
+
+To fix this, reorder the operations to update the hash table first,
+then attempt the HW filter operation. If the HW filter fails, roll
+back both the active_vlans bitmap and the hash table by calling
+stmmac_vlan_update() again.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-2-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 46299b7925b44..769342e4bafa1 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6638,9 +6638,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ clear_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto err_pm_put;
++ }
+ }
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6664,15 +6668,21 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
++ ret = stmmac_vlan_update(priv, is_double);
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ goto del_vlan_error;
++ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto del_vlan_error;
++ }
+ }
+
+- ret = stmmac_vlan_update(priv, is_double);
+-
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+--
+2.51.0
+
--- /dev/null
+From 8044de99b9b46e7ba6dfaa826cda225a6d08bc95 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:27 +0000
+Subject: net: stmmac: Fix VLAN HW state restore
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit bd7ad51253a76fb35886d01cfe9a37f0e4ed6709 ]
+
+When the network interface is opened or resumed, a DMA reset is performed,
+which resets all hardware state, including VLAN state. Currently, only
+the resume path is restoring the VLAN state via
+stmmac_restore_hw_vlan_rx_fltr(), but that is incomplete: the VLAN hash
+table and the VLAN_TAG control bits are not restored.
+
+Therefore, add stmmac_vlan_restore(), which restores the full VLAN
+state by updating both the HW filter entries and the hash table, and
+call it from both the open and resume paths.
+
+The VLAN restore is moved outside of phylink_rx_clk_stop_block/unblock
+in the resume path because receive clock stop is already disabled when
+stmmac supports VLAN.
+
+Also, remove the hash readback code in vlan_restore_hw_rx_fltr() that
+attempts to restore VTHM by reading VLAN_HASH_TABLE, as it always reads
+zero after DMA reset, making it dead code.
+
+Fixes: 3cd1cfcba26e ("net: stmmac: Implement VLAN Hash Filtering in XGMAC")
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-4-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2cd70e3968f5 ("net: stmmac: Defer VLAN HW configuration when interface is down")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 24 +++++++++++++++++--
+ .../net/ethernet/stmicro/stmmac/stmmac_vlan.c | 10 --------
+ 2 files changed, 22 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 6bebc57e6ae7c..d423ee979bd09 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -139,6 +139,7 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
++static int stmmac_vlan_restore(struct stmmac_priv *priv);
+
+ #ifdef CONFIG_DEBUG_FS
+ static const struct net_device_ops stmmac_netdev_ops;
+@@ -3967,6 +3968,8 @@ static int __stmmac_open(struct net_device *dev,
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+
++ stmmac_vlan_restore(priv);
++
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+@@ -6697,6 +6700,23 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ return ret;
+ }
+
++static int stmmac_vlan_restore(struct stmmac_priv *priv)
++{
++ int ret;
++
++ if (!(priv->dev->features & NETIF_F_VLAN_FEATURES))
++ return 0;
++
++ if (priv->hw->num_vlan)
++ stmmac_restore_hw_vlan_rx_fltr(priv, priv->dev, priv->hw);
++
++ ret = stmmac_vlan_update(priv, priv->num_double_vlans);
++ if (ret)
++ netdev_err(priv->dev, "Failed to restore VLANs\n");
++
++ return ret;
++}
++
+ static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+@@ -7931,10 +7951,10 @@ int stmmac_resume(struct device *dev)
+ stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
+ stmmac_set_rx_mode(ndev);
+-
+- stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
+
++ stmmac_vlan_restore(priv);
++
+ stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+index de1a70e1c86ef..fcc34867405ed 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+@@ -139,9 +139,6 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+ {
+- void __iomem *ioaddr = hw->pcsr;
+- u32 value;
+- u32 hash;
+ u32 val;
+ int i;
+
+@@ -158,13 +155,6 @@ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+-
+- hash = readl(ioaddr + VLAN_HASH_TABLE);
+- if (hash & VLAN_VLHT) {
+- value = readl(ioaddr + VLAN_TAG);
+- value |= VLAN_VTHM;
+- writel(value, ioaddr + VLAN_TAG);
+- }
+ }
+
+ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+--
+2.51.0
+
--- /dev/null
+From 87050fc73b01d2c8ffb4ee860ad5385b3ef6f00a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:26 +0000
+Subject: net: stmmac: Improve double VLAN handling
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit e38200e361cbe331806dc454c76c11c7cd95e1b9 ]
+
+The double VLAN bits (EDVLP, ESVL, DOVLTC) are handled inconsistently
+between the two vlan_update_hash() implementations:
+
+- dwxgmac2_update_vlan_hash() explicitly clears the double VLAN bits when
+is_double is false, meaning that adding a 802.1Q VLAN will disable
+double VLAN mode:
+
+ $ ip link add link eth0 name eth0.200 type vlan id 200 protocol 802.1ad
+ $ ip link add link eth0 name eth0.100 type vlan id 100
+ # Double VLAN bits no longer set
+
+- vlan_update_hash() sets these bits and only clears them when the last
+VLAN has been removed, so double VLAN mode remains enabled even after all
+802.1AD VLANs are removed.
+
+Address both issues by tracking the number of active 802.1AD VLANs in
+priv->num_double_vlans. Pass this count to stmmac_vlan_update() so both
+implementations correctly set the double VLAN bits when any 802.1AD
+VLAN is active, and clear them only when none remain.
+
+Also update vlan_update_hash() to explicitly clear the double VLAN bits
+when is_double is false, matching the dwxgmac2 behavior.
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-3-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2cd70e3968f5 ("net: stmmac: Defer VLAN HW configuration when interface is down")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 +
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 16 ++++++++++++----
+ .../net/ethernet/stmicro/stmmac/stmmac_vlan.c | 8 ++++++++
+ 3 files changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index 7ca5477be390b..81706f175d330 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -318,6 +318,7 @@ struct stmmac_priv {
+ void __iomem *ptpaddr;
+ void __iomem *estaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
++ unsigned int num_double_vlans;
+ int sfty_irq;
+ int sfty_ce_irq;
+ int sfty_ue_irq;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 769342e4bafa1..6bebc57e6ae7c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6619,6 +6619,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+ {
+ struct stmmac_priv *priv = netdev_priv(ndev);
++ unsigned int num_double_vlans;
+ bool is_double = false;
+ int ret;
+
+@@ -6630,7 +6631,8 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ is_double = true;
+
+ set_bit(vid, priv->active_vlans);
+- ret = stmmac_vlan_update(priv, is_double);
++ num_double_vlans = priv->num_double_vlans + is_double;
++ ret = stmmac_vlan_update(priv, num_double_vlans);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ goto err_pm_put;
+@@ -6640,11 +6642,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+- stmmac_vlan_update(priv, is_double);
++ stmmac_vlan_update(priv, priv->num_double_vlans);
+ goto err_pm_put;
+ }
+ }
+
++ priv->num_double_vlans = num_double_vlans;
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6657,6 +6661,7 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+ {
+ struct stmmac_priv *priv = netdev_priv(ndev);
++ unsigned int num_double_vlans;
+ bool is_double = false;
+ int ret;
+
+@@ -6668,7 +6673,8 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+- ret = stmmac_vlan_update(priv, is_double);
++ num_double_vlans = priv->num_double_vlans - is_double;
++ ret = stmmac_vlan_update(priv, num_double_vlans);
+ if (ret) {
+ set_bit(vid, priv->active_vlans);
+ goto del_vlan_error;
+@@ -6678,11 +6684,13 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret) {
+ set_bit(vid, priv->active_vlans);
+- stmmac_vlan_update(priv, is_double);
++ stmmac_vlan_update(priv, priv->num_double_vlans);
+ goto del_vlan_error;
+ }
+ }
+
++ priv->num_double_vlans = num_double_vlans;
++
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+index b18404dd5a8be..de1a70e1c86ef 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+@@ -183,6 +183,10 @@ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
++ } else {
++ value &= ~VLAN_EDVLP;
++ value &= ~VLAN_ESVL;
++ value &= ~VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+@@ -193,6 +197,10 @@ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
++ } else {
++ value &= ~VLAN_EDVLP;
++ value &= ~VLAN_ESVL;
++ value &= ~VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+--
+2.51.0
+
--- /dev/null
+From 0cfa561bea55001ba177e2d94e3bc84f1ff65600 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 15:53:56 +0530
+Subject: net: ti: icssg-prueth: Fix ping failure after offload mode setup when
+ link speed is not 1G
+
+From: MD Danish Anwar <danishanwar@ti.com>
+
+[ Upstream commit 147792c395db870756a0dc87ce656c75ae7ab7e8 ]
+
+When both eth interfaces with links up are added to a bridge or hsr
+interface, ping fails if the link speed is not 1Gbps (e.g., 100Mbps).
+
+The issue is seen because when switching to offload (bridge/hsr) mode,
+prueth_emac_restart() restarts the firmware and clears DRAM with
+memset_io(), setting all memory to 0. This includes PORT_LINK_SPEED_OFFSET
+which firmware reads for link speed. The value 0 corresponds to
+FW_LINK_SPEED_1G (0x00), so for 1Gbps links the default value is correct
+and ping works. For 100Mbps links, the firmware needs FW_LINK_SPEED_100M
+(0x01) but gets 0 instead, causing ping to fail. The function
+emac_adjust_link() is called to reconfigure, but it detects no state change
+(emac->link is still 1, speed/duplex match PHY) so new_state remains false
+and icssg_config_set_speed() is never called to correct the firmware speed
+value.
+
+The fix resets emac->link to 0 before calling emac_adjust_link() in
+prueth_emac_common_start(). This forces new_state=true, ensuring
+icssg_config_set_speed() is called to write the correct speed value to
+firmware memory.
+
+Fixes: 06feac15406f ("net: ti: icssg-prueth: Fix emac link speed handling")
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Link: https://patch.msgid.link/20260226102356.2141871-1-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icssg_prueth.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index e42d0fdefee12..07489564270b2 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -270,6 +270,14 @@ static int prueth_emac_common_start(struct prueth *prueth)
+ if (ret)
+ goto disable_class;
+
++ /* Reset link state to force reconfiguration in
++ * emac_adjust_link(). Without this, if the link was already up
++ * before restart, emac_adjust_link() won't detect any state
++ * change and will skip critical configuration like writing
++ * speed to firmware.
++ */
++ emac->link = 0;
++
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
+--
+2.51.0
+
--- /dev/null
+From 82f62a9193a749e740cd50c948ecf0c3476dede7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index e957aa12a8a44..2a140be86bafc 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2130,6 +2130,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++	/* check if nd_tbl is not initialized due to
++	 * ipv6.disable=1 set during boot
++	 */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From cfbbda72cd8fbd6b1c84645d924963415a27a4e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 01:56:40 +0000
+Subject: net_sched: sch_fq: clear q->band_pkt_count[] in fq_reset()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a4c2b8be2e5329e7fac6e8f64ddcb8958155cfcb ]
+
+When/if a NIC resets, queues are deactivated by dev_deactivate_many(),
+then reactivated when the reset operation completes.
+
+fq_reset() removes all the skbs from various queues.
+
+If we do not clear q->band_pkt_count[], these counters keep growing
+and can eventually reach sch->limit, preventing new packets to be queued.
+
+Many thanks to Praveen for discovering the root cause.
+
+Fixes: 29f834aa326e ("net_sched: sch_fq: add 3 bands and WRR scheduling")
+Diagnosed-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20260304015640.961780-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_fq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index fee922da2f99c..5e41930079948 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -826,6 +826,7 @@ static void fq_reset(struct Qdisc *sch)
+ for (idx = 0; idx < FQ_BANDS; idx++) {
+ q->band_flows[idx].new_flows.first = NULL;
+ q->band_flows[idx].old_flows.first = NULL;
++ q->band_pkt_count[idx] = 0;
+ }
+ q->delayed = RB_ROOT;
+ q->flows = 0;
+--
+2.51.0
+
--- /dev/null
+From 7b2299258d7b2ca839a9fc22d99163442ed53726 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 23:28:15 +0100
+Subject: netfilter: nf_tables: clone set on flush only
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit fb7fb4016300ac622c964069e286dc83166a5d52 ]
+
+Syzbot with fault injection triggered a failing memory allocation with
+GFP_KERNEL which results in a WARN splat:
+
+iter.err
+WARNING: net/netfilter/nf_tables_api.c:845 at nft_map_deactivate+0x34e/0x3c0 net/netfilter/nf_tables_api.c:845, CPU#0: syz.0.17/5992
+Modules linked in:
+CPU: 0 UID: 0 PID: 5992 Comm: syz.0.17 Not tainted syzkaller #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2026
+RIP: 0010:nft_map_deactivate+0x34e/0x3c0 net/netfilter/nf_tables_api.c:845
+Code: 8b 05 86 5a 4e 09 48 3b 84 24 a0 00 00 00 75 62 48 8d 65 d8 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc cc e8 63 6d fa f7 90 <0f> 0b 90 43
++80 7c 35 00 00 0f 85 23 fe ff ff e9 26 fe ff ff 89 d9
+RSP: 0018:ffffc900045af780 EFLAGS: 00010293
+RAX: ffffffff89ca45bd RBX: 00000000fffffff4 RCX: ffff888028111e40
+RDX: 0000000000000000 RSI: 00000000fffffff4 RDI: 0000000000000000
+RBP: ffffc900045af870 R08: 0000000000400dc0 R09: 00000000ffffffff
+R10: dffffc0000000000 R11: fffffbfff1d141db R12: ffffc900045af7e0
+R13: 1ffff920008b5f24 R14: dffffc0000000000 R15: ffffc900045af920
+FS: 000055557a6a5500(0000) GS:ffff888125496000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fb5ea271fc0 CR3: 000000003269e000 CR4: 00000000003526f0
+Call Trace:
+ <TASK>
+ __nft_release_table+0xceb/0x11f0 net/netfilter/nf_tables_api.c:12115
+ nft_rcv_nl_event+0xc25/0xdb0 net/netfilter/nf_tables_api.c:12187
+ notifier_call_chain+0x19d/0x3a0 kernel/notifier.c:85
+ blocking_notifier_call_chain+0x6a/0x90 kernel/notifier.c:380
+ netlink_release+0x123b/0x1ad0 net/netlink/af_netlink.c:761
+ __sock_release net/socket.c:662 [inline]
+ sock_close+0xc3/0x240 net/socket.c:1455
+
+Restrict set clone to the flush set command in the preparation phase.
+Add NFT_ITER_UPDATE_CLONE and use it for this purpose, update the rbtree
+and pipapo backends to only clone the set when this iteration type is
+used.
+
+As for the existing NFT_ITER_UPDATE type, update the pipapo backend to
+use the existing set clone if available, otherwise use the existing set
+representation. After this update, there is no need to clone a set that
+is being deleted, this includes bound anonymous set.
+
+An alternative approach to NFT_ITER_UPDATE_CLONE is to add a .clone
+interface and call it from the flush set path.
+
+Reported-by: syzbot+4924a0edc148e8b4b342@syzkaller.appspotmail.com
+Fixes: 3f1d886cc7c3 ("netfilter: nft_set_pipapo: move cloning of match info to insert/removal path")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 2 ++
+ net/netfilter/nf_tables_api.c | 10 +++++++++-
+ net/netfilter/nft_set_hash.c | 1 +
+ net/netfilter/nft_set_pipapo.c | 11 +++++++++--
+ net/netfilter/nft_set_rbtree.c | 8 +++++---
+ 5 files changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index f1b67b40dd4de..077d3121cc9f1 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -317,11 +317,13 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
+ * @NFT_ITER_UNSPEC: unspecified, to catch errors
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
++ * @NFT_ITER_UPDATE_CLONE: clone set before iteration under mutex to update element
+ */
+ enum nft_iter_type {
+ NFT_ITER_UNSPEC,
+ NFT_ITER_READ,
+ NFT_ITER_UPDATE,
++ NFT_ITER_UPDATE_CLONE,
+ };
+
+ struct nft_set;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index b5e1b26a5302b..7bb5719c214b0 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -832,6 +832,11 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
+ }
+ }
+
++/* Use NFT_ITER_UPDATE iterator even if this may be called from the preparation
++ * phase, the set clone might already exist from a previous command, or it might
++ * be a set that is going away and does not require a clone. The netns and
++ * netlink release paths also need to work on the live set.
++ */
+ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ struct nft_set_iter iter = {
+@@ -8010,9 +8015,12 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
+
+ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
+ {
++ /* The set backend might need to clone the set, do it now from the
++ * preparation phase, use NFT_ITER_UPDATE_CLONE iterator type.
++ */
+ struct nft_set_iter iter = {
+ .genmask = genmask,
+- .type = NFT_ITER_UPDATE,
++ .type = NFT_ITER_UPDATE_CLONE,
+ .fn = nft_setelem_flush,
+ };
+
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 739b992bde591..b0e571c8e3f38 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -374,6 +374,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ {
+ switch (iter->type) {
+ case NFT_ITER_UPDATE:
++ case NFT_ITER_UPDATE_CLONE:
+ /* only relevant for netlink dumps which use READ type */
+ WARN_ON_ONCE(iter->skip != 0);
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 18e1903b1d3d0..cd0d2d4ae36bf 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -2145,13 +2145,20 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nft_pipapo_match *m;
+
+ switch (iter->type) {
+- case NFT_ITER_UPDATE:
++ case NFT_ITER_UPDATE_CLONE:
+ m = pipapo_maybe_clone(set);
+ if (!m) {
+ iter->err = -ENOMEM;
+ return;
+ }
+-
++ nft_pipapo_do_walk(ctx, set, m, iter);
++ break;
++ case NFT_ITER_UPDATE:
++ if (priv->clone)
++ m = priv->clone;
++ else
++ m = rcu_dereference_protected(priv->match,
++ nft_pipapo_transaction_mutex_held(set));
+ nft_pipapo_do_walk(ctx, set, m, iter);
+ break;
+ case NFT_ITER_READ:
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index a4fb5b517d9de..5d91b7d08d33a 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -810,13 +810,15 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
+ struct nft_rbtree *priv = nft_set_priv(set);
+
+ switch (iter->type) {
+- case NFT_ITER_UPDATE:
+- lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
+-
++ case NFT_ITER_UPDATE_CLONE:
+ if (nft_array_may_resize(set) < 0) {
+ iter->err = -ENOMEM;
+ break;
+ }
++ fallthrough;
++ case NFT_ITER_UPDATE:
++ lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
++
+ nft_rbtree_do_walk(ctx, set, iter);
+ break;
+ case NFT_ITER_READ:
+--
+2.51.0
+
--- /dev/null
+From 5a738b376221e81eb1748f8d8a2eb6279542184c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 23:12:37 +0100
+Subject: netfilter: nf_tables: unconditionally bump set->nelems before
+ insertion
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit def602e498a4f951da95c95b1b8ce8ae68aa733a ]
+
+In case that the set is full, a new element gets published then removed
+without waiting for the RCU grace period, while RCU reader can be
+walking over it already.
+
+To address this issue, add the element transaction even if set is full,
+but toggle the set_full flag to report -ENFILE so the abort path safely
+unwinds the set to its previous state.
+
+As for element updates, decrement set->nelems to restore it.
+
+A simpler fix is to call synchronize_rcu() in the error path.
+However, with a large batch adding elements to already maxed-out set,
+this could cause noticeable slowdown of such batches.
+
+Fixes: 35d0ac9070ef ("netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL")
+Reported-by: Inseo An <y0un9sa@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 30 ++++++++++++++++--------------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 89039bbf7d638..b5e1b26a5302b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7288,6 +7288,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_data_desc desc;
+ enum nft_registers dreg;
+ struct nft_trans *trans;
++ bool set_full = false;
+ u64 expiration;
+ u64 timeout;
+ int err, i;
+@@ -7574,10 +7575,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ if (err < 0)
+ goto err_elem_free;
+
++ if (!(flags & NFT_SET_ELEM_CATCHALL)) {
++ unsigned int max = nft_set_maxsize(set), nelems;
++
++ nelems = atomic_inc_return(&set->nelems);
++ if (nelems > max)
++ set_full = true;
++ }
++
+ trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
+ if (trans == NULL) {
+ err = -ENOMEM;
+- goto err_elem_free;
++ goto err_set_size;
+ }
+
+ ext->genmask = nft_genmask_cur(ctx->net);
+@@ -7629,7 +7638,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+
+ ue->priv = elem_priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans);
+- goto err_elem_free;
++ goto err_set_size;
+ }
+ }
+ }
+@@ -7647,23 +7656,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ goto err_element_clash;
+ }
+
+- if (!(flags & NFT_SET_ELEM_CATCHALL)) {
+- unsigned int max = nft_set_maxsize(set);
+-
+- if (!atomic_add_unless(&set->nelems, 1, max)) {
+- err = -ENFILE;
+- goto err_set_full;
+- }
+- }
+-
+ nft_trans_container_elem(trans)->elems[0].priv = elem.priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans);
+- return 0;
+
+-err_set_full:
+- nft_setelem_remove(ctx->net, set, elem.priv);
++ return set_full ? -ENFILE : 0;
++
+ err_element_clash:
+ kfree(trans);
++err_set_size:
++ if (!(flags & NFT_SET_ELEM_CATCHALL))
++ atomic_dec(&set->nelems);
+ err_elem_free:
+ nf_tables_set_elem_destroy(ctx, set, elem.priv);
+ err_parse_data:
+--
+2.51.0
+
--- /dev/null
+From 462cac8ce5923b20a0342ee1bef50f1e91f18215 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 16:31:32 +0100
+Subject: netfilter: nft_set_pipapo: split gc into unlink and reclaim phase
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 9df95785d3d8302f7c066050117b04cd3c2048c2 ]
+
+Yiming Qian reports Use-after-free in the pipapo set type:
+ Under a large number of expired elements, commit-time GC can run for a very
+ long time in a non-preemptible context, triggering soft lockup warnings and
+ RCU stall reports (local denial of service).
+
+We must split GC in an unlink and a reclaim phase.
+
+We cannot queue elements for freeing until pointers have been swapped.
+Expired elements are still exposed to both the packet path and userspace
+dumpers via the live copy of the data structure.
+
+call_rcu() does not protect us: dump operations or element lookups starting
+after call_rcu has fired can still observe the free'd element, unless the
+commit phase has made enough progress to swap the clone and live pointers
+before any new reader has picked up the old version.
+
+This is a similar approach to the one done recently for the rbtree
+backend in commit 35f83a75529a ("netfilter: nft_set_rbtree: don't gc
+elements on insert").
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Reported-by: Yiming Qian <yimingqian591@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 5 +++
+ net/netfilter/nf_tables_api.c | 5 ---
+ net/netfilter/nft_set_pipapo.c | 51 ++++++++++++++++++++++++++-----
+ net/netfilter/nft_set_pipapo.h | 2 ++
+ 4 files changed, 50 insertions(+), 13 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 077d3121cc9f1..c18cffafc9696 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1860,6 +1860,11 @@ struct nft_trans_gc {
+ struct rcu_head rcu;
+ };
+
++static inline int nft_trans_gc_space(const struct nft_trans_gc *trans)
++{
++ return NFT_TRANS_GC_BATCHCOUNT - trans->count;
++}
++
+ static inline void nft_ctx_update(struct nft_ctx *ctx,
+ const struct nft_trans *trans)
+ {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7bb5719c214b0..598a9fe03fb0b 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10646,11 +10646,6 @@ static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
+ schedule_work(&trans_gc_work);
+ }
+
+-static int nft_trans_gc_space(struct nft_trans_gc *trans)
+-{
+- return NFT_TRANS_GC_BATCHCOUNT - trans->count;
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp)
+ {
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index cd0d2d4ae36bf..d9b74d588c768 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1681,11 +1681,11 @@ static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
+ }
+
+ /**
+- * pipapo_gc() - Drop expired entries from set, destroy start and end elements
++ * pipapo_gc_scan() - Drop expired entries from set and link them to gc list
+ * @set: nftables API set representation
+ * @m: Matching data
+ */
+-static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
++static void pipapo_gc_scan(struct nft_set *set, struct nft_pipapo_match *m)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct net *net = read_pnet(&set->net);
+@@ -1698,6 +1698,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ if (!gc)
+ return;
+
++ list_add(&gc->list, &priv->gc_head);
++
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ const struct nft_pipapo_field *f;
+@@ -1725,9 +1727,13 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ * NFT_SET_ELEM_DEAD_BIT.
+ */
+ if (__nft_set_elem_expired(&e->ext, tstamp)) {
+- gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
+- if (!gc)
+- return;
++ if (!nft_trans_gc_space(gc)) {
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
++ if (!gc)
++ return;
++
++ list_add(&gc->list, &priv->gc_head);
++ }
+
+ nft_pipapo_gc_deactivate(net, set, e);
+ pipapo_drop(m, rulemap);
+@@ -1741,10 +1747,30 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ }
+ }
+
+- gc = nft_trans_gc_catchall_sync(gc);
++ priv->last_gc = jiffies;
++}
++
++/**
++ * pipapo_gc_queue() - Free expired elements
++ * @set: nftables API set representation
++ */
++static void pipapo_gc_queue(struct nft_set *set)
++{
++ struct nft_pipapo *priv = nft_set_priv(set);
++ struct nft_trans_gc *gc, *next;
++
++ /* always do a catchall cycle: */
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
+ if (gc) {
++ gc = nft_trans_gc_catchall_sync(gc);
++ if (gc)
++ nft_trans_gc_queue_sync_done(gc);
++ }
++
++ /* always purge queued gc elements. */
++ list_for_each_entry_safe(gc, next, &priv->gc_head, list) {
++ list_del(&gc->list);
+ nft_trans_gc_queue_sync_done(gc);
+- priv->last_gc = jiffies;
+ }
+ }
+
+@@ -1798,6 +1824,10 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ *
+ * We also need to create a new working copy for subsequent insertions and
+ * deletions.
++ *
++ * After the live copy has been replaced by the clone, we can safely queue
++ * expired elements that have been collected by pipapo_gc_scan() for
++ * memory reclaim.
+ */
+ static void nft_pipapo_commit(struct nft_set *set)
+ {
+@@ -1808,7 +1838,7 @@ static void nft_pipapo_commit(struct nft_set *set)
+ return;
+
+ if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
+- pipapo_gc(set, priv->clone);
++ pipapo_gc_scan(set, priv->clone);
+
+ old = rcu_replace_pointer(priv->match, priv->clone,
+ nft_pipapo_transaction_mutex_held(set));
+@@ -1816,6 +1846,8 @@ static void nft_pipapo_commit(struct nft_set *set)
+
+ if (old)
+ call_rcu(&old->rcu, pipapo_reclaim_match);
++
++ pipapo_gc_queue(set);
+ }
+
+ static void nft_pipapo_abort(const struct nft_set *set)
+@@ -2280,6 +2312,7 @@ static int nft_pipapo_init(const struct nft_set *set,
+ f->mt = NULL;
+ }
+
++ INIT_LIST_HEAD(&priv->gc_head);
+ rcu_assign_pointer(priv->match, m);
+
+ return 0;
+@@ -2329,6 +2362,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+
++ WARN_ON_ONCE(!list_empty(&priv->gc_head));
++
+ m = rcu_dereference_protected(priv->match, true);
+
+ if (priv->clone) {
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index eaab422aa56ab..9aee9a9eaeb75 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -156,12 +156,14 @@ struct nft_pipapo_match {
+ * @clone: Copy where pending insertions and deletions are kept
+ * @width: Total bytes to be matched for one packet, including padding
+ * @last_gc: Timestamp of last garbage collection run, jiffies
++ * @gc_head: list of nft_trans_gc to queue up for mem reclaim
+ */
+ struct nft_pipapo {
+ struct nft_pipapo_match __rcu *match;
+ struct nft_pipapo_match *clone;
+ int width;
+ unsigned long last_gc;
++ struct list_head gc_head;
+ };
+
+ struct nft_pipapo_elem;
+--
+2.51.0
+
--- /dev/null
+From 20d403136c40923e3b428c37a705a6f9c32f3373 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 13:32:33 +0000
+Subject: netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict
+ sequence
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit a0b4c7a49137ed21279f354eb59f49ddae8dffc2 ]
+
+Fix netfslib so that, when making an unbuffered or DIO write, it sends
+each subrequest strictly sequentially, waiting till the previous one is
+'committed' before sending the next, so that we don't have pieces
+landing out of order and potentially leaving a hole if an error occurs
+(ENOSPC for example).
+
+This is done by copying in just those bits of issuing, collecting and
+retrying subrequests that are necessary to do one subrequest at a time.
+Retrying, in particular, is simpler because if the current subrequest needs
+retrying, the source iterator can just be copied again and the subrequest
+prepped and issued again without needing to be concerned about whether it
+needs merging with the previous or next in the sequence.
+
+Note that the issuing loop waits for a subrequest to complete right after
+issuing it, but this wait could be moved elsewhere allowing preparatory
+steps to be performed whilst the subrequest is in progress. In particular,
+once content encryption is available in netfslib, that could be done whilst
+waiting, as could cleanup of buffers that have been completed.
+
+Fixes: 153a9961b551 ("netfs: Implement unbuffered/DIO write support")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://patch.msgid.link/58526.1772112753@warthog.procyon.org.uk
+Tested-by: Steve French <sfrench@samba.org>
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/direct_write.c | 228 ++++++++++++++++++++++++++++++++---
+ fs/netfs/internal.h | 4 +-
+ fs/netfs/write_collect.c | 21 ----
+ fs/netfs/write_issue.c | 41 +------
+ include/trace/events/netfs.h | 4 +-
+ 5 files changed, 221 insertions(+), 77 deletions(-)
+
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index a9d1c3b2c0842..dd1451bf7543d 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -9,6 +9,202 @@
+ #include <linux/uio.h>
+ #include "internal.h"
+
++/*
++ * Perform the cleanup rituals after an unbuffered write is complete.
++ */
++static void netfs_unbuffered_write_done(struct netfs_io_request *wreq)
++{
++ struct netfs_inode *ictx = netfs_inode(wreq->inode);
++
++ _enter("R=%x", wreq->debug_id);
++
++ /* Okay, declare that all I/O is complete. */
++ trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
++
++ if (!wreq->error)
++ netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
++
++ if (wreq->origin == NETFS_DIO_WRITE &&
++ wreq->mapping->nrpages) {
++ /* mmap may have got underfoot and we may now have folios
++ * locally covering the region we just wrote. Attempt to
++ * discard the folios, but leave in place any modified locally.
++ * ->write_iter() is prevented from interfering by the DIO
++ * counter.
++ */
++ pgoff_t first = wreq->start >> PAGE_SHIFT;
++ pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
++
++ invalidate_inode_pages2_range(wreq->mapping, first, last);
++ }
++
++ if (wreq->origin == NETFS_DIO_WRITE)
++ inode_dio_end(wreq->inode);
++
++ _debug("finished");
++ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
++ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
++
++ if (wreq->iocb) {
++ size_t written = umin(wreq->transferred, wreq->len);
++
++ wreq->iocb->ki_pos += written;
++ if (wreq->iocb->ki_complete) {
++ trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
++ wreq->iocb->ki_complete(wreq->iocb, wreq->error ?: written);
++ }
++ wreq->iocb = VFS_PTR_POISON;
++ }
++
++ netfs_clear_subrequests(wreq);
++}
++
++/*
++ * Collect the subrequest results of unbuffered write subrequests.
++ */
++static void netfs_unbuffered_write_collect(struct netfs_io_request *wreq,
++ struct netfs_io_stream *stream,
++ struct netfs_io_subrequest *subreq)
++{
++ trace_netfs_collect_sreq(wreq, subreq);
++
++ spin_lock(&wreq->lock);
++ list_del_init(&subreq->rreq_link);
++ spin_unlock(&wreq->lock);
++
++ wreq->transferred += subreq->transferred;
++ iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
++
++ stream->collected_to = subreq->start + subreq->transferred;
++ wreq->collected_to = stream->collected_to;
++ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
++
++ trace_netfs_collect_stream(wreq, stream);
++ trace_netfs_collect_state(wreq, wreq->collected_to, 0);
++}
++
++/*
++ * Write data to the server without going through the pagecache and without
++ * writing it to the local cache. We dispatch the subrequests serially and
++ * wait for each to complete before dispatching the next, lest we leave a gap
++ * in the data written due to a failure such as ENOSPC. We could, however
++ * attempt to do preparation such as content encryption for the next subreq
++ * whilst the current is in progress.
++ */
++static int netfs_unbuffered_write(struct netfs_io_request *wreq)
++{
++ struct netfs_io_subrequest *subreq = NULL;
++ struct netfs_io_stream *stream = &wreq->io_streams[0];
++ int ret;
++
++ _enter("%llx", wreq->len);
++
++ if (wreq->origin == NETFS_DIO_WRITE)
++ inode_dio_begin(wreq->inode);
++
++ stream->collected_to = wreq->start;
++
++ for (;;) {
++ bool retry = false;
++
++ if (!subreq) {
++ netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
++ subreq = stream->construct;
++ stream->construct = NULL;
++ stream->front = NULL;
++ }
++
++ /* Check if (re-)preparation failed. */
++ if (unlikely(test_bit(NETFS_SREQ_FAILED, &subreq->flags))) {
++ netfs_write_subrequest_terminated(subreq, subreq->error);
++ wreq->error = subreq->error;
++ break;
++ }
++
++ iov_iter_truncate(&subreq->io_iter, wreq->len - wreq->transferred);
++ if (!iov_iter_count(&subreq->io_iter))
++ break;
++
++ subreq->len = netfs_limit_iter(&subreq->io_iter, 0,
++ stream->sreq_max_len,
++ stream->sreq_max_segs);
++ iov_iter_truncate(&subreq->io_iter, subreq->len);
++ stream->submit_extendable_to = subreq->len;
++
++ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
++ stream->issue_write(subreq);
++
++ /* Async, need to wait. */
++ netfs_wait_for_in_progress_stream(wreq, stream);
++
++ if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
++ retry = true;
++ } else if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
++ ret = subreq->error;
++ wreq->error = ret;
++ netfs_see_subrequest(subreq, netfs_sreq_trace_see_failed);
++ subreq = NULL;
++ break;
++ }
++ ret = 0;
++
++ if (!retry) {
++ netfs_unbuffered_write_collect(wreq, stream, subreq);
++ subreq = NULL;
++ if (wreq->transferred >= wreq->len)
++ break;
++ if (!wreq->iocb && signal_pending(current)) {
++ ret = wreq->transferred ? -EINTR : -ERESTARTSYS;
++ trace_netfs_rreq(wreq, netfs_rreq_trace_intr);
++ break;
++ }
++ continue;
++ }
++
++ /* We need to retry the last subrequest, so first reset the
++ * iterator, taking into account what, if anything, we managed
++ * to transfer.
++ */
++ subreq->error = -EAGAIN;
++ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
++ if (subreq->transferred > 0)
++ iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
++
++ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
++ wreq->netfs_ops->retry_request)
++ wreq->netfs_ops->retry_request(wreq, stream);
++
++ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
++ __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
++ __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
++ subreq->io_iter = wreq->buffer.iter;
++ subreq->start = wreq->start + wreq->transferred;
++ subreq->len = wreq->len - wreq->transferred;
++ subreq->transferred = 0;
++ subreq->retry_count += 1;
++ stream->sreq_max_len = UINT_MAX;
++ stream->sreq_max_segs = INT_MAX;
++
++ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
++ stream->prepare_write(subreq);
++
++ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
++ netfs_stat(&netfs_n_wh_retry_write_subreq);
++ }
++
++ netfs_unbuffered_write_done(wreq);
++ _leave(" = %d", ret);
++ return ret;
++}
++
++static void netfs_unbuffered_write_async(struct work_struct *work)
++{
++ struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
++
++ netfs_unbuffered_write(wreq);
++ netfs_put_request(wreq, netfs_rreq_trace_put_complete);
++}
++
+ /*
+ * Perform an unbuffered write where we may have to do an RMW operation on an
+ * encrypted file. This can also be used for direct I/O writes.
+@@ -70,35 +266,35 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
+ */
+ wreq->buffer.iter = *iter;
+ }
++
++ wreq->len = iov_iter_count(&wreq->buffer.iter);
+ }
+
+ __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+- if (async)
+- __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+
+ /* Copy the data into the bounce buffer and encrypt it. */
+ // TODO
+
+ /* Dispatch the write. */
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+- if (async)
+- wreq->iocb = iocb;
+- wreq->len = iov_iter_count(&wreq->buffer.iter);
+- ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
+- if (ret < 0) {
+- _debug("begin = %zd", ret);
+- goto out;
+- }
+
+- if (!async) {
+- ret = netfs_wait_for_write(wreq);
+- if (ret > 0)
+- iocb->ki_pos += ret;
+- } else {
++ if (async) {
++ INIT_WORK(&wreq->work, netfs_unbuffered_write_async);
++ wreq->iocb = iocb;
++ queue_work(system_dfl_wq, &wreq->work);
+ ret = -EIOCBQUEUED;
++ } else {
++ ret = netfs_unbuffered_write(wreq);
++ if (ret < 0) {
++ _debug("begin = %zd", ret);
++ } else {
++ iocb->ki_pos += wreq->transferred;
++ ret = wreq->transferred ?: wreq->error;
++ }
++
++ netfs_put_request(wreq, netfs_rreq_trace_put_complete);
+ }
+
+-out:
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ return ret;
+
+diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
+index 4319611f53544..d436e20d34185 100644
+--- a/fs/netfs/internal.h
++++ b/fs/netfs/internal.h
+@@ -198,6 +198,9 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ struct file *file,
+ loff_t start,
+ enum netfs_io_origin origin);
++void netfs_prepare_write(struct netfs_io_request *wreq,
++ struct netfs_io_stream *stream,
++ loff_t start);
+ void netfs_reissue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *source);
+@@ -212,7 +215,6 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
+ struct folio **writethrough_cache);
+ ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache);
+-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
+
+ /*
+ * write_retry.c
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 61eab34ea67ef..83eb3dc1adf8a 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -399,27 +399,6 @@ bool netfs_write_collection(struct netfs_io_request *wreq)
+ ictx->ops->invalidate_cache(wreq);
+ }
+
+- if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
+- wreq->origin == NETFS_DIO_WRITE) &&
+- !wreq->error)
+- netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
+-
+- if (wreq->origin == NETFS_DIO_WRITE &&
+- wreq->mapping->nrpages) {
+- /* mmap may have got underfoot and we may now have folios
+- * locally covering the region we just wrote. Attempt to
+- * discard the folios, but leave in place any modified locally.
+- * ->write_iter() is prevented from interfering by the DIO
+- * counter.
+- */
+- pgoff_t first = wreq->start >> PAGE_SHIFT;
+- pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+- invalidate_inode_pages2_range(wreq->mapping, first, last);
+- }
+-
+- if (wreq->origin == NETFS_DIO_WRITE)
+- inode_dio_end(wreq->inode);
+-
+ _debug("finished");
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
+index 34894da5a23ec..437268f656409 100644
+--- a/fs/netfs/write_issue.c
++++ b/fs/netfs/write_issue.c
+@@ -154,9 +154,9 @@ EXPORT_SYMBOL(netfs_prepare_write_failed);
+ * Prepare a write subrequest. We need to allocate a new subrequest
+ * if we don't have one.
+ */
+-static void netfs_prepare_write(struct netfs_io_request *wreq,
+- struct netfs_io_stream *stream,
+- loff_t start)
++void netfs_prepare_write(struct netfs_io_request *wreq,
++ struct netfs_io_stream *stream,
++ loff_t start)
+ {
+ struct netfs_io_subrequest *subreq;
+ struct iov_iter *wreq_iter = &wreq->buffer.iter;
+@@ -698,41 +698,6 @@ ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_c
+ return ret;
+ }
+
+-/*
+- * Write data to the server without going through the pagecache and without
+- * writing it to the local cache.
+- */
+-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
+-{
+- struct netfs_io_stream *upload = &wreq->io_streams[0];
+- ssize_t part;
+- loff_t start = wreq->start;
+- int error = 0;
+-
+- _enter("%zx", len);
+-
+- if (wreq->origin == NETFS_DIO_WRITE)
+- inode_dio_begin(wreq->inode);
+-
+- while (len) {
+- // TODO: Prepare content encryption
+-
+- _debug("unbuffered %zx", len);
+- part = netfs_advance_write(wreq, upload, start, len, false);
+- start += part;
+- len -= part;
+- rolling_buffer_advance(&wreq->buffer, part);
+- if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
+- netfs_wait_for_paused_write(wreq);
+- if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
+- break;
+- }
+-
+- netfs_end_issue_write(wreq);
+- _leave(" = %d", error);
+- return error;
+-}
+-
+ /*
+ * Write some of a pending folio data back to the server and/or the cache.
+ */
+diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
+index 64a382fbc31a8..2d366be46a1c3 100644
+--- a/include/trace/events/netfs.h
++++ b/include/trace/events/netfs.h
+@@ -57,6 +57,7 @@
+ EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
+ EM(netfs_rreq_trace_free, "FREE ") \
++ EM(netfs_rreq_trace_intr, "INTR ") \
+ EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
+ EM(netfs_rreq_trace_recollect, "RECLLCT") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
+@@ -169,7 +170,8 @@
+ EM(netfs_sreq_trace_put_oom, "PUT OOM ") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
+- E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
++ EM(netfs_sreq_trace_put_terminated, "PUT TERM ") \
++ E_(netfs_sreq_trace_see_failed, "SEE FAILED ")
+
+ #define netfs_folio_traces \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+--
+2.51.0
+
--- /dev/null
+From 74a64b9c36b98639defa7b8d8b7052e8012bffaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 78f4131af3cf3..5f98c73db5afd 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ timer_delete_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From 39d0790c09c2acb941e0a0d9a57fde01ac645346 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:43 -0800
+Subject: nfc: nci: complete pending data exchange on device close
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 66083581945bd5b8e99fe49b5aeb83d03f62d053 ]
+
+In nci_close_device(), complete any pending data exchange before
+closing. The data exchange callback (e.g.
+rawsock_data_exchange_complete) holds a socket reference.
+
+NIPA occasionally hits this leak:
+
+unreferenced object 0xff1100000f435000 (size 2048):
+ comm "nci_dev", pid 3954, jiffies 4295441245
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 27 00 01 40 00 00 00 00 00 00 00 00 00 00 00 00 '..@............
+ backtrace (crc ec2b3c5):
+ __kmalloc_noprof+0x4db/0x730
+ sk_prot_alloc.isra.0+0xe4/0x1d0
+ sk_alloc+0x36/0x760
+ rawsock_create+0xd1/0x540
+ nfc_sock_create+0x11f/0x280
+ __sock_create+0x22d/0x630
+ __sys_socket+0x115/0x1d0
+ __x64_sys_socket+0x72/0xd0
+ do_syscall_64+0x117/0xfc0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-4-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index f6dc0a94b8d54..d334b7aa8c172 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -567,6 +567,10 @@ static int nci_close_device(struct nci_dev *ndev)
+ flush_workqueue(ndev->cmd_wq);
+ timer_delete_sync(&ndev->cmd_timer);
+ timer_delete_sync(&ndev->data_timer);
++ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ nci_data_exchange_complete(ndev, NULL,
++ ndev->cur_conn_id,
++ -ENODEV);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+@@ -598,6 +602,11 @@ static int nci_close_device(struct nci_dev *ndev)
+ flush_workqueue(ndev->cmd_wq);
+
+ timer_delete_sync(&ndev->cmd_timer);
++ timer_delete_sync(&ndev->data_timer);
++
++ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ nci_data_exchange_complete(ndev, NULL, ndev->cur_conn_id,
++ -ENODEV);
+
+ /* Clear flags except NCI_UNREG */
+ ndev->flags &= BIT(NCI_UNREG);
+--
+2.51.0
+
--- /dev/null
+From 0e1ddca3abf914d8866b38030e62a774fb1b14a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 46681bdaeabff..f6dc0a94b8d54 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1035,18 +1035,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From f3f44bdc521019a159095b5a02341104c2c70e44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 5125392bb68eb..028b4daafaf83 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -67,6 +67,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From 3b188ed19b7136a711c04c1a3481b7ec2abaf314 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Jan 2026 22:48:08 +0800
+Subject: nvme: fix admin queue leak on controller reset
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit b84bb7bd913d8ca2f976ee6faf4a174f91c02b8d ]
+
+When nvme_alloc_admin_tag_set() is called during a controller reset,
+a previous admin queue may still exist. Release it properly before
+allocating a new one to avoid orphaning the old queue.
+
+This fixes a regression introduced by commit 03b3bcd319b3 ("nvme: fix
+admin request_queue lifetime").
+
+Cc: Keith Busch <kbusch@kernel.org>
+Fixes: 03b3bcd319b3 ("nvme: fix admin request_queue lifetime")
+Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/linux-block/CAHj4cs9wv3SdPo+N01Fw2SHBYDs9tj2M_e1-GdQOkRy=DsBB1w@mail.gmail.com/
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index f1f719351f3f2..2ba7244fdaf1c 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4865,6 +4865,13 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ if (ret)
+ return ret;
+
++ /*
++ * If a previous admin queue exists (e.g., from before a reset),
++ * put it now before allocating a new one to avoid orphaning it.
++ */
++ if (ctrl->admin_q)
++ blk_put_queue(ctrl->admin_q);
++
+ ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+--
+2.51.0
+
--- /dev/null
+From 3e438a3d177d8247251551a3439a6ba7c485cef5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 19:19:28 -0500
+Subject: nvme: fix memory allocation in nvme_pr_read_keys()
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit c3320153769f05fd7fe9d840cb555dd3080ae424 ]
+
+nvme_pr_read_keys() takes num_keys from userspace and uses it to
+calculate the allocation size for rse via struct_size(). The upper
+limit is PR_KEYS_MAX (64K).
+
+A malicious or buggy userspace can pass a large num_keys value that
+results in a 4MB allocation attempt at most, causing a warning in
+the page allocator when the order exceeds MAX_PAGE_ORDER.
+
+To fix this, use kvzalloc() instead of kzalloc().
+
+This bug has the same reasoning and fix with the patch below:
+https://lore.kernel.org/linux-block/20251212013510.3576091-1-kartikey406@gmail.com/
+
+Warning log:
+WARNING: mm/page_alloc.c:5216 at __alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216, CPU#1: syz-executor117/272
+Modules linked in:
+CPU: 1 UID: 0 PID: 272 Comm: syz-executor117 Not tainted 6.19.0 #1 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+RIP: 0010:__alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216
+Code: ff 83 bd a8 fe ff ff 0a 0f 86 69 fb ff ff 0f b6 1d f9 f9 c4 04 80 fb 01 0f 87 3b 76 30 ff 83 e3 01 75 09 c6 05 e4 f9 c4 04 01 <0f> 0b 48 c7 85 70 fe ff ff 00 00 00 00 e9 8f fd ff ff 31 c0 e9 0d
+RSP: 0018:ffffc90000fcf450 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 1ffff920001f9ea0
+RDX: 0000000000000000 RSI: 000000000000000b RDI: 0000000000040dc0
+RBP: ffffc90000fcf648 R08: ffff88800b6c3380 R09: 0000000000000001
+R10: ffffc90000fcf840 R11: ffff88807ffad280 R12: 0000000000000000
+R13: 0000000000040dc0 R14: 0000000000000001 R15: ffffc90000fcf620
+FS: 0000555565db33c0(0000) GS:ffff8880be26c000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000002000000c CR3: 0000000003b72000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ alloc_pages_mpol+0x236/0x4d0 mm/mempolicy.c:2486
+ alloc_frozen_pages_noprof+0x149/0x180 mm/mempolicy.c:2557
+ ___kmalloc_large_node+0x10c/0x140 mm/slub.c:5598
+ __kmalloc_large_node_noprof+0x25/0xc0 mm/slub.c:5629
+ __do_kmalloc_node mm/slub.c:5645 [inline]
+ __kmalloc_noprof+0x483/0x6f0 mm/slub.c:5669
+ kmalloc_noprof include/linux/slab.h:961 [inline]
+ kzalloc_noprof include/linux/slab.h:1094 [inline]
+ nvme_pr_read_keys+0x8f/0x4c0 drivers/nvme/host/pr.c:245
+ blkdev_pr_read_keys block/ioctl.c:456 [inline]
+ blkdev_common_ioctl+0x1b71/0x29b0 block/ioctl.c:730
+ blkdev_ioctl+0x299/0x700 block/ioctl.c:786
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:597 [inline]
+ __se_sys_ioctl fs/ioctl.c:583 [inline]
+ __x64_sys_ioctl+0x1bf/0x220 fs/ioctl.c:583
+ x64_sys_call+0x1280/0x21b0 mnt/fuzznvme_1/fuzznvme/linux-build/v6.19/./arch/x86/include/generated/asm/syscalls_64.h:17
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x71/0x330 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7fb893d3108d
+Code: 28 c3 e8 46 1e 00 00 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007ffff61f2f38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007ffff61f3138 RCX: 00007fb893d3108d
+RDX: 0000000020000040 RSI: 00000000c01070ce RDI: 0000000000000003
+RBP: 0000000000000001 R08: 0000000000000000 R09: 00007ffff61f3138
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001
+R13: 00007ffff61f3128 R14: 00007fb893dae530 R15: 0000000000000001
+ </TASK>
+
+Fixes: 5fd96a4e15de ("nvme: Add pr_ops read_keys support")
+Acked-by: Chao Shi <cshi008@fiu.edu>
+Acked-by: Weidong Zhu <weizhu@fiu.edu>
+Acked-by: Dave Tian <daveti@purdue.edu>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index ad2ecc2f49a97..fe7dbe2648158 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -242,7 +242,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ if (rse_len > U32_MAX)
+ return -EINVAL;
+
+- rse = kzalloc(rse_len, GFP_KERNEL);
++ rse = kvzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+@@ -267,7 +267,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ }
+
+ free_rse:
+- kfree(rse);
++ kvfree(rse);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 77c6866bb2968973e11fce8bb025831bbcc7cf21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 11:38:05 -0800
+Subject: nvme-multipath: fix leak on try_module_get failure
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit 0f5197ea9a73a4c406c75e6d8af3a13f7f96ae89 ]
+
+We need to fall back to the synchronous removal if we can't get a
+reference on the module needed for the deferred removal.
+
+Fixes: 62188639ec16 ("nvme-multipath: introduce delayed removal of the multipath head node")
+Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/multipath.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index e35eccacee8c8..81ccdd91f7790 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -1310,13 +1310,11 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ if (!list_empty(&head->list))
+ goto out;
+
+- if (head->delayed_removal_secs) {
+- /*
+- * Ensure that no one could remove this module while the head
+- * remove work is pending.
+- */
+- if (!try_module_get(THIS_MODULE))
+- goto out;
++ /*
++ * Ensure that no one could remove this module while the head
++ * remove work is pending.
++ */
++ if (head->delayed_removal_secs && try_module_get(THIS_MODULE)) {
+ mod_delayed_work(nvme_wq, &head->remove_work,
+ head->delayed_removal_secs * HZ);
+ } else {
+--
+2.51.0
+
--- /dev/null
+From 075049a325f7d32a39d144034b390a78cec1fcc3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Dec 2025 16:43:27 -0500
+Subject: nvme: reject invalid pr_read_keys() num_keys values
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+
+[ Upstream commit 38ec8469f39e0e96e7dd9b76f05e0f8eb78be681 ]
+
+The pr_read_keys() interface has a u32 num_keys parameter. The NVMe
+Reservation Report command has a u32 maximum length. Reject num_keys
+values that are too large to fit.
+
+This will become important when pr_read_keys() is exposed to untrusted
+userspace via an <linux/pr.h> ioctl.
+
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c3320153769f ("nvme: fix memory allocation in nvme_pr_read_keys()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index ca6a74607b139..ad2ecc2f49a97 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -228,7 +228,8 @@ static int nvme_pr_resv_report(struct block_device *bdev, void *data,
+ static int nvme_pr_read_keys(struct block_device *bdev,
+ struct pr_keys *keys_info)
+ {
+- u32 rse_len, num_keys = keys_info->num_keys;
++ size_t rse_len;
++ u32 num_keys = keys_info->num_keys;
+ struct nvme_reservation_status_ext *rse;
+ int ret, i;
+ bool eds;
+@@ -238,6 +239,9 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ * enough to get enough keys to fill the return keys buffer.
+ */
+ rse_len = struct_size(rse, regctl_eds, num_keys);
++ if (rse_len > U32_MAX)
++ return -EINVAL;
++
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+--
+2.51.0
+
--- /dev/null
+From 177e09e20e70becea2b5ce80d834d0e867919628 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 12:26:13 -0800
+Subject: nvmet-fcloop: Check remoteport port_state before calling done
+ callback
+
+From: Justin Tee <justintee8345@gmail.com>
+
+[ Upstream commit dd677d0598387ea623820ab2bd0e029c377445a3 ]
+
+In nvme_fc_handle_ls_rqst_work, the lsrsp->done callback is only set when
+remoteport->port_state is FC_OBJSTATE_ONLINE. Otherwise, the
+nvme_fc_xmt_ls_rsp's LLDD call to lport->ops->xmt_ls_rsp is expected to
+fail and the nvme-fc transport layer itself will directly call
+nvme_fc_xmt_ls_rsp_free instead of relying on LLDD's done callback to free
+the lsrsp resources.
+
+Update the fcloop_t2h_xmt_ls_rsp routine to check remoteport->port_state.
+If online, then lsrsp->done callback will free the lsrsp. Else, return
+-ENODEV to signal the nvme-fc transport to handle freeing lsrsp.
+
+Cc: Ewan D. Milne <emilne@redhat.com>
+Tested-by: Aristeu Rozanski <aris@redhat.com>
+Acked-by: Aristeu Rozanski <aris@redhat.com>
+Reviewed-by: Daniel Wagner <dwagner@suse.de>
+Closes: https://lore.kernel.org/linux-nvme/21255200-a271-4fa0-b099-97755c8acd4c@work/
+Fixes: 10c165af35d2 ("nvmet-fcloop: call done callback even when remote port is gone")
+Signed-off-by: Justin Tee <justintee8345@gmail.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/fcloop.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index 5dffcc5becae8..305ab7ee6e760 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -492,6 +492,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
++ int ret = 0;
+
+ if (!targetport) {
+ /*
+@@ -501,12 +502,18 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ * We end up here from delete association exchange:
+ * nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+- * Return success because this is what LLDDs do; silently
+- * drop the response.
++ * Return success when remoteport is still online because this
++ * is what LLDDs do and silently drop the response. Otherwise,
++ * return with error to signal upper layer to perform the lsrsp
++ * resource cleanup.
+ */
+- lsrsp->done(lsrsp);
++ if (remoteport->port_state == FC_OBJSTATE_ONLINE)
++ lsrsp->done(lsrsp);
++ else
++ ret = -ENODEV;
++
+ kmem_cache_free(lsreq_cache, tls_req);
+- return 0;
++ return ret;
+ }
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+--
+2.51.0
+
--- /dev/null
+From 2e4deeaed7f9e8132f5b04713251e9d2003a1114 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:58 +0000
+Subject: octeon_ep: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 43b3160cb639079a15daeb5f080120afbfbfc918 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provides ordering guarantees but doesn't prevent the compiler
+from performing optimizations like caching in registers, load tearing etc.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-3-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 21 +++++++++------
+ .../net/ethernet/marvell/octeon_ep/octep_rx.c | 27 +++++++++++++------
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 7f8ed8f0ade49..16f52d4b11e91 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -562,17 +562,22 @@ static void octep_clean_irqs(struct octep_device *oct)
+ */
+ static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index f2a7c6a76c742..74de19166488f 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -324,10 +324,16 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
+
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+ * reached half its max value.
+@@ -338,8 +344,9 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -414,7 +421,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ u16 rx_ol_flags;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -499,7 +506,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
+@@ -522,22 +529,26 @@ int octep_oq_process_rx(struct octep_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3f9dcbe9914599c871864800008b03c8dab3e04c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:57 +0000
+Subject: octeon_ep: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 18c04a808c436d629d5812ce883e3822a5f5a47f ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion,
+and replace napi_complete with napi_complete_done.
+
+Moving the IQ/OQ counter updates before napi_complete_done ensures
+1. Counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+ synchronized.
+napi_complete_done (vs napi_complete) allows for better
+interrupt coalescing.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-2-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 57db7ea2f5be9..7f8ed8f0ade49 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -555,12 +555,12 @@ static void octep_clean_irqs(struct octep_device *oct)
+ }
+
+ /**
+- * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -576,7 +576,17 @@ static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+- wmb();
++ smp_wmb();
++}
++
++/**
++ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++{
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -602,7 +612,8 @@ static int octep_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+- napi_complete(napi);
++ octep_update_pkt(ioq_vector->iq, ioq_vector->oq);
++ napi_complete_done(napi, rx_done);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+ }
+--
+2.51.0
+
--- /dev/null
+From 20076be40e30b007086bc82ac9de6b6cf37c8d5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:14:00 +0000
+Subject: octeon_ep_vf: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 6c73126ecd1080351b468fe43353b2f705487f44 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provides ordering guarantees but doesn't prevent the compiler
+from performing optimizations like caching in registers, load tearing etc.
+
+Fixes: 1cd3b407977c3 ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-5-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeon_ep_vf/octep_vf_main.c | 21 ++++++++------
+ .../marvell/octeon_ep_vf/octep_vf_rx.c | 28 +++++++++++++------
+ 2 files changed, 33 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 17efc8eab4cfb..a3c359124887e 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -294,17 +294,22 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+
+ static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+index 6f865dbbba6c6..b579d5b545c46 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -325,9 +325,16 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
++
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+@@ -339,8 +346,9 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -369,7 +377,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct sk_buff *skb;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -463,7 +471,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
+@@ -486,22 +494,26 @@ int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f79feaad0987b651de890029aaeef3a7bed6ca73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:59 +0000
+Subject: octeon_ep_vf: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 2ae7d20fb24f598f60faa8f6ecc856dac782261a ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion.
+Moving the IQ/OQ counter updates before napi_complete_done ensures
+1. Counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+ synchronized.
+
+Fixes: 1cd3b407977c3 ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-4-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeon_ep_vf/octep_vf_main.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 1d9760b4b8f47..17efc8eab4cfb 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -286,12 +286,13 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+ }
+
+ /**
+- * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_vf_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
++
++static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -308,6 +309,17 @@ static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *
+
+ /* Flush the previous wrties before writing to RESEND bit */
+ smp_wmb();
++}
++
++/**
++ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq,
++ struct octep_vf_oq *oq)
++{
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -333,6 +345,7 @@ static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
++ octep_vf_update_pkt(ioq_vector->iq, ioq_vector->oq);
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+--
+2.51.0
+
--- /dev/null
+From f333977bfd69b8161cf881c1518ab5bd19f15ddd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 17:39:07 +0800
+Subject: pinctrl: cirrus: cs42l43: Fix double-put in cs42l43_pin_probe()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fd5bed798f45eb3a178ad527b43ab92705faaf8a ]
+
+devm_add_action_or_reset() already invokes the action on failure,
+so the explicit put causes a double-put.
+
+Fixes: 9b07cdf86a0b ("pinctrl: cirrus: Fix fwnode leak in cs42l43_pin_probe()")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/cirrus/pinctrl-cs42l43.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+index a8f82104a3842..227c37c360e19 100644
+--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
++++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+@@ -574,10 +574,9 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
+ if (child) {
+ ret = devm_add_action_or_reset(&pdev->dev,
+ cs42l43_fwnode_put, child);
+- if (ret) {
+- fwnode_handle_put(child);
++ if (ret)
+ return ret;
+- }
++
+ if (!child->dev)
+ child->dev = priv->dev;
+ fwnode = child;
+--
+2.51.0
+
--- /dev/null
+From e5432d3e649c7d1a51d143e18feae98024ad85b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:46 +0100
+Subject: pinctrl: equilibrium: fix warning trace on load
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 3e00b1b332e54ba50cca6691f628b9c06574024f ]
+
+The callback functions 'eqbr_irq_mask()' and 'eqbr_irq_ack()' are also
+called in the callback function 'eqbr_irq_mask_ack()'. This is done to
+avoid source code duplication. The problem is that the function
+'eqbr_irq_mask()' also calls the gpiolib function 'gpiochip_disable_irq()'.
+
+This generates the following warning trace in the log for every gpio on
+load.
+
+[ 6.088111] ------------[ cut here ]------------
+[ 6.092440] WARNING: CPU: 3 PID: 1 at drivers/gpio/gpiolib.c:3810 gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Modules linked in:
+[ 6.097847] CPU: 3 UID: 0 PID: 1 Comm: swapper/0 Tainted: G W 6.12.59+ #0
+[ 6.097847] Tainted: [W]=WARN
+[ 6.097847] RIP: 0010:gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Code: 39 c6 48 19 c0 21 c6 48 c1 e6 05 48 03 b2 38 03 00 00 48 81 fe 00 f0 ff ff 77 11 48 8b 46 08 f6 c4 02 74 06 f0 80 66 09 fb c3 <0f> 0b 90 0f 1f 40 00 c3 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40
+[ 6.097847] RSP: 0000:ffffc9000000b830 EFLAGS: 00010046
+[ 6.097847] RAX: 0000000000000045 RBX: ffff888001be02a0 RCX: 0000000000000008
+[ 6.097847] RDX: ffff888001be9000 RSI: ffff888001b2dd00 RDI: ffff888001be02a0
+[ 6.097847] RBP: ffffc9000000b860 R08: 0000000000000000 R09: 0000000000000000
+[ 6.097847] R10: 0000000000000001 R11: ffff888001b2a154 R12: ffff888001be0514
+[ 6.097847] R13: ffff888001be02a0 R14: 0000000000000008 R15: 0000000000000000
+[ 6.097847] FS: 0000000000000000(0000) GS:ffff888041d80000(0000) knlGS:0000000000000000
+[ 6.097847] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 6.097847] CR2: 0000000000000000 CR3: 0000000003030000 CR4: 00000000001026b0
+[ 6.097847] Call Trace:
+[ 6.097847] <TASK>
+[ 6.097847] ? eqbr_irq_mask+0x63/0x70
+[ 6.097847] ? no_action+0x10/0x10
+[ 6.097847] eqbr_irq_mask_ack+0x11/0x60
+
+In another driver (drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c) the
+interrupt is not disabled here.
+
+To fix this, do not call the 'eqbr_irq_mask()' and 'eqbr_irq_ack()'
+functions. Instead, implement this directly without disabling the interrupts.
+
+Fixes: 52066a53bd11 ("pinctrl: equilibrium: Convert to immutable irq_chip")
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index 49c8232b525a9..ba1c867b7b891 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -64,8 +64,15 @@ static void eqbr_irq_ack(struct irq_data *d)
+
+ static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_irq_mask(d);
+- eqbr_irq_ack(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
++ unsigned int offset = irqd_to_hwirq(d);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&gctrl->lock, flags);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
++ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+--
+2.51.0
+
--- /dev/null
+From a971e12467f053b65d3c2a8eda41f9b3c6b88b5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:45 +0100
+Subject: pinctrl: equilibrium: rename irq_chip function callbacks
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 1f96b84835eafb3e6f366dc3a66c0e69504cec9d ]
+
+Renaming of the irq_chip callback functions to improve clarity.
+
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Stable-dep-of: 3e00b1b332e5 ("pinctrl: equilibrium: fix warning trace on load")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index 48b55c5bf8d4f..49c8232b525a9 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -23,7 +23,7 @@
+ #define PIN_NAME_LEN 10
+ #define PAD_REG_OFF 0x100
+
+-static void eqbr_gpio_disable_irq(struct irq_data *d)
++static void eqbr_irq_mask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -36,7 +36,7 @@ static void eqbr_gpio_disable_irq(struct irq_data *d)
+ gpiochip_disable_irq(gc, offset);
+ }
+
+-static void eqbr_gpio_enable_irq(struct irq_data *d)
++static void eqbr_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -50,7 +50,7 @@ static void eqbr_gpio_enable_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_ack_irq(struct irq_data *d)
++static void eqbr_irq_ack(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -62,10 +62,10 @@ static void eqbr_gpio_ack_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
++static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_gpio_disable_irq(d);
+- eqbr_gpio_ack_irq(d);
++ eqbr_irq_mask(d);
++ eqbr_irq_ack(d);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+@@ -92,7 +92,7 @@ static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ return 0;
+ }
+
+-static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
++static int eqbr_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -166,11 +166,11 @@ static void eqbr_irq_handler(struct irq_desc *desc)
+
+ static const struct irq_chip eqbr_irq_chip = {
+ .name = "gpio_irq",
+- .irq_mask = eqbr_gpio_disable_irq,
+- .irq_unmask = eqbr_gpio_enable_irq,
+- .irq_ack = eqbr_gpio_ack_irq,
+- .irq_mask_ack = eqbr_gpio_mask_ack_irq,
+- .irq_set_type = eqbr_gpio_set_irq_type,
++ .irq_ack = eqbr_irq_ack,
++ .irq_mask = eqbr_irq_mask,
++ .irq_mask_ack = eqbr_irq_mask_ack,
++ .irq_unmask = eqbr_irq_unmask,
++ .irq_set_type = eqbr_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+--
+2.51.0
+
--- /dev/null
+From 3a34a742f23a1ebbf64e83b4d2078d10ada08eae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 16:17:07 +0000
+Subject: pinctrl: generic: move function to amlogic-am4 driver
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+[ Upstream commit 9c5a40f2922a5a6d6b42e7b3d4c8e253918c07a1 ]
+
+pinconf_generic_dt_node_to_map_pinmux() is not actually a generic
+function, and really belongs in the amlogic-am4 driver. There are three
+reasons why.
+
+First, and least, of the reasons is that this function behaves
+differently to the other dt_node_to_map functions in a way that is not
+obvious from a first glance. This difference stems for the devicetree
+properties that the function is intended for use with, and how they are
+typically used. The other generic dt_node_to_map functions support
+platforms where the pins, groups and functions are described statically
+in the driver and require a function that will produce a mapping from dt
+nodes to these pre-established descriptions. No other code in the driver
+is required to be executed at runtime.
+pinconf_generic_dt_node_to_map_pinmux() on the other hand is intended for
+use with the pinmux property, where groups and functions are determined
+entirely from the devicetree. As a result, there are no statically
+defined groups and functions in the driver for this function to perform
+a mapping to. Other drivers that use the pinmux property (e.g. the k1)
+their dt_node_to_map function creates the groups and functions as the
+devicetree is parsed. Instead of that,
+pinconf_generic_dt_node_to_map_pinmux() requires that the devicetree is
+parsed twice, once by it and once at probe, so that the driver
+dynamically creates the groups and functions before the dt_node_to_map
+callback is executed. I don't believe this double parsing requirement is
+how developers would expect this to work and is not necessary given
+there are drivers that do not have this behaviour.
+
+Secondly and thirdly, the function bakes in some assumptions that only
+really match the amlogic platform about how the devicetree is constructed.
+These, to me, are problematic for something that claims to be generic.
+
+The other dt_node_to_map implementations accept being called for
+either a node containing pin configuration properties or a node
+containing child nodes that each contain the configuration properties.
+IOW, they support the following two devicetree configurations:
+
+| cfg {
+| label: group {
+| pinmux = <asjhdasjhlajskd>;
+| config-item1;
+| };
+| };
+
+| label: cfg {
+| group1 {
+| pinmux = <dsjhlfka>;
+| config-item2;
+| };
+| group2 {
+| pinmux = <lsdjhaf>;
+| config-item1;
+| };
+| };
+
+pinconf_generic_dt_node_to_map_pinmux() only supports the latter.
+
+The other assumption about devicetree configuration that the function
+makes is that the labeled node's parent is a "function node". The amlogic
+driver uses these "function nodes" to create the functions at probe
+time, and pinconf_generic_dt_node_to_map_pinmux() uses the name of the
+parent of the node it is operating on as part of the mapping. IOW, it
+requires that the devicetree look like:
+
+| pinctrl@bla {
+|
+| func-foo {
+| label: group-default {
+| pinmuxes = <lskdf>;
+| };
+| };
+| };
+
+and couldn't be used if the nodes containing the pinmux and
+configuration properties are children of the pinctrl node itself:
+
+| pinctrl@bla {
+|
+| label: group-default {
+| pinmuxes = <lskdf>;
+| };
+| };
+
+These final two reasons are mainly why I believe this is not suitable as
+a generic function, and should be moved into the driver that is the sole
+user and originator of the "generic" function.
+
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Acked-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Stable-dep-of: a2539b92e4b7 ("pinctrl: meson: amlogic-a4: Fix device node reference leak in aml_dt_node_to_map_pinmux()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 71 +++++++++++++++++++++-
+ drivers/pinctrl/pinconf-generic.c | 69 ---------------------
+ include/linux/pinctrl/pinconf-generic.h | 5 --
+ 3 files changed, 70 insertions(+), 75 deletions(-)
+
+diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+index 40542edd557e0..dfa32b11555cd 100644
+--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
++++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+@@ -24,6 +24,7 @@
+ #include <dt-bindings/pinctrl/amlogic,pinctrl.h>
+
+ #include "../core.h"
++#include "../pinctrl-utils.h"
+ #include "../pinconf.h"
+
+ #define gpio_chip_to_bank(chip) \
+@@ -672,11 +673,79 @@ static void aml_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s,
+ seq_printf(s, " %s", dev_name(pcdev->dev));
+ }
+
++static int aml_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
++ struct device_node *np,
++ struct pinctrl_map **map,
++ unsigned int *num_maps)
++{
++ struct device *dev = pctldev->dev;
++ struct device_node *pnode;
++ unsigned long *configs = NULL;
++ unsigned int num_configs = 0;
++ struct property *prop;
++ unsigned int reserved_maps;
++ int reserve;
++ int ret;
++
++ prop = of_find_property(np, "pinmux", NULL);
++ if (!prop) {
++ dev_info(dev, "Missing pinmux property\n");
++ return -ENOENT;
++ }
++
++ pnode = of_get_parent(np);
++ if (!pnode) {
++ dev_info(dev, "Missing function node\n");
++ return -EINVAL;
++ }
++
++ reserved_maps = 0;
++ *map = NULL;
++ *num_maps = 0;
++
++ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
++ &num_configs);
++ if (ret < 0) {
++ dev_err(dev, "%pOF: could not parse node property\n", np);
++ return ret;
++ }
++
++ reserve = 1;
++ if (num_configs)
++ reserve++;
++
++ ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
++ num_maps, reserve);
++ if (ret < 0)
++ goto exit;
++
++ ret = pinctrl_utils_add_map_mux(pctldev, map,
++ &reserved_maps, num_maps, np->name,
++ pnode->name);
++ if (ret < 0)
++ goto exit;
++
++ if (num_configs) {
++ ret = pinctrl_utils_add_map_configs(pctldev, map, &reserved_maps,
++ num_maps, np->name, configs,
++ num_configs, PIN_MAP_TYPE_CONFIGS_GROUP);
++ if (ret < 0)
++ goto exit;
++ }
++
++exit:
++ kfree(configs);
++ if (ret)
++ pinctrl_utils_free_map(pctldev, *map, *num_maps);
++
++ return ret;
++}
++
+ static const struct pinctrl_ops aml_pctrl_ops = {
+ .get_groups_count = aml_get_groups_count,
+ .get_group_name = aml_get_group_name,
+ .get_group_pins = aml_get_group_pins,
+- .dt_node_to_map = pinconf_generic_dt_node_to_map_pinmux,
++ .dt_node_to_map = aml_dt_node_to_map_pinmux,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .pin_dbg_show = aml_pin_dbg_show,
+ };
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 5de6ff62c69bd..cad29abe4050a 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -356,75 +356,6 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
+ }
+ EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
+
+-int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+- struct device_node *np,
+- struct pinctrl_map **map,
+- unsigned int *num_maps)
+-{
+- struct device *dev = pctldev->dev;
+- struct device_node *pnode;
+- unsigned long *configs = NULL;
+- unsigned int num_configs = 0;
+- struct property *prop;
+- unsigned int reserved_maps;
+- int reserve;
+- int ret;
+-
+- prop = of_find_property(np, "pinmux", NULL);
+- if (!prop) {
+- dev_info(dev, "Missing pinmux property\n");
+- return -ENOENT;
+- }
+-
+- pnode = of_get_parent(np);
+- if (!pnode) {
+- dev_info(dev, "Missing function node\n");
+- return -EINVAL;
+- }
+-
+- reserved_maps = 0;
+- *map = NULL;
+- *num_maps = 0;
+-
+- ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+- &num_configs);
+- if (ret < 0) {
+- dev_err(dev, "%pOF: could not parse node property\n", np);
+- return ret;
+- }
+-
+- reserve = 1;
+- if (num_configs)
+- reserve++;
+-
+- ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
+- num_maps, reserve);
+- if (ret < 0)
+- goto exit;
+-
+- ret = pinctrl_utils_add_map_mux(pctldev, map,
+- &reserved_maps, num_maps, np->name,
+- pnode->name);
+- if (ret < 0)
+- goto exit;
+-
+- if (num_configs) {
+- ret = pinctrl_utils_add_map_configs(pctldev, map, &reserved_maps,
+- num_maps, np->name, configs,
+- num_configs, PIN_MAP_TYPE_CONFIGS_GROUP);
+- if (ret < 0)
+- goto exit;
+- }
+-
+-exit:
+- kfree(configs);
+- if (ret)
+- pinctrl_utils_free_map(pctldev, *map, *num_maps);
+-
+- return ret;
+-}
+-EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map_pinmux);
+-
+ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np, struct pinctrl_map **map,
+ unsigned int *reserved_maps, unsigned int *num_maps,
+diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
+index d9245ecec71dc..4e22aa44bcd4c 100644
+--- a/include/linux/pinctrl/pinconf-generic.h
++++ b/include/linux/pinctrl/pinconf-generic.h
+@@ -235,9 +235,4 @@ static inline int pinconf_generic_dt_node_to_map_all(struct pinctrl_dev *pctldev
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+ }
+-
+-int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+- struct device_node *np,
+- struct pinctrl_map **map,
+- unsigned int *num_maps);
+ #endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
+--
+2.51.0
+
--- /dev/null
+From 285558e473352ef0aac311d62f6d7d854092ddea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Feb 2026 00:51:22 +0800
+Subject: pinctrl: meson: amlogic-a4: Fix device node reference leak in
+ aml_dt_node_to_map_pinmux()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit a2539b92e4b791c1ba482930b5e51b1591975461 ]
+
+The of_get_parent() function returns a device_node with an incremented
+reference count.
+
+Use the __free(device_node) cleanup attribute to ensure of_node_put()
+is automatically called when pnode goes out of scope, fixing a
+reference leak.
+
+Fixes: 6e9be3abb78c ("pinctrl: Add driver support for Amlogic SoCs")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+index dfa32b11555cd..e2293a872dcb7 100644
+--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
++++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+@@ -679,7 +679,6 @@ static int aml_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ unsigned int *num_maps)
+ {
+ struct device *dev = pctldev->dev;
+- struct device_node *pnode;
+ unsigned long *configs = NULL;
+ unsigned int num_configs = 0;
+ struct property *prop;
+@@ -693,7 +692,7 @@ static int aml_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ return -ENOENT;
+ }
+
+- pnode = of_get_parent(np);
++ struct device_node *pnode __free(device_node) = of_get_parent(np);
+ if (!pnode) {
+ dev_info(dev, "Missing function node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From 4c929fb259c633027c6a349c0cf75d670d751e3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 09:33:44 +0530
+Subject: pinctrl: qcom: qcs615: Add missing dual edge GPIO IRQ errata flag
+
+From: Maulik Shah <maulik.shah@oss.qualcomm.com>
+
+[ Upstream commit 09a30b7a035f9f4ac918c8a9af89d70e43462152 ]
+
+Wakeup capable GPIOs use PDC as the parent IRQ chip, and PDC on qcs615 does not
+support dual edge IRQs. Add missing wakeirq_dual_edge_errata configuration
+to enable workaround for dual edge GPIO IRQs.
+
+Fixes: b698f36a9d40 ("pinctrl: qcom: add the tlmm driver for QCS615 platform")
+Signed-off-by: Maulik Shah <maulik.shah@oss.qualcomm.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-qcs615.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
+index 4dfa820d4e77c..f1c827ddbfbfa 100644
+--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
++++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
+@@ -1067,6 +1067,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = {
+ .ntiles = ARRAY_SIZE(qcs615_tiles),
+ .wakeirq_map = qcs615_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(qcs615_pdc_map),
++ .wakeirq_dual_edge_errata = true,
+ };
+
+ static const struct of_device_id qcs615_tlmm_of_match[] = {
+--
+2.51.0
+
--- /dev/null
+From 175e9e29de4ccbdcd831033f93023ac546d307fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/lenovo/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/lenovo/thinkpad_acpi.c b/drivers/platform/x86/lenovo/thinkpad_acpi.c
+index cc19fe520ea96..075543cd0e77e 100644
+--- a/drivers/platform/x86/lenovo/thinkpad_acpi.c
++++ b/drivers/platform/x86/lenovo/thinkpad_acpi.c
+@@ -9525,14 +9525,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
--- /dev/null
+From 076bda62f3359ac4e33949f47868c9ed9f24f4d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 19:37:56 +0900
+Subject: rust: kunit: fix warning when !CONFIG_PRINTK
+
+From: Alexandre Courbot <acourbot@nvidia.com>
+
+[ Upstream commit 7dd34dfc8dfa92a7244242098110388367996ac3 ]
+
+If `CONFIG_PRINTK` is not set, then the following warnings are issued
+during build:
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:16:12
+ |
+ 16 | pub fn err(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+ |
+ = note: `#[warn(unused_variables)]` (part of `#[warn(unused)]`) on by default
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:32:13
+ |
+ 32 | pub fn info(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+
+Fix this by adding a no-op assignment using `args` when `CONFIG_PRINTK`
+is not set.
+
+Fixes: a66d733da801 ("rust: support running Rust documentation tests as KUnit ones")
+Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/kunit.rs | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
+index 79436509dd73d..8907b6f89ece5 100644
+--- a/rust/kernel/kunit.rs
++++ b/rust/kernel/kunit.rs
+@@ -17,6 +17,10 @@
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn err(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+@@ -33,6 +37,10 @@ pub fn err(args: fmt::Arguments<'_>) {
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn info(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+--
+2.51.0
+
--- /dev/null
+From e7115dacfc2f390c8935304b9a62f9883baa4527 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 09:36:37 +0800
+Subject: selftest/arm64: Fix sve2p1_sigill() to hwcap test
+
+From: Yifan Wu <wuyifan50@huawei.com>
+
+[ Upstream commit d87c828daa7ead9763416f75cc416496969cf1dc ]
+
+The FEAT_SVE2p1 is indicated by ID_AA64ZFR0_EL1.SVEver. However,
+the BFADD requires the FEAT_SVE_B16B16, which is indicated by
+ID_AA64ZFR0_EL1.B16B16. This could cause the test to incorrectly
+fail on a CPU that supports FEAT_SVE2.1 but not FEAT_SVE_B16B16.
+
+LD1Q Gather load quadwords which is decoded from SVE encodings and
+implied by FEAT_SVE2p1.
+
+Fixes: c5195b027d29 ("kselftest/arm64: Add SVE 2.1 to hwcap test")
+Signed-off-by: Yifan Wu <wuyifan50@huawei.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/arm64/abi/hwcap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
+index 3b96d090c5ebe..09a326f375e91 100644
+--- a/tools/testing/selftests/arm64/abi/hwcap.c
++++ b/tools/testing/selftests/arm64/abi/hwcap.c
+@@ -473,8 +473,8 @@ static void sve2_sigill(void)
+
+ static void sve2p1_sigill(void)
+ {
+- /* BFADD Z0.H, Z0.H, Z0.H */
+- asm volatile(".inst 0x65000000" : : : "z0");
++ /* LD1Q {Z0.Q}, P0/Z, [Z0.D, X0] */
++ asm volatile(".inst 0xC400A000" : : : "z0");
+ }
+
+ static void sve2p2_sigill(void)
+--
+2.51.0
+
--- /dev/null
+From d9291f2be9db9d929e1c420d462b84768e9b58f0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 19:14:50 +0800
+Subject: selftests/harness: order TEST_F and XFAIL_ADD constructors
+
+From: Sun Jian <sun.jian.kdev@gmail.com>
+
+[ Upstream commit 6be2681514261324c8ee8a1c6f76cefdf700220f ]
+
+TEST_F() allocates and registers its struct __test_metadata via mmap()
+inside its constructor, and only then assigns the
+_##fixture_##test##_object pointer.
+
+XFAIL_ADD() runs in a constructor too and reads
+_##fixture_##test##_object to initialize xfail->test. If XFAIL_ADD runs
+first, xfail->test can be NULL and the expected failure will be reported
+as FAIL.
+
+Use constructor priorities to ensure TEST_F registration runs before
+XFAIL_ADD, without adding extra state or runtime lookups.
+
+Fixes: 2709473c9386 ("selftests: kselftest_harness: support using xfail")
+Signed-off-by: Sun Jian <sun.jian.kdev@gmail.com>
+Link: https://patch.msgid.link/20260225111451.347923-1-sun.jian.kdev@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kselftest_harness.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 159cd6729af33..fe162cbfc0912 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -76,6 +76,9 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
+ memset(s, c, n);
+ }
+
++#define KSELFTEST_PRIO_TEST_F 20000
++#define KSELFTEST_PRIO_XFAIL 20001
++
+ #define TEST_TIMEOUT_DEFAULT 30
+
+ /* Utilities exposed to the test definitions */
+@@ -465,7 +468,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
+ fixture_name##_teardown(_metadata, self, variant); \
+ } \
+ static struct __test_metadata *_##fixture_name##_##test_name##_object; \
+- static void __attribute__((constructor)) \
++ static void __attribute__((constructor(KSELFTEST_PRIO_TEST_F))) \
+ _register_##fixture_name##_##test_name(void) \
+ { \
+ struct __test_metadata *object = mmap(NULL, sizeof(*object), \
+@@ -880,7 +883,7 @@ struct __test_xfail {
+ .fixture = &_##fixture_name##_fixture_object, \
+ .variant = &_##fixture_name##_##variant_name##_object, \
+ }; \
+- static void __attribute__((constructor)) \
++ static void __attribute__((constructor(KSELFTEST_PRIO_XFAIL))) \
+ _register_##fixture_name##_##variant_name##_##test_name##_xfail(void) \
+ { \
+ _##fixture_name##_##variant_name##_##test_name##_xfail.test = \
+--
+2.51.0
+
net-stmmac-remove-support-for-lpi_intr_o.patch
bluetooth-fix-cis-host-feature-condition.patch
drm-amd-display-use-gfp_atomic-in-dc_create_stream_for_sink.patch
+nvme-fix-admin-queue-leak-on-controller-reset.patch
+hwmon-aht10-add-support-for-dht20.patch
+hwmon-aht10-fix-initialization-commands-for-aht20.patch
+pinctrl-equilibrium-rename-irq_chip-function-callbac.patch
+pinctrl-equilibrium-fix-warning-trace-on-load.patch
+pinctrl-qcom-qcs615-add-missing-dual-edge-gpio-irq-e.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+module-remove-duplicate-freeing-of-lockdep-classes.patch
+hid-multitouch-new-class-mt_cls_egalax_p80h84.patch
+pinctrl-generic-move-function-to-amlogic-am4-driver.patch
+pinctrl-meson-amlogic-a4-fix-device-node-reference-l.patch
+pinctrl-cirrus-cs42l43-fix-double-put-in-cs42l43_pin.patch
+hwmon-it87-check-the-it87_lock-return-value.patch
+idpf-increment-completion-queue-next_to_clean-in-sw-.patch
+idpf-change-irq-naming-to-match-netdev-and-ethtool-q.patch
+idpf-fix-flow-rule-delete-failure-due-to-invalid-val.patch
+ice-recap-the-vsi-and-qos-info-after-rebuild.patch
+i40e-fix-preempt-count-leak-in-napi-poll-tracepoint.patch
+e1000e-clear-dpg_en-after-reset-to-avoid-autonomous-.patch
+drm-solomon-fix-page-start-when-updating-rectangle-i.patch
+netfs-fix-unbuffered-dio-writes-to-dispatch-subreque.patch
+nvmet-fcloop-check-remoteport-port_state-before-call.patch
+net-annotate-data-races-around-sk-sk_-data_ready-wri.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+nvme-multipath-fix-leak-on-try_module_get-failure.patch
+inet-annotate-data-races-around-isk-inet_num.patch
+udp-unhash-auto-bound-connected-sk-from-4-tuple-hash.patch
+tcp-give-up-on-stronger-sk_rcvbuf-checks-for-now.patch
+xsk-fix-fragment-node-deletion-to-prevent-buffer-lea.patch
+xsk-fix-zero-copy-af_xdp-fragment-drop.patch
+dpaa2-switch-fix-interrupt-storm-after-receiving-bad.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+net-ti-icssg-prueth-fix-ping-failure-after-offload-m.patch
+amd-xgbe-fix-mac_tcr_ss-register-width-for-2.5g-and-.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+wifi-rsi-don-t-default-to-eopnotsupp-in-rsi_mac80211.patch
+drm-syncobj-fix-handle-fd-ioctls-with-dirty-stack.patch
+drm-xe-do-not-preempt-fence-signaling-cs-instruction.patch
+drm-xe-configfs-free-ctx_restore_mid_bb-in-release.patch
+rust-kunit-fix-warning-when-config_printk.patch
+kunit-tool-copy-caller-args-in-run_kernel-to-prevent.patch
+net-dsa-realtek-rtl8365mb-fix-rtl8365mb_phy_ocp_writ.patch
+bpf-bonding-reject-vlan-srcmac-xmit_hash_policy-chan.patch
+octeon_ep-relocate-counter-updates-before-napi.patch
+octeon_ep-avoid-compiler-and-iq-oq-reordering.patch
+octeon_ep_vf-relocate-counter-updates-before-napi.patch
+octeon_ep_vf-avoid-compiler-and-iq-oq-reordering.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+wifi-mt76-mt7996-fix-possible-oob-access-in-mt7996_m.patch
+wifi-mt76-mt7925-fix-possible-oob-access-in-mt7925_m.patch
+wifi-mt76-fix-possible-oob-access-in-mt76_connac2_ma.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+net-rds-fix-circular-locking-dependency-in-rds_tcp_t.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+ice-fix-adding-aq-lldp-filter-for-vf.patch
+libie-don-t-unroll-if-fwlog-isn-t-supported.patch
+iavf-fix-netdev-max_mtu-to-respect-actual-hardware-l.patch
+igb-fix-trigger-of-incorrect-irq-in-igb_xsk_wakeup.patch
+bpf-fix-a-uaf-issue-in-bpf_trampoline_link_cgroup_sh.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch-3780
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+net-ipv4-fix-arm64-alignment-fault-in-multipath-hash.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+drm-sched-fix-kernel-doc-warning-for-drm_sched_job_d.patch
+i2c-i801-revert-i2c-i801-replace-acpi_lock-with-i2c-.patch
+drm-xe-gsc-fix-gsc-proxy-cleanup-on-early-initializa.patch
+drm-xe-reg_sr-fix-leak-on-xa_store-failure.patch
+nvme-reject-invalid-pr_read_keys-num_keys-values.patch
+nvme-fix-memory-allocation-in-nvme_pr_read_keys.patch
+timekeeping-fix-timex-status-validation-for-auxiliar.patch
+hwmon-max6639-fix-inverted-polarity.patch
+net-sched-avoid-qdisc_reset_all_tx_gt-vs-dequeue-rac.patch
+tcp-secure_seq-add-back-ports-to-ts-offset.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+net_sched-sch_fq-clear-q-band_pkt_count-in-fq_reset.patch
+net-devmem-use-read_once-write_once-on-binding-dev.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-complete-pending-data-exchange-on-device-clo.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-stmmac-fix-error-handling-in-vlan-add-and-delete.patch
+net-stmmac-improve-double-vlan-handling.patch
+net-stmmac-fix-vlan-hw-state-restore.patch
+net-stmmac-defer-vlan-hw-configuration-when-interfac.patch
+block-use-trylock-to-avoid-lockdep-circular-dependen.patch
+net-provide-a-preempt_rt-specific-check-for-netdev_q.patch
+netfilter-nf_tables-unconditionally-bump-set-nelems-.patch
+netfilter-nf_tables-clone-set-on-flush-only.patch
+netfilter-nft_set_pipapo-split-gc-into-unlink-and-re.patch
+net-ethernet-mtk_eth_soc-reset-prog-ptr-to-old_prog-.patch
+kselftest-harness-use-helper-to-avoid-zero-size-mems.patch
+selftests-harness-order-test_f-and-xfail_add-constru.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
+net-sched-act_ife-fix-metalist-update-behavior.patch
+xdp-use-modulo-operation-to-calculate-xdp-frag-tailr.patch
+xsk-introduce-helper-to-determine-rxq-frag_size.patch
+i40e-fix-registering-xdp-rxq-info.patch
+i40e-use-xdp.frame_sz-as-xdp-rxq-info-frag_size.patch
+net-enetc-use-truesize-as-xdp-rxq-info-frag_size.patch
+xdp-produce-a-warning-when-calculated-tailroom-is-ne.patch
+ata-libata-eh-fix-detection-of-deferred-qc-timeouts.patch
+selftest-arm64-fix-sve2p1_sigill-to-hwcap-test.patch
+tracing-add-null-pointer-check-to-trigger_data_free.patch
+bpf-collect-only-live-registers-in-linked-regs.patch
--- /dev/null
+From 551fc747b1e6ddc20afff17a6f305373245292e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:11 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ smb2_compound_op()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 12c43a062acb0ac137fc2a4a106d4d084b8c5416 ]
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: 6a5f6592a0b6 ("SMB311: Add support for query info using posix extensions (level 100)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2inode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 5c25f25aa2efb..a5f9f73ac91b9 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -322,7 +322,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ cfile->fid.volatile_fid,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ } else {
+@@ -332,7 +332,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ }
+--
+2.51.0
+
--- /dev/null
+From a0afd138f34a29709ce931493a0f9b907aeeb844 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:12 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ SMB311_posix_query_info()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 9621b996e4db1dbc2b3dc5d5910b7d6179397320 ]
+
+SMB311_posix_query_info() is currently unused, but it may still be used in
+some stable versions, so these changes are submitted as a separate patch.
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: b1bc1874b885 ("smb311: Add support for SMB311 query info (non-compounded)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2pdu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 58238e65c7edf..309e2fcabc087 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3915,7 +3915,7 @@ int
+ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
+ {
+- size_t output_len = sizeof(struct smb311_posix_qinfo *) +
++ size_t output_len = sizeof(struct smb311_posix_qinfo) +
+ (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
+ *plen = 0;
+
+--
+2.51.0
+
--- /dev/null
+From ba224ff1750823ff19696bad37d5dbacb75bc607 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:33:59 -0800
+Subject: tcp: give up on stronger sk_rcvbuf checks (for now)
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 026dfef287c07f37d4d4eef7a0b5a4bfdb29b32d ]
+
+We hit another corner case which leads to TcpExtTCPRcvQDrop
+
+Connections which send RPCs in the 20-80kB range over loopback
+experience spurious drops. The exact conditions for most of
+the drops I investigated are that:
+ - socket exchanged >1MB of data so its not completely fresh
+ - rcvbuf is around 128kB (default, hasn't grown)
+ - there is ~60kB of data in rcvq
+ - skb > 64kB arrives
+
+The sum of skb->len (!) of both of the skbs (the one already
+in rcvq and the arriving one) is larger than rwnd.
+My suspicion is that this happens because __tcp_select_window()
+rounds the rwnd up to (1 << wscale) if less than half of
+the rwnd has been consumed.
+
+Eric suggests that given the number of Fixes we already have
+pointing to 1d2fbaad7cd8 it's probably time to give up on it,
+until a bigger revamp of rmem management.
+
+Also while we could risk tweaking the rwnd math, there are other
+drops on workloads I investigated, after the commit in question,
+not explained by this phenomenon.
+
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/20260225122355.585fd57b@kernel.org
+Fixes: 1d2fbaad7cd8 ("tcp: stronger sk_rcvbuf checks")
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260227003359.2391017-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_input.c | 16 +---------------
+ 1 file changed, 1 insertion(+), 15 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 834cd37276d59..87e678903b977 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5100,25 +5100,11 @@ static void tcp_ofo_queue(struct sock *sk)
+ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
+ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
+
+-/* Check if this incoming skb can be added to socket receive queues
+- * while satisfying sk->sk_rcvbuf limit.
+- *
+- * In theory we should use skb->truesize, but this can cause problems
+- * when applications use too small SO_RCVBUF values.
+- * When LRO / hw gro is used, the socket might have a high tp->scaling_ratio,
+- * allowing RWIN to be close to available space.
+- * Whenever the receive queue gets full, we can receive a small packet
+- * filling RWIN, but with a high skb->truesize, because most NIC use 4K page
+- * plus sk_buff metadata even when receiving less than 1500 bytes of payload.
+- *
+- * Note that we use skb->len to decide to accept or drop this packet,
+- * but sk->sk_rmem_alloc is the sum of all skb->truesize.
+- */
+ static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
+ {
+ unsigned int rmem = atomic_read(&sk->sk_rmem_alloc);
+
+- return rmem + skb->len <= sk->sk_rcvbuf;
++ return rmem <= sk->sk_rcvbuf;
+ }
+
+ static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
+--
+2.51.0
+
--- /dev/null
+From 692e77de22ffa12460654204a650dbaac7f66b70 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 20:55:27 +0000
+Subject: tcp: secure_seq: add back ports to TS offset
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 165573e41f2f66ef98940cf65f838b2cb575d9d1 ]
+
+This reverts 28ee1b746f49 ("secure_seq: downgrade to per-host timestamp offsets")
+
+tcp_tw_recycle went away in 2017.
+
+Zhouyan Deng reported off-path TCP source port leakage via
+SYN cookie side-channel that can be fixed in multiple ways.
+
+One of them is to bring back TCP ports in TS offset randomization.
+
+As a bonus, we perform a single siphash() computation
+to provide both an ISN and a TS offset.
+
+Fixes: 28ee1b746f49 ("secure_seq: downgrade to per-host timestamp offsets")
+Reported-by: Zhouyan Deng <dengzhouyan_nwpu@163.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20260302205527.1982836-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/secure_seq.h | 45 ++++++++++++++++++----
+ include/net/tcp.h | 6 ++-
+ net/core/secure_seq.c | 80 +++++++++++++++-------------------------
+ net/ipv4/syncookies.c | 11 ++++--
+ net/ipv4/tcp_input.c | 8 +++-
+ net/ipv4/tcp_ipv4.c | 37 +++++++++----------
+ net/ipv6/syncookies.c | 11 ++++--
+ net/ipv6/tcp_ipv6.c | 37 +++++++++----------
+ 8 files changed, 127 insertions(+), 108 deletions(-)
+
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index cddebafb9f779..6f996229167b3 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -5,16 +5,47 @@
+ #include <linux/types.h>
+
+ struct net;
++extern struct net init_net;
++
++union tcp_seq_and_ts_off {
++ struct {
++ u32 seq;
++ u32 ts_off;
++ };
++ u64 hash64;
++};
+
+ u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+ u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+-u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+- __be16 sport, __be16 dport);
+-u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr);
+-u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+- __be16 sport, __be16 dport);
+-u32 secure_tcpv6_ts_off(const struct net *net,
+- const __be32 *saddr, const __be32 *daddr);
++union tcp_seq_and_ts_off
++secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
++ __be16 sport, __be16 dport);
++
++static inline u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
++ __be16 sport, __be16 dport)
++{
++ union tcp_seq_and_ts_off ts;
++
++ ts = secure_tcp_seq_and_ts_off(&init_net, saddr, daddr,
++ sport, dport);
++
++ return ts.seq;
++}
++
++union tcp_seq_and_ts_off
++secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
++ const __be32 *daddr,
++ __be16 sport, __be16 dport);
++
++static inline u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
++ __be16 sport, __be16 dport)
++{
++ union tcp_seq_and_ts_off ts;
++
++ ts = secure_tcpv6_seq_and_ts_off(&init_net, saddr, daddr,
++ sport, dport);
+
++ return ts.seq;
++}
+ #endif /* _NET_SECURE_SEQ */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index aa4d24c42a270..7647ed5c732c1 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -43,6 +43,7 @@
+ #include <net/dst.h>
+ #include <net/mptcp.h>
+ #include <net/xfrm.h>
++#include <net/secure_seq.h>
+
+ #include <linux/seq_file.h>
+ #include <linux/memcontrol.h>
+@@ -2435,8 +2436,9 @@ struct tcp_request_sock_ops {
+ struct flowi *fl,
+ struct request_sock *req,
+ u32 tw_isn);
+- u32 (*init_seq)(const struct sk_buff *skb);
+- u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
++ union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
++ const struct net *net,
++ const struct sk_buff *skb);
+ int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
+ struct flowi *fl, struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 9a39656804513..6a6f2cda5aaef 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -20,7 +20,6 @@
+ #include <net/tcp.h>
+
+ static siphash_aligned_key_t net_secret;
+-static siphash_aligned_key_t ts_secret;
+
+ #define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ)
+
+@@ -28,11 +27,6 @@ static __always_inline void net_secret_init(void)
+ {
+ net_get_random_once(&net_secret, sizeof(net_secret));
+ }
+-
+-static __always_inline void ts_secret_init(void)
+-{
+- net_get_random_once(&ts_secret, sizeof(ts_secret));
+-}
+ #endif
+
+ #ifdef CONFIG_INET
+@@ -53,28 +47,9 @@ static u32 seq_scale(u32 seq)
+ #endif
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-u32 secure_tcpv6_ts_off(const struct net *net,
+- const __be32 *saddr, const __be32 *daddr)
+-{
+- const struct {
+- struct in6_addr saddr;
+- struct in6_addr daddr;
+- } __aligned(SIPHASH_ALIGNMENT) combined = {
+- .saddr = *(struct in6_addr *)saddr,
+- .daddr = *(struct in6_addr *)daddr,
+- };
+-
+- if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
+- return 0;
+-
+- ts_secret_init();
+- return siphash(&combined, offsetofend(typeof(combined), daddr),
+- &ts_secret);
+-}
+-EXPORT_IPV6_MOD(secure_tcpv6_ts_off);
+-
+-u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+- __be16 sport, __be16 dport)
++union tcp_seq_and_ts_off
++secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
++ const __be32 *daddr, __be16 sport, __be16 dport)
+ {
+ const struct {
+ struct in6_addr saddr;
+@@ -87,14 +62,20 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ .sport = sport,
+ .dport = dport
+ };
+- u32 hash;
++ union tcp_seq_and_ts_off st;
+
+ net_secret_init();
+- hash = siphash(&combined, offsetofend(typeof(combined), dport),
+- &net_secret);
+- return seq_scale(hash);
++
++ st.hash64 = siphash(&combined, offsetofend(typeof(combined), dport),
++ &net_secret);
++
++ if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
++ st.ts_off = 0;
++
++ st.seq = seq_scale(st.seq);
++ return st;
+ }
+-EXPORT_SYMBOL(secure_tcpv6_seq);
++EXPORT_SYMBOL(secure_tcpv6_seq_and_ts_off);
+
+ u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+@@ -118,33 +99,30 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+
+ #ifdef CONFIG_INET
+-u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
+-{
+- if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
+- return 0;
+-
+- ts_secret_init();
+- return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+- &ts_secret);
+-}
+-
+ /* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
+ * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
+ * it would be easy enough to have the former function use siphash_4u32, passing
+ * the arguments as separate u32.
+ */
+-u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+- __be16 sport, __be16 dport)
++union tcp_seq_and_ts_off
++secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
++ __be16 sport, __be16 dport)
+ {
+- u32 hash;
++ u32 ports = (__force u32)sport << 16 | (__force u32)dport;
++ union tcp_seq_and_ts_off st;
+
+ net_secret_init();
+- hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
+- (__force u32)sport << 16 | (__force u32)dport,
+- &net_secret);
+- return seq_scale(hash);
++
++ st.hash64 = siphash_3u32((__force u32)saddr, (__force u32)daddr,
++ ports, &net_secret);
++
++ if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
++ st.ts_off = 0;
++
++ st.seq = seq_scale(st.seq);
++ return st;
+ }
+-EXPORT_SYMBOL_GPL(secure_tcp_seq);
++EXPORT_SYMBOL_GPL(secure_tcp_seq_and_ts_off);
+
+ u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+ {
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 061751aabc8e1..fc3affd9c8014 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -378,9 +378,14 @@ static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk,
+ tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
+
+ if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+- tsoff = secure_tcp_ts_off(net,
+- ip_hdr(skb)->daddr,
+- ip_hdr(skb)->saddr);
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcp_seq_and_ts_off(net,
++ ip_hdr(skb)->daddr,
++ ip_hdr(skb)->saddr,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
++ tsoff = st.ts_off;
+ tcp_opt.rcv_tsecr -= tsoff;
+ }
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 87e678903b977..96486eea26724 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7385,6 +7385,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ const struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+ struct sock *fastopen_sk = NULL;
++ union tcp_seq_and_ts_off st;
+ struct request_sock *req;
+ bool want_cookie = false;
+ struct dst_entry *dst;
+@@ -7454,9 +7455,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ if (!dst)
+ goto drop_and_free;
+
++ if (tmp_opt.tstamp_ok || (!want_cookie && !isn))
++ st = af_ops->init_seq_and_ts_off(net, skb);
++
+ if (tmp_opt.tstamp_ok) {
+ tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
+- tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
++ tcp_rsk(req)->ts_off = st.ts_off;
+ }
+ if (!want_cookie && !isn) {
+ int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
+@@ -7478,7 +7482,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ goto drop_and_release;
+ }
+
+- isn = af_ops->init_seq(skb);
++ isn = st.seq;
+ }
+
+ tcp_ecn_create_request(req, skb, sk, dst);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index 0fbf13dcf3c2b..75a11d7feb260 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -104,17 +104,14 @@ static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
+
+ static DEFINE_MUTEX(tcp_exit_batch_mutex);
+
+-static u32 tcp_v4_init_seq(const struct sk_buff *skb)
++static union tcp_seq_and_ts_off
++tcp_v4_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb)
+ {
+- return secure_tcp_seq(ip_hdr(skb)->daddr,
+- ip_hdr(skb)->saddr,
+- tcp_hdr(skb)->dest,
+- tcp_hdr(skb)->source);
+-}
+-
+-static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
+-{
+- return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
++ return secure_tcp_seq_and_ts_off(net,
++ ip_hdr(skb)->daddr,
++ ip_hdr(skb)->saddr,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
+ }
+
+ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+@@ -326,15 +323,16 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
+ rt = NULL;
+
+ if (likely(!tp->repair)) {
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcp_seq_and_ts_off(net,
++ inet->inet_saddr,
++ inet->inet_daddr,
++ inet->inet_sport,
++ usin->sin_port);
+ if (!tp->write_seq)
+- WRITE_ONCE(tp->write_seq,
+- secure_tcp_seq(inet->inet_saddr,
+- inet->inet_daddr,
+- inet->inet_sport,
+- usin->sin_port));
+- WRITE_ONCE(tp->tsoffset,
+- secure_tcp_ts_off(net, inet->inet_saddr,
+- inet->inet_daddr));
++ WRITE_ONCE(tp->write_seq, st.seq);
++ WRITE_ONCE(tp->tsoffset, st.ts_off);
+ }
+
+ atomic_set(&inet->inet_id, get_random_u16());
+@@ -1727,8 +1725,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+ .cookie_init_seq = cookie_v4_init_sequence,
+ #endif
+ .route_req = tcp_v4_route_req,
+- .init_seq = tcp_v4_init_seq,
+- .init_ts_off = tcp_v4_init_ts_off,
++ .init_seq_and_ts_off = tcp_v4_init_seq_and_ts_off,
+ .send_synack = tcp_v4_send_synack,
+ };
+
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 7e007f013ec82..4f6f0d751d6c5 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -151,9 +151,14 @@ static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk,
+ tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
+
+ if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+- tsoff = secure_tcpv6_ts_off(net,
+- ipv6_hdr(skb)->daddr.s6_addr32,
+- ipv6_hdr(skb)->saddr.s6_addr32);
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcpv6_seq_and_ts_off(net,
++ ipv6_hdr(skb)->daddr.s6_addr32,
++ ipv6_hdr(skb)->saddr.s6_addr32,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
++ tsoff = st.ts_off;
+ tcp_opt.rcv_tsecr -= tsoff;
+ }
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 5faa46f4cf9a2..90afe81bc8e5d 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -105,18 +105,14 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ }
+ }
+
+-static u32 tcp_v6_init_seq(const struct sk_buff *skb)
++static union tcp_seq_and_ts_off
++tcp_v6_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb)
+ {
+- return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
+- ipv6_hdr(skb)->saddr.s6_addr32,
+- tcp_hdr(skb)->dest,
+- tcp_hdr(skb)->source);
+-}
+-
+-static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
+-{
+- return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
+- ipv6_hdr(skb)->saddr.s6_addr32);
++ return secure_tcpv6_seq_and_ts_off(net,
++ ipv6_hdr(skb)->daddr.s6_addr32,
++ ipv6_hdr(skb)->saddr.s6_addr32,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
+ }
+
+ static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
+@@ -319,14 +315,16 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
+ sk_set_txhash(sk);
+
+ if (likely(!tp->repair)) {
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcpv6_seq_and_ts_off(net,
++ np->saddr.s6_addr32,
++ sk->sk_v6_daddr.s6_addr32,
++ inet->inet_sport,
++ inet->inet_dport);
+ if (!tp->write_seq)
+- WRITE_ONCE(tp->write_seq,
+- secure_tcpv6_seq(np->saddr.s6_addr32,
+- sk->sk_v6_daddr.s6_addr32,
+- inet->inet_sport,
+- inet->inet_dport));
+- tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
+- sk->sk_v6_daddr.s6_addr32);
++ WRITE_ONCE(tp->write_seq, st.seq);
++ tp->tsoffset = st.ts_off;
+ }
+
+ if (tcp_fastopen_defer_connect(sk, &err))
+@@ -859,8 +857,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+ .cookie_init_seq = cookie_v6_init_sequence,
+ #endif
+ .route_req = tcp_v6_route_req,
+- .init_seq = tcp_v6_init_seq,
+- .init_ts_off = tcp_v6_init_ts_off,
++ .init_seq_and_ts_off = tcp_v6_init_seq_and_ts_off,
+ .send_synack = tcp_v6_send_synack,
+ };
+
+--
+2.51.0
+
--- /dev/null
+From 62af442214a19aadc59a1dc81eab966ed1907688 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:51:35 +0100
+Subject: timekeeping: Fix timex status validation for auxiliary clocks
+
+From: Miroslav Lichvar <mlichvar@redhat.com>
+
+[ Upstream commit e48a869957a70cc39b4090cd27c36a86f8db9b92 ]
+
+The timekeeping_validate_timex() function validates the timex status
+of an auxiliary system clock even when the status is not to be changed,
+which causes unexpected errors for applications that make read-only
+clock_adjtime() calls, or set some other timex fields, but without
+clearing the status field.
+
+Do the AUX-specific status validation only when the modes field contains
+ADJ_STATUS, i.e. the application is actually trying to change the
+status. This makes the AUX-specific clock_adjtime() behavior consistent
+with CLOCK_REALTIME.
+
+Fixes: 4eca49d0b621 ("timekeeping: Prepare do_adtimex() for auxiliary clocks")
+Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260225085231.276751-1-mlichvar@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/timekeeping.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 340fef20bdcd0..c7dcccc5f3d6b 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -2639,7 +2639,8 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc, bool aux
+
+ if (aux_clock) {
+ /* Auxiliary clocks are similar to TAI and do not have leap seconds */
+- if (txc->status & (STA_INS | STA_DEL))
++ if (txc->modes & ADJ_STATUS &&
++ txc->status & (STA_INS | STA_DEL))
+ return -EINVAL;
+
+ /* No TAI offset setting */
+@@ -2647,7 +2648,8 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc, bool aux
+ return -EINVAL;
+
+ /* No PPS support either */
+- if (txc->status & (STA_PPSFREQ | STA_PPSTIME))
++ if (txc->modes & ADJ_STATUS &&
++ txc->status & (STA_PPSFREQ | STA_PPSTIME))
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 67b64a7fb8364c2dd7656b33b6dee897bf331ef3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:33:39 -0800
+Subject: tracing: Add NULL pointer check to trigger_data_free()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 457965c13f0837a289c9164b842d0860133f6274 ]
+
+If trigger_data_alloc() fails and returns NULL, event_hist_trigger_parse()
+jumps to the out_free error path. While kfree() safely handles a NULL
+pointer, trigger_data_free() does not. This causes a NULL pointer
+dereference in trigger_data_free() when evaluating
+data->cmd_ops->set_filter.
+
+Fix the problem by adding a NULL pointer check to trigger_data_free().
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://patch.msgid.link/20260305193339.2810953-1-linux@roeck-us.net
+Fixes: 0550069cc25f ("tracing: Properly process error handling in event_hist_trigger_parse()")
+Assisted-by: Gemini:gemini-3.1-pro
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events_trigger.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index cbfc306c0159a..98b8d5df15c79 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -19,6 +19,9 @@ static DEFINE_MUTEX(trigger_cmd_mutex);
+
+ void trigger_data_free(struct event_trigger_data *data)
+ {
++ if (!data)
++ return;
++
+ if (data->cmd_ops->set_filter)
+ data->cmd_ops->set_filter(NULL, data, NULL);
+
+--
+2.51.0
+
--- /dev/null
+From 02ea3ccb6a761c0dcf303462b8313d5fdd5f470f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 03:55:35 +0000
+Subject: udp: Unhash auto-bound connected sk from 4-tuple hash table when
+ disconnected.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 6996a2d2d0a64808c19c98002aeb5d9d1b2df6a4 ]
+
+Let's say we bind() an UDP socket to the wildcard address with a
+non-zero port, connect() it to an address, and disconnect it from
+the address.
+
+bind() sets SOCK_BINDPORT_LOCK on sk->sk_userlocks (but not
+SOCK_BINDADDR_LOCK), and connect() calls udp_lib_hash4() to put
+the socket into the 4-tuple hash table.
+
+Then, __udp_disconnect() calls sk->sk_prot->rehash(sk).
+
+It computes a new hash based on the wildcard address and moves
+the socket to a new slot in the 4-tuple hash table, leaving a
+garbage in the chain that no packet hits.
+
+Let's remove such a socket from 4-tuple hash table when disconnected.
+
+Note that udp_sk(sk)->udp_portaddr_hash needs to be updated after
+udp_hash4_dec(hslot2) in udp_unhash4().
+
+Fixes: 78c91ae2c6de ("ipv4/udp: Add 4-tuple hash for connected socket")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260227035547.3321327-1-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/udp.c | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 777199fa9502f..024cb4f5978c1 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2266,7 +2266,6 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
+ udp_sk(sk)->udp_port_hash);
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ nhslot2 = udp_hashslot2(udptable, newhash);
+- udp_sk(sk)->udp_portaddr_hash = newhash;
+
+ if (hslot2 != nhslot2 ||
+ rcu_access_pointer(sk->sk_reuseport_cb)) {
+@@ -2300,19 +2299,25 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
+ if (udp_hashed4(sk)) {
+ spin_lock_bh(&hslot->lock);
+
+- udp_rehash4(udptable, sk, newhash4);
+- if (hslot2 != nhslot2) {
+- spin_lock(&hslot2->lock);
+- udp_hash4_dec(hslot2);
+- spin_unlock(&hslot2->lock);
+-
+- spin_lock(&nhslot2->lock);
+- udp_hash4_inc(nhslot2);
+- spin_unlock(&nhslot2->lock);
++ if (inet_rcv_saddr_any(sk)) {
++ udp_unhash4(udptable, sk);
++ } else {
++ udp_rehash4(udptable, sk, newhash4);
++ if (hslot2 != nhslot2) {
++ spin_lock(&hslot2->lock);
++ udp_hash4_dec(hslot2);
++ spin_unlock(&hslot2->lock);
++
++ spin_lock(&nhslot2->lock);
++ udp_hash4_inc(nhslot2);
++ spin_unlock(&nhslot2->lock);
++ }
+ }
+
+ spin_unlock_bh(&hslot->lock);
+ }
++
++ udp_sk(sk)->udp_portaddr_hash = newhash;
+ }
+ }
+ EXPORT_IPV6_MOD(udp_lib_rehash);
+--
+2.51.0
+
--- /dev/null
+From 7fd49b2d408b8cd084d548345faf7dac30f8d131 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index 2002e3f9fe45b..b656afe65db07 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From e2d00c0415c2675f5fadd969712a911f9ea67d5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:16 +0100
+Subject: wifi: mt76: Fix possible oob access in
+ mt76_connac2_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 4e10a730d1b511ff49723371ed6d694dd1b2c785 ]
+
+Check frame length before accessing the mgmt fields in
+mt76_connac2_mac_write_txwi_80211 in order to avoid a possible oob
+access.
+
+Fixes: 577dbc6c656d ("mt76: mt7915: enable offloading of sequence number assignment")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-3-b0f6d1ad4850@kernel.org
+[fix check to also cover mgmt->u.action.u.addba_req.capab,
+correct Fixes tag]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 0db00efe88b0b..837bd0f136fa1 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -411,6 +411,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + 1 + 2 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+--
+2.51.0
+
--- /dev/null
+From 9d3a2950357bf26086db723eac5e0b6b0eba9cc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:15 +0100
+Subject: wifi: mt76: mt7925: Fix possible oob access in
+ mt7925_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit c41a9abd6ae31d130e8f332e7c8800c4c866234b ]
+
+Check frame length before accessing the mgmt fields in
+mt7925_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: c948b5da6bbec ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-2-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+index 1e44e96f034e9..e880f3820a1ad 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+@@ -667,6 +667,7 @@ mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ tid = MT_TX_ADDBA;
+--
+2.51.0
+
--- /dev/null
+From 99c8970db641cd51011b8180947199ad4453a578 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:14 +0100
+Subject: wifi: mt76: mt7996: Fix possible oob access in
+ mt7996_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 60862846308627e9e15546bb647a00de44deb27b ]
+
+Check frame length before accessing the mgmt fields in
+mt7996_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: 98686cd21624c ("wifi: mt76: mt7996: add driver for MediaTek Wi-Fi 7 (802.11be) devices")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-1-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 502136691a69e..0958961d2758e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -799,6 +799,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ if (is_mt7990(&dev->mt76))
+--
+2.51.0
+
--- /dev/null
+From f034569c0ebb81c959c76c29dad9ce4d0aa20cba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 17:28:04 +0100
+Subject: wifi: rsi: Don't default to -EOPNOTSUPP in rsi_mac80211_config
+
+From: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
+
+[ Upstream commit d973b1039ccde6b241b438d53297edce4de45b5c ]
+
+This triggers a WARN_ON in ieee80211_hw_conf_init and isn't the expected
+behavior from the driver - other drivers default to 0 too.
+
+Fixes: 0a44dfc07074 ("wifi: mac80211: simplify non-chanctx drivers")
+Signed-off-by: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
+Link: https://patch.msgid.link/20260221-rsi-config-ret-v1-1-9a8f805e2f31@puri.sm
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+index 8c8e074a3a705..c7ae8031436ae 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+@@ -668,7 +668,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+- int status = -EOPNOTSUPP;
++ int status = 0;
+
+ mutex_lock(&common->mutex);
+
+--
+2.51.0
+
--- /dev/null
+From 54f2b02563957d5a724a669d81447749cc07b13d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 6116a8522d960..bdb06584d7e45 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1880,6 +1880,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1896,8 +1898,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From 9a8b785bae6282210bd3946ac5c989a45954336a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:50 +0100
+Subject: xdp: produce a warning when calculated tailroom is negative
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8821e857759be9db3cde337ad328b71fe5c8a55f ]
+
+Many ethernet drivers report xdp Rx queue frag size as being the same as
+DMA write size. However, the only user of this field, namely
+bpf_xdp_frags_increase_tail(), clearly expects a truesize.
+
+Such difference leads to unspecific memory corruption issues under certain
+circumstances, e.g. in ixgbevf maximum DMA write size is 3 KB, so when
+running xskxceiver's XDP_ADJUST_TAIL_GROW_MULTI_BUFF, 6K packet fully uses
+all DMA-writable space in 2 buffers. This would be fine, if only
+rxq->frag_size was properly set to 4K, but value of 3K results in a
+negative tailroom, because there is a non-zero page offset.
+
+We are supposed to return -EINVAL and be done with it in such case, but due
+to tailroom being stored as an unsigned int, it is reported to be somewhere
+near UINT_MAX, resulting in a tail being grown, even if the requested
+offset is too much (it is around 2K in the abovementioned test). This later
+leads to all kinds of unspecific calltraces.
+
+[ 7340.337579] xskxceiver[1440]: segfault at 1da718 ip 00007f4161aeac9d sp 00007f41615a6a00 error 6
+[ 7340.338040] xskxceiver[1441]: segfault at 7f410000000b ip 00000000004042b5 sp 00007f415bffecf0 error 4
+[ 7340.338179] in libc.so.6[61c9d,7f4161aaf000+160000]
+[ 7340.339230] in xskxceiver[42b5,400000+69000]
+[ 7340.340300] likely on CPU 6 (core 0, socket 6)
+[ 7340.340302] Code: ff ff 01 e9 f4 fe ff ff 0f 1f 44 00 00 4c 39 f0 74 73 31 c0 ba 01 00 00 00 f0 0f b1 17 0f 85 ba 00 00 00 49 8b 87 88 00 00 00 <4c> 89 70 08 eb cc 0f 1f 44 00 00 48 8d bd f0 fe ff ff 89 85 ec fe
+[ 7340.340888] likely on CPU 3 (core 0, socket 3)
+[ 7340.345088] Code: 00 00 00 ba 00 00 00 00 be 00 00 00 00 89 c7 e8 31 ca ff ff 89 45 ec 8b 45 ec 85 c0 78 07 b8 00 00 00 00 eb 46 e8 0b c8 ff ff <8b> 00 83 f8 69 74 24 e8 ff c7 ff ff 8b 00 83 f8 0b 74 18 e8 f3 c7
+[ 7340.404334] Oops: general protection fault, probably for non-canonical address 0x6d255010bdffc: 0000 [#1] SMP NOPTI
+[ 7340.405972] CPU: 7 UID: 0 PID: 1439 Comm: xskxceiver Not tainted 6.19.0-rc1+ #21 PREEMPT(lazy)
+[ 7340.408006] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-5.fc42 04/01/2014
+[ 7340.409716] RIP: 0010:lookup_swap_cgroup_id+0x44/0x80
+[ 7340.410455] Code: 83 f8 1c 73 39 48 ba ff ff ff ff ff ff ff 03 48 8b 04 c5 20 55 fa bd 48 21 d1 48 89 ca 83 e1 01 48 d1 ea c1 e1 04 48 8d 04 90 <8b> 00 48 83 c4 10 d3 e8 c3 cc cc cc cc 31 c0 e9 98 b7 dd 00 48 89
+[ 7340.412787] RSP: 0018:ffffcc5c04f7f6d0 EFLAGS: 00010202
+[ 7340.413494] RAX: 0006d255010bdffc RBX: ffff891f477895a8 RCX: 0000000000000010
+[ 7340.414431] RDX: 0001c17e3fffffff RSI: 00fa070000000000 RDI: 000382fc7fffffff
+[ 7340.415354] RBP: 00fa070000000000 R08: ffffcc5c04f7f8f8 R09: ffffcc5c04f7f7d0
+[ 7340.416283] R10: ffff891f4c1a7000 R11: ffffcc5c04f7f9c8 R12: ffffcc5c04f7f7d0
+[ 7340.417218] R13: 03ffffffffffffff R14: 00fa06fffffffe00 R15: ffff891f47789500
+[ 7340.418229] FS: 0000000000000000(0000) GS:ffff891ffdfaa000(0000) knlGS:0000000000000000
+[ 7340.419489] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 7340.420286] CR2: 00007f415bfffd58 CR3: 0000000103f03002 CR4: 0000000000772ef0
+[ 7340.421237] PKRU: 55555554
+[ 7340.421623] Call Trace:
+[ 7340.421987] <TASK>
+[ 7340.422309] ? softleaf_from_pte+0x77/0xa0
+[ 7340.422855] swap_pte_batch+0xa7/0x290
+[ 7340.423363] zap_nonpresent_ptes.constprop.0.isra.0+0xd1/0x270
+[ 7340.424102] zap_pte_range+0x281/0x580
+[ 7340.424607] zap_pmd_range.isra.0+0xc9/0x240
+[ 7340.425177] unmap_page_range+0x24d/0x420
+[ 7340.425714] unmap_vmas+0xa1/0x180
+[ 7340.426185] exit_mmap+0xe1/0x3b0
+[ 7340.426644] __mmput+0x41/0x150
+[ 7340.427098] exit_mm+0xb1/0x110
+[ 7340.427539] do_exit+0x1b2/0x460
+[ 7340.427992] do_group_exit+0x2d/0xc0
+[ 7340.428477] get_signal+0x79d/0x7e0
+[ 7340.428957] arch_do_signal_or_restart+0x34/0x100
+[ 7340.429571] exit_to_user_mode_loop+0x8e/0x4c0
+[ 7340.430159] do_syscall_64+0x188/0x6b0
+[ 7340.430672] ? __do_sys_clone3+0xd9/0x120
+[ 7340.431212] ? switch_fpu_return+0x4e/0xd0
+[ 7340.431761] ? arch_exit_to_user_mode_prepare.isra.0+0xa1/0xc0
+[ 7340.432498] ? do_syscall_64+0xbb/0x6b0
+[ 7340.433015] ? __handle_mm_fault+0x445/0x690
+[ 7340.433582] ? count_memcg_events+0xd6/0x210
+[ 7340.434151] ? handle_mm_fault+0x212/0x340
+[ 7340.434697] ? do_user_addr_fault+0x2b4/0x7b0
+[ 7340.435271] ? clear_bhb_loop+0x30/0x80
+[ 7340.435788] ? clear_bhb_loop+0x30/0x80
+[ 7340.436299] ? clear_bhb_loop+0x30/0x80
+[ 7340.436812] ? clear_bhb_loop+0x30/0x80
+[ 7340.437323] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 7340.437973] RIP: 0033:0x7f4161b14169
+[ 7340.438468] Code: Unable to access opcode bytes at 0x7f4161b1413f.
+[ 7340.439242] RSP: 002b:00007ffc6ebfa770 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
+[ 7340.440173] RAX: fffffffffffffe00 RBX: 00000000000005a1 RCX: 00007f4161b14169
+[ 7340.441061] RDX: 00000000000005a1 RSI: 0000000000000109 RDI: 00007f415bfff990
+[ 7340.441943] RBP: 00007ffc6ebfa7a0 R08: 0000000000000000 R09: 00000000ffffffff
+[ 7340.442824] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+[ 7340.443707] R13: 0000000000000000 R14: 00007f415bfff990 R15: 00007f415bfff6c0
+[ 7340.444586] </TASK>
+[ 7340.444922] Modules linked in: rfkill intel_rapl_msr intel_rapl_common intel_uncore_frequency_common skx_edac_common nfit libnvdimm kvm_intel vfat fat kvm snd_pcm irqbypass rapl iTCO_wdt snd_timer intel_pmc_bxt iTCO_vendor_support snd ixgbevf virtio_net soundcore i2c_i801 pcspkr libeth_xdp net_failover i2c_smbus lpc_ich failover libeth virtio_balloon joydev 9p fuse loop zram lz4hc_compress lz4_compress 9pnet_virtio 9pnet netfs ghash_clmulni_intel serio_raw qemu_fw_cfg
+[ 7340.449650] ---[ end trace 0000000000000000 ]---
+
+The issue can be fixed in all in-tree drivers, but we cannot just trust OOT
+drivers to not do this. Therefore, make tailroom a signed int and produce a
+warning when it is negative to prevent such mistakes in the future.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-10-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 7bcb713681bab..3d4bf4d2a1a4b 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4147,13 +4147,14 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+ struct xdp_rxq_info *rxq = xdp->rxq;
+- unsigned int tailroom;
++ int tailroom;
+
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+ tailroom = rxq->frag_size - skb_frag_size(frag) -
+ skb_frag_off(frag) % rxq->frag_size;
++ WARN_ON_ONCE(tailroom < 0);
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From 62812de64e15b0ae18a08c5975c86cffde248479 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:42 +0100
+Subject: xdp: use modulo operation to calculate XDP frag tailroom
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 88b6b7f7b216108a09887b074395fa7b751880b1 ]
+
+The current formula for calculating XDP tailroom in mbuf packets works only
+if each frag has its own page (if rxq->frag_size is PAGE_SIZE), this
+defeats the purpose of the parameter overall and without any indication
+leads to negative calculated tailroom on at least half of frags, if shared
+pages are used.
+
+There are not many drivers that set rxq->frag_size. Among them:
+* i40e and enetc always split page uniformly between frags, use shared
+ pages
+* ice uses page_pool frags via libeth, those are power-of-2 and uniformly
+ distributed across page
+* idpf has variable frag_size with XDP on, so current API is not applicable
+* mlx5, mtk and mvneta use PAGE_SIZE or 0 as frag_size for page_pool
+
+As for AF_XDP ZC, only ice, i40e and idpf declare frag_size for it. Modulo
+operation yields good results for aligned chunks, they are all power-of-2,
+between 2K and PAGE_SIZE. Formula without modulo fails when chunk_size is
+2K. Buffers in unaligned mode are not distributed uniformly, so modulo
+operation would not work.
+
+To accommodate unaligned buffers, we could define frag_size as
+data + tailroom, and hence do not subtract offset when calculating
+tailroom, but this would necessitate more changes in the drivers.
+
+Define rxq->frag_size as an even portion of a page that fully belongs to a
+single frag. When calculating tailroom, locate the data start within such
+portion by performing a modulo operation on page offset.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-2-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index d93f7dea828e5..7bcb713681bab 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4152,7 +4152,8 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+- tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
++ tailroom = rxq->frag_size - skb_frag_size(frag) -
++ skb_frag_off(frag) % rxq->frag_size;
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From 37da4f4c04f5b5fa2578699758fa30a2c2173af5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 2967039398463..520756159d3d3 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -379,11 +379,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From 9a994811e5168b5a24294366e4ca02a91bdf0e88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:26 +0000
+Subject: xsk: Fix fragment node deletion to prevent buffer leak
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit 60abb0ac11dccd6b98fd9182bc5f85b621688861 ]
+
+After commit b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node"),
+the list_node field is reused for both the xskb pool list and the buffer
+free list, this causes a buffer leak as described below.
+
+xp_free() checks if a buffer is already on the free list using
+list_empty(&xskb->list_node). When list_del() is used to remove a node
+from the xskb pool list, it doesn't reinitialize the node pointers.
+This means list_empty() will return false even after the node has been
+removed, causing xp_free() to incorrectly skip adding the buffer to the
+free list.
+
+Fix this by using list_del_init() instead of list_del() in all fragment
+handling paths, this ensures the list node is reinitialized after removal,
+allowing the list_empty() to work correctly.
+
+Fixes: b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node")
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-2-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 6 +++---
+ net/xdp/xsk.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 4f2d3268a6769..99b6c3358e363 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -118,7 +118,7 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ goto out;
+
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ xp_free(pos);
+ }
+
+@@ -153,7 +153,7 @@ static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+ struct xdp_buff_xsk, list_node);
+ if (frag) {
+- list_del(&frag->list_node);
++ list_del_init(&frag->list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -164,7 +164,7 @@ static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
+ {
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->list_node);
++ list_del_init(&xskb->list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 69bbcca8ac753..0d3fc72147f84 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -186,7 +186,7 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ }
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From f6e662e8718f17ce7ad36d7526302a59ef0f70a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:27 +0000
+Subject: xsk: Fix zero-copy AF_XDP fragment drop
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit f7387d6579d65efd490a864254101cb665f2e7a7 ]
+
+AF_XDP should ensure that only a complete packet is sent to application.
+In the zero-copy case, if the Rx queue gets full as fragments are being
+enqueued, the remaining fragments are dropped.
+
+For the multi-buffer case, add a check to ensure that the Rx queue has
+enough space for all fragments of a packet before starting to enqueue
+them.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-3-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xdp/xsk.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 0d3fc72147f84..a78cdc3356937 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -167,25 +167,31 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ struct xdp_buff_xsk *pos, *tmp;
+ struct list_head *xskb_list;
+ u32 contd = 0;
++ u32 num_desc;
+ int err;
+
+- if (frags)
+- contd = XDP_PKT_CONTD;
++ if (likely(!frags)) {
++ err = __xsk_rcv_zc(xs, xskb, len, contd);
++ if (err)
++ goto err;
++ return 0;
++ }
+
+- err = __xsk_rcv_zc(xs, xskb, len, contd);
+- if (err)
++ contd = XDP_PKT_CONTD;
++ num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
++ if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
++ xs->rx_queue_full++;
++ err = -ENOBUFS;
+ goto err;
+- if (likely(!frags))
+- return 0;
++ }
+
++ __xsk_rcv_zc(xs, xskb, len, contd);
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+- err = __xsk_rcv_zc(xs, pos, len, contd);
+- if (err)
+- goto err;
++ __xsk_rcv_zc(xs, pos, len, contd);
+ list_del_init(&pos->list_node);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 50213f35387ee648b88b095e06bb487ceeba22fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:43 +0100
+Subject: xsk: introduce helper to determine rxq->frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 16394d80539937d348dd3b9ea32415c54e67a81b ]
+
+rxq->frag_size is basically a step between consecutive strictly aligned
+frames. In ZC mode, chunk size fits exactly, but if chunks are unaligned,
+there is no safe way to determine accessible space to grow tailroom.
+
+Report frag_size to be zero, if chunks are unaligned, chunk_size otherwise.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-3-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 99b6c3358e363..33e072768de9d 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -47,6 +47,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+@@ -333,6 +338,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return 0;
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return 0;
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+--
+2.51.0
+
--- /dev/null
+From b00b83bad948832a6d19773aa464883f7f7a9199 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:48:41 -0800
+Subject: accel/amdxdna: Fill invalid payload for failed command
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 89ff45359abbf9d8d3c4aa3f5a57ed0be82b5a12 ]
+
+Newer userspace applications may read the payload of a failed command
+to obtain detailed error information. However, the driver and old firmware
+versions may not support returning advanced error information.
+In this case, initialize the command payload with an invalid value so
+userspace can detect that no detailed error information is available.
+
+Fixes: aac243092b70 ("accel/amdxdna: Add command execution")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260227004841.3080241-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_ctx.c | 23 ++++++++---------------
+ drivers/accel/amdxdna/amdxdna_ctx.c | 27 +++++++++++++++++++++++++++
+ drivers/accel/amdxdna/amdxdna_ctx.h | 3 +++
+ 3 files changed, 38 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
+index 01a02f4c3a98d..9fc33b4298f23 100644
+--- a/drivers/accel/amdxdna/aie2_ctx.c
++++ b/drivers/accel/amdxdna/aie2_ctx.c
+@@ -186,13 +186,13 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(job->job_timeout)) {
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
++ amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(!data) || unlikely(size != sizeof(u32))) {
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
++ amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -202,7 +202,7 @@ aie2_sched_resp_handler(void *handle, void __iomem *data, size_t size)
+ if (status == AIE2_STATUS_SUCCESS)
+ amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_COMPLETED);
+ else
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
++ amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_ERROR);
+
+ out:
+ aie2_sched_notify(job);
+@@ -244,13 +244,13 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
+ cmd_abo = job->cmd_bo;
+
+ if (unlikely(job->job_timeout)) {
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_TIMEOUT);
++ amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_TIMEOUT);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (unlikely(!data) || unlikely(size != sizeof(u32) * 3)) {
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
++ amdxdna_cmd_set_error(cmd_abo, job, 0, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+ goto out;
+ }
+@@ -270,19 +270,12 @@ aie2_sched_cmdlist_resp_handler(void *handle, void __iomem *data, size_t size)
+ fail_cmd_idx, fail_cmd_status);
+
+ if (fail_cmd_status == AIE2_STATUS_SUCCESS) {
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ABORT);
++ amdxdna_cmd_set_error(cmd_abo, job, fail_cmd_idx, ERT_CMD_STATE_ABORT);
+ ret = -EINVAL;
+- goto out;
++ } else {
++ amdxdna_cmd_set_error(cmd_abo, job, fail_cmd_idx, ERT_CMD_STATE_ERROR);
+ }
+- amdxdna_cmd_set_state(cmd_abo, ERT_CMD_STATE_ERROR);
+
+- if (amdxdna_cmd_get_op(cmd_abo) == ERT_CMD_CHAIN) {
+- struct amdxdna_cmd_chain *cc = amdxdna_cmd_get_payload(cmd_abo, NULL);
+-
+- cc->error_index = fail_cmd_idx;
+- if (cc->error_index >= cc->command_count)
+- cc->error_index = 0;
+- }
+ out:
+ aie2_sched_notify(job);
+ return ret;
+diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
+index e42eb12fc7c1b..4e48519b699ac 100644
+--- a/drivers/accel/amdxdna/amdxdna_ctx.c
++++ b/drivers/accel/amdxdna/amdxdna_ctx.c
+@@ -135,6 +135,33 @@ u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo)
+ return INVALID_CU_IDX;
+ }
+
++int amdxdna_cmd_set_error(struct amdxdna_gem_obj *abo,
++ struct amdxdna_sched_job *job, u32 cmd_idx,
++ enum ert_cmd_state error_state)
++{
++ struct amdxdna_client *client = job->hwctx->client;
++ struct amdxdna_cmd *cmd = abo->mem.kva;
++ struct amdxdna_cmd_chain *cc = NULL;
++
++ cmd->header &= ~AMDXDNA_CMD_STATE;
++ cmd->header |= FIELD_PREP(AMDXDNA_CMD_STATE, error_state);
++
++ if (amdxdna_cmd_get_op(abo) == ERT_CMD_CHAIN) {
++ cc = amdxdna_cmd_get_payload(abo, NULL);
++ cc->error_index = (cmd_idx < cc->command_count) ? cmd_idx : 0;
++ abo = amdxdna_gem_get_obj(client, cc->data[0], AMDXDNA_BO_CMD);
++ if (!abo)
++ return -EINVAL;
++ cmd = abo->mem.kva;
++ }
++
++ memset(cmd->data, 0xff, abo->mem.size - sizeof(*cmd));
++ if (cc)
++ amdxdna_gem_put_obj(abo);
++
++ return 0;
++}
++
+ /*
+ * This should be called in close() and remove(). DO NOT call in other syscalls.
+ * This guarantee that when hwctx and resources will be released, if user
+diff --git a/drivers/accel/amdxdna/amdxdna_ctx.h b/drivers/accel/amdxdna/amdxdna_ctx.h
+index 16c85f08f03c6..fbdf9d0008713 100644
+--- a/drivers/accel/amdxdna/amdxdna_ctx.h
++++ b/drivers/accel/amdxdna/amdxdna_ctx.h
+@@ -167,6 +167,9 @@ amdxdna_cmd_get_state(struct amdxdna_gem_obj *abo)
+
+ void *amdxdna_cmd_get_payload(struct amdxdna_gem_obj *abo, u32 *size);
+ u32 amdxdna_cmd_get_cu_idx(struct amdxdna_gem_obj *abo);
++int amdxdna_cmd_set_error(struct amdxdna_gem_obj *abo,
++ struct amdxdna_sched_job *job, u32 cmd_idx,
++ enum ert_cmd_state error_state);
+
+ void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job);
+ void amdxdna_hwctx_remove_all(struct amdxdna_client *client);
+--
+2.51.0
+
--- /dev/null
+From 3bce0b772305611e46fe4a6cb154619a4bba1ce0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 13:38:57 -0800
+Subject: accel/amdxdna: Fix NULL pointer dereference of mgmt_chann
+
+From: Lizhi Hou <lizhi.hou@amd.com>
+
+[ Upstream commit 6270ee26e1edd862ea17e3eba148ca8fb2c99dc9 ]
+
+mgmt_chann may be set to NULL if the firmware returns an unexpected
+error in aie2_send_mgmt_msg_wait(). This can later lead to a NULL
+pointer dereference in aie2_hw_stop().
+
+Fix this by introducing a dedicated helper to destroy mgmt_chann
+and by adding proper NULL checks before accessing it.
+
+Fixes: b87f920b9344 ("accel/amdxdna: Support hardware mailbox")
+Reviewed-by: Mario Limonciello (AMD) <superm1@kernel.org>
+Signed-off-by: Lizhi Hou <lizhi.hou@amd.com>
+Link: https://patch.msgid.link/20260226213857.3068474-1-lizhi.hou@amd.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/amdxdna/aie2_message.c | 21 ++++++++++++++++-----
+ drivers/accel/amdxdna/aie2_pci.c | 7 ++-----
+ drivers/accel/amdxdna/aie2_pci.h | 1 +
+ 3 files changed, 19 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/accel/amdxdna/aie2_message.c b/drivers/accel/amdxdna/aie2_message.c
+index a758c11a05a9c..f0fb98131068c 100644
+--- a/drivers/accel/amdxdna/aie2_message.c
++++ b/drivers/accel/amdxdna/aie2_message.c
+@@ -40,11 +40,8 @@ static int aie2_send_mgmt_msg_wait(struct amdxdna_dev_hdl *ndev,
+ return -ENODEV;
+
+ ret = xdna_send_msg_wait(xdna, ndev->mgmt_chann, msg);
+- if (ret == -ETIME) {
+- xdna_mailbox_stop_channel(ndev->mgmt_chann);
+- xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+- ndev->mgmt_chann = NULL;
+- }
++ if (ret == -ETIME)
++ aie2_destroy_mgmt_chann(ndev);
+
+ if (!ret && *hdl->status != AIE2_STATUS_SUCCESS) {
+ XDNA_ERR(xdna, "command opcode 0x%x failed, status 0x%x",
+@@ -871,6 +868,20 @@ void aie2_msg_init(struct amdxdna_dev_hdl *ndev)
+ ndev->exec_msg_ops = &legacy_exec_message_ops;
+ }
+
++void aie2_destroy_mgmt_chann(struct amdxdna_dev_hdl *ndev)
++{
++ struct amdxdna_dev *xdna = ndev->xdna;
++
++ drm_WARN_ON(&xdna->ddev, !mutex_is_locked(&xdna->dev_lock));
++
++ if (!ndev->mgmt_chann)
++ return;
++
++ xdna_mailbox_stop_channel(ndev->mgmt_chann);
++ xdna_mailbox_destroy_channel(ndev->mgmt_chann);
++ ndev->mgmt_chann = NULL;
++}
++
+ static inline struct amdxdna_gem_obj *
+ aie2_cmdlist_get_cmd_buf(struct amdxdna_sched_job *job)
+ {
+diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
+index 3356c9ed079a8..0a8e7a8710eea 100644
+--- a/drivers/accel/amdxdna/aie2_pci.c
++++ b/drivers/accel/amdxdna/aie2_pci.c
+@@ -343,9 +343,7 @@ static void aie2_hw_stop(struct amdxdna_dev *xdna)
+
+ aie2_runtime_cfg(ndev, AIE2_RT_CFG_CLK_GATING, NULL);
+ aie2_mgmt_fw_fini(ndev);
+- xdna_mailbox_stop_channel(ndev->mgmt_chann);
+- xdna_mailbox_destroy_channel(ndev->mgmt_chann);
+- ndev->mgmt_chann = NULL;
++ aie2_destroy_mgmt_chann(ndev);
+ drmm_kfree(&xdna->ddev, ndev->mbox);
+ ndev->mbox = NULL;
+ aie2_psp_stop(ndev->psp_hdl);
+@@ -454,8 +452,7 @@ static int aie2_hw_start(struct amdxdna_dev *xdna)
+ return 0;
+
+ destroy_mgmt_chann:
+- xdna_mailbox_stop_channel(ndev->mgmt_chann);
+- xdna_mailbox_destroy_channel(ndev->mgmt_chann);
++ aie2_destroy_mgmt_chann(ndev);
+ stop_psp:
+ aie2_psp_stop(ndev->psp_hdl);
+ fini_smu:
+diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
+index 4fdc032bc171b..482ee555f6c47 100644
+--- a/drivers/accel/amdxdna/aie2_pci.h
++++ b/drivers/accel/amdxdna/aie2_pci.h
+@@ -302,6 +302,7 @@ int aie2_get_array_async_error(struct amdxdna_dev_hdl *ndev,
+
+ /* aie2_message.c */
+ void aie2_msg_init(struct amdxdna_dev_hdl *ndev);
++void aie2_destroy_mgmt_chann(struct amdxdna_dev_hdl *ndev);
+ int aie2_suspend_fw(struct amdxdna_dev_hdl *ndev);
+ int aie2_resume_fw(struct amdxdna_dev_hdl *ndev);
+ int aie2_set_runtime_cfg(struct amdxdna_dev_hdl *ndev, u32 type, u64 value);
+--
+2.51.0
+
--- /dev/null
+From 137cc41c06b75c97a73660ccc66729247e893846 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 16:21:55 -0600
+Subject: accel: ethosu: Fix job submit error clean-up refcount underflows
+
+From: Rob Herring (Arm) <robh@kernel.org>
+
+[ Upstream commit 150bceb3e0a4a30950279d91ea0e8cc69a736742 ]
+
+If the job submit fails before adding the job to the scheduler queue
+such as when the GEM buffer bounds checks fail, then doing a
+ethosu_job_put() results in a pm_runtime_put_autosuspend() without the
+corresponding pm_runtime_resume_and_get(). The dma_fence_put()'s are
+also unnecessary, but seem to be harmless.
+
+Split the ethosu_job_cleanup() function into 2 parts for the before
+and after the job is queued.
+
+Fixes: 5a5e9c0228e6 ("accel: Add Arm Ethos-U NPU driver")
+Reviewed-and-Tested-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://patch.msgid.link/20260218-ethos-fixes-v1-1-be3fa3ea9a30@kernel.org
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ethosu/ethosu_job.c | 26 ++++++++++++++++++--------
+ 1 file changed, 18 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/accel/ethosu/ethosu_job.c b/drivers/accel/ethosu/ethosu_job.c
+index 26e7a2f64d71a..70a144803b096 100644
+--- a/drivers/accel/ethosu/ethosu_job.c
++++ b/drivers/accel/ethosu/ethosu_job.c
+@@ -143,23 +143,29 @@ static int ethosu_job_push(struct ethosu_job *job)
+ return ret;
+ }
+
++static void ethosu_job_err_cleanup(struct ethosu_job *job)
++{
++ unsigned int i;
++
++ for (i = 0; i < job->region_cnt; i++)
++ drm_gem_object_put(job->region_bo[i]);
++
++ drm_gem_object_put(job->cmd_bo);
++
++ kfree(job);
++}
++
+ static void ethosu_job_cleanup(struct kref *ref)
+ {
+ struct ethosu_job *job = container_of(ref, struct ethosu_job,
+ refcount);
+- unsigned int i;
+
+ pm_runtime_put_autosuspend(job->dev->base.dev);
+
+ dma_fence_put(job->done_fence);
+ dma_fence_put(job->inference_done_fence);
+
+- for (i = 0; i < job->region_cnt; i++)
+- drm_gem_object_put(job->region_bo[i]);
+-
+- drm_gem_object_put(job->cmd_bo);
+-
+- kfree(job);
++ ethosu_job_err_cleanup(job);
+ }
+
+ static void ethosu_job_put(struct ethosu_job *job)
+@@ -454,12 +460,16 @@ static int ethosu_ioctl_submit_job(struct drm_device *dev, struct drm_file *file
+ }
+ }
+ ret = ethosu_job_push(ejob);
++ if (!ret) {
++ ethosu_job_put(ejob);
++ return 0;
++ }
+
+ out_cleanup_job:
+ if (ret)
+ drm_sched_job_cleanup(&ejob->base);
+ out_put_job:
+- ethosu_job_put(ejob);
++ ethosu_job_err_cleanup(ejob);
+
+ return ret;
+ }
+--
+2.51.0
+
--- /dev/null
+From 3017a4c6471801926430c26a4b79429cb4a2f561 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 16:21:56 -0600
+Subject: accel: ethosu: Fix NPU_OP_ELEMENTWISE validation with scalar
+
+From: Rob Herring (Arm) <robh@kernel.org>
+
+[ Upstream commit 838ae99f9a77a5724ee6d4e7b7b1eb079147f888 ]
+
+The NPU_OP_ELEMENTWISE instruction uses a scalar value for IFM2 if the
+IFM2_BROADCAST "scalar" mode is set. It is a bit (7) on the u65 and
+part of a field (bits 3:0) on the u85. The driver was hardcoded to the
+u85.
+
+Fixes: 5a5e9c0228e6 ("accel: Add Arm Ethos-U NPU driver")
+Reviewed-and-Tested-by: Anders Roxell <anders.roxell@linaro.org>
+Link: https://patch.msgid.link/20260218-ethos-fixes-v1-2-be3fa3ea9a30@kernel.org
+Signed-off-by: Rob Herring (Arm) <robh@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/accel/ethosu/ethosu_gem.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/accel/ethosu/ethosu_gem.c b/drivers/accel/ethosu/ethosu_gem.c
+index 7b073116314ba..4e84481a29d2f 100644
+--- a/drivers/accel/ethosu/ethosu_gem.c
++++ b/drivers/accel/ethosu/ethosu_gem.c
+@@ -417,7 +417,10 @@ static int ethosu_gem_cmdstream_copy_and_validate(struct drm_device *ddev,
+ return ret;
+ break;
+ case NPU_OP_ELEMENTWISE:
+- use_ifm2 = !((st.ifm2.broadcast == 8) || (param == 5) ||
++ use_scale = ethosu_is_u65(edev) ?
++ (st.ifm2.broadcast & 0x80) :
++ (st.ifm2.broadcast == 8);
++ use_ifm2 = !(use_scale || (param == 5) ||
+ (param == 6) || (param == 7) || (param == 0x24));
+ use_ifm = st.ifm.broadcast != 8;
+ ret = calc_sizes_elemwise(ddev, info, cmd, &st, use_ifm, use_ifm2);
+--
+2.51.0
+
--- /dev/null
+From 201fcf1df227e0461f90e315cd84b8b1c3713f8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 22:37:53 +0530
+Subject: amd-xgbe: fix MAC_TCR_SS register width for 2.5G and 10M speeds
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 9439a661c2e80485406ce2c90b107ca17858382d ]
+
+Extend the MAC_TCR_SS (Speed Select) register field width from 2 bits
+to 3 bits to properly support all speed settings.
+
+The MAC_TCR register's SS field encoding requires 3 bits to represent
+all supported speeds:
+ - 0x00: 10Gbps (XGMII)
+ - 0x02: 2.5Gbps (GMII) / 100Mbps
+ - 0x03: 1Gbps / 10Mbps
+ - 0x06: 2.5Gbps (XGMII) - P100a only
+
+With only 2 bits, values 0x04-0x07 cannot be represented, which breaks
+2.5G XGMII mode on newer platforms and causes incorrect speed select
+values to be programmed.
+
+Fixes: 07445f3c7ca1 ("amd-xgbe: Add support for 10 Mbps speed")
+Co-developed-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260226170753.250312-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index 62b01de93db49..826c5caa70d71 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -431,7 +431,7 @@
+ #define MAC_SSIR_SSINC_INDEX 16
+ #define MAC_SSIR_SSINC_WIDTH 8
+ #define MAC_TCR_SS_INDEX 29
+-#define MAC_TCR_SS_WIDTH 2
++#define MAC_TCR_SS_WIDTH 3
+ #define MAC_TCR_TE_INDEX 0
+ #define MAC_TCR_TE_WIDTH 1
+ #define MAC_TCR_VNE_INDEX 24
+--
+2.51.0
+
--- /dev/null
+From b12601840857aa02c0190cbfa4aba283e6541584 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary, as these
+functions are called from suspend/resume paths, which are already
+serialized by the PM core.
+
+Fix this by removing the spinlock. Since the only code that takes this
+lock is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index b5a60a0488967..20ce2ed4cd9f7 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1120,7 +1120,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1131,8 +1130,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1148,8 +1145,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1159,7 +1154,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1170,8 +1164,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1186,8 +1178,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index d1f0419edb234..7d45ea22a02e2 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -76,7 +76,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index 03ef0f5484830..4ba23779b2b7e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1003,9 +1003,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From 4f6f122ae08f789c113ff8163d7f80ccc05013e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:17:07 +0000
+Subject: ASoC: SDCA: Add allocation failure check for Entity name
+
+From: Charles Keepax <ckeepax@opensource.cirrus.com>
+
+[ Upstream commit 27990181031fdcdbe0f7c46011f6404e5d116386 ]
+
+Currently find_sdca_entity_iot() can allocate a string for the
+Entity name but it doesn't check if that allocation succeeded.
+Add the missing NULL check after the allocation.
+
+Fixes: 48fa77af2f4a ("ASoC: SDCA: Add terminal type into input/output widget name")
+Signed-off-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://patch.msgid.link/20260303141707.3841635-1-ckeepax@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/sdca/sdca_functions.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/sdca/sdca_functions.c b/sound/soc/sdca/sdca_functions.c
+index e86004c9dea03..d2de9e81b4f9f 100644
+--- a/sound/soc/sdca/sdca_functions.c
++++ b/sound/soc/sdca/sdca_functions.c
+@@ -1120,9 +1120,12 @@ static int find_sdca_entity_iot(struct device *dev,
+ if (!terminal->is_dataport) {
+ const char *type_name = sdca_find_terminal_name(terminal->type);
+
+- if (type_name)
++ if (type_name) {
+ entity->label = devm_kasprintf(dev, GFP_KERNEL, "%s %s",
+ entity->label, type_name);
++ if (!entity->label)
++ return -ENOMEM;
++ }
+ }
+
+ ret = fwnode_property_read_u32(entity_node,
+--
+2.51.0
+
--- /dev/null
+From eecb291b4182f505fb814ad3b37668c82afd2dcd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 11:03:42 +0100
+Subject: ata: libata: cancel pending work after clearing deferred_qc
+
+From: Niklas Cassel <cassel@kernel.org>
+
+[ Upstream commit aac9b27f7c1f2b2cf7f50a9ca633ecbbcaf22af9 ]
+
+Syzbot reported a WARN_ON() in ata_scsi_deferred_qc_work(), caused by
+ap->ops->qc_defer() returning non-zero before issuing the deferred qc.
+
+ata_scsi_schedule_deferred_qc() is called during each command completion.
+This function will check if there is a deferred QC, and if
+ap->ops->qc_defer() returns zero, meaning that it is possible to queue the
+deferred qc at this time (without being deferred), then it will queue the
+work which will issue the deferred qc.
+
+Once the work get to run, which can potentially be a very long time after
+the work was scheduled, there is a WARN_ON() if ap->ops->qc_defer() returns
+non-zero.
+
+While we hold the ap->lock both when assigning and clearing deferred_qc,
+and the work itself holds the ap->lock, the code currently does not cancel
+the work after clearing the deferred qc.
+
+This means that the following scenario can happen:
+1) One or several NCQ commands are queued.
+2) A non-NCQ command is queued, gets stored in ap->deferred_qc.
+3) Last NCQ command gets completed, work is queued to issue the deferred
+ qc.
+4) Timeout or error happens, ap->deferred_qc is cleared. The queued work is
+ currently NOT canceled.
+5) Port is reset.
+6) One or several NCQ commands are queued.
+7) A non-NCQ command is queued, gets stored in ap->deferred_qc.
+8) Work is finally run. Yet at this time, there is still NCQ commands in
+ flight.
+
+The work in 8) really belongs to the non-NCQ command in 2), not to the
+non-NCQ command in 7). The reason why the work is executed when it is not
+supposed to, is because it was never canceled when ap->deferred_qc was
+cleared in 4). Thus, ensure that we always cancel the work after clearing
+ap->deferred_qc.
+
+Another potential fix would have been to let ata_scsi_deferred_qc_work() do
+nothing if ap->ops->qc_defer() returns non-zero. However, canceling the
+work when clearing ap->deferred_qc seems slightly more logical, as we hold
+the ap->lock when clearing ap->deferred_qc, so we know that the work cannot
+be holding the lock. (The function could be waiting for the lock, but that
+is okay since it will do nothing if ap->deferred_qc is not set.)
+
+Reported-by: syzbot+bcaf842a1e8ead8dfb89@syzkaller.appspotmail.com
+Fixes: 0ea84089dbf6 ("ata: libata-scsi: avoid Non-NCQ command starvation")
+Fixes: eddb98ad9364 ("ata: libata-eh: correctly handle deferred qc timeouts")
+Reviewed-by: Igor Pylypiv <ipylypiv@google.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-eh.c | 1 +
+ drivers/ata/libata-scsi.c | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index b373cceb95d23..563432400f727 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -659,6 +659,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ */
+ WARN_ON_ONCE(qc->flags & ATA_QCFLAG_ACTIVE);
+ ap->deferred_qc = NULL;
++ cancel_work(&ap->deferred_qc_work);
+ set_host_byte(scmd, DID_TIME_OUT);
+ scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+ } else if (i < ATA_MAX_QUEUE) {
+diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
+index 6b954efa9adb1..98ee5e7f61eb6 100644
+--- a/drivers/ata/libata-scsi.c
++++ b/drivers/ata/libata-scsi.c
+@@ -1699,6 +1699,7 @@ void ata_scsi_requeue_deferred_qc(struct ata_port *ap)
+
+ scmd = qc->scsicmd;
+ ap->deferred_qc = NULL;
++ cancel_work(&ap->deferred_qc_work);
+ ata_qc_free(qc);
+ scmd->result = (DID_SOFT_ERROR << 16);
+ scsi_done(scmd);
+--
+2.51.0
+
--- /dev/null
+From 94fd96ef953b4fdbb7d25c17e5c6e27cd09e29f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 18:48:05 -0800
+Subject: ata: libata-eh: Fix detection of deferred qc timeouts
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit ee0e6e69a772d601e152e5368a1da25d656122a8 ]
+
+If the ata_qc_for_each_raw() loop finishes without finding a matching SCSI
+command for any QC, the variable qc will hold a pointer to the last element
+examined, which has the tag i == ATA_MAX_QUEUE - 1. This qc can match the
+port deferred QC (ap->deferred_qc).
+
+If that happens, the condition qc == ap->deferred_qc evaluates to true
+despite the loop not breaking with a match on the SCSI command for this QC.
+In that case, the error handler mistakenly intercepts a command that has
+not been issued yet and that has not timed out, and thus erroneously
+returning a timeout error.
+
+Fix the problem by checking for i < ATA_MAX_QUEUE in addition to
+qc == ap->deferred_qc.
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Assisted-by: Gemini:gemini-3.1-pro
+Fixes: eddb98ad9364 ("ata: libata-eh: correctly handle deferred qc timeouts")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+[cassel: modified commit log as suggested by Damien]
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Signed-off-by: Niklas Cassel <cassel@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/libata-eh.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
+index 563432400f727..23be85418b3b1 100644
+--- a/drivers/ata/libata-eh.c
++++ b/drivers/ata/libata-eh.c
+@@ -647,7 +647,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
+ break;
+ }
+
+- if (qc == ap->deferred_qc) {
++ if (i < ATA_MAX_QUEUE && qc == ap->deferred_qc) {
+ /*
+ * This is a deferred command that timed out while
+ * waiting for the command queue to drain. Since the qc
+--
+2.51.0
+
--- /dev/null
+From 0b26f92489671b9d4ebbd848357bae5cb87ed4f7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index afb8d3eb21850..c39dc5d367979 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1260,24 +1260,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From a3d16258cfc827607f8c4d2aba078973bc9c7630 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 16:22:07 -0800
+Subject: blktrace: fix __this_cpu_read/write in preemptible context
+
+From: Chaitanya Kulkarni <kch@nvidia.com>
+
+[ Upstream commit da46b5dfef48658d03347cda21532bcdbb521e67 ]
+
+tracing_record_cmdline() internally uses __this_cpu_read() and
+__this_cpu_write() on the per-CPU variable trace_cmdline_save, and
+trace_save_cmdline() explicitly asserts preemption is disabled via
+lockdep_assert_preemption_disabled(). These operations are only safe
+when preemption is off, as they were designed to be called from the
+scheduler context (probe_wakeup_sched_switch() / probe_wakeup()).
+
+__blk_add_trace() was calling tracing_record_cmdline(current) early in
+the blk_tracer path, before ring buffer reservation, from process
+context where preemption is fully enabled. This triggers the following
+using blktests/blktrace/002:
+
+blktrace/002 (blktrace ftrace corruption with sysfs trace) [failed]
+ runtime 0.367s ... 0.437s
+ something found in dmesg:
+ [ 81.211018] run blktests blktrace/002 at 2026-02-25 22:24:33
+ [ 81.239580] null_blk: disk nullb1 created
+ [ 81.357294] BUG: using __this_cpu_read() in preemptible [00000000] code: dd/2516
+ [ 81.362842] caller is tracing_record_cmdline+0x10/0x40
+ [ 81.362872] CPU: 16 UID: 0 PID: 2516 Comm: dd Tainted: G N 7.0.0-rc1lblk+ #84 PREEMPT(full)
+ [ 81.362877] Tainted: [N]=TEST
+ [ 81.362878] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+ [ 81.362881] Call Trace:
+ [ 81.362884] <TASK>
+ [ 81.362886] dump_stack_lvl+0x8d/0xb0
+ ...
+ (See '/mnt/sda/blktests/results/nodev/blktrace/002.dmesg' for the entire message)
+
+[ 81.211018] run blktests blktrace/002 at 2026-02-25 22:24:33
+[ 81.239580] null_blk: disk nullb1 created
+[ 81.357294] BUG: using __this_cpu_read() in preemptible [00000000] code: dd/2516
+[ 81.362842] caller is tracing_record_cmdline+0x10/0x40
+[ 81.362872] CPU: 16 UID: 0 PID: 2516 Comm: dd Tainted: G N 7.0.0-rc1lblk+ #84 PREEMPT(full)
+[ 81.362877] Tainted: [N]=TEST
+[ 81.362878] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.17.0-0-gb52ca86e094d-prebuilt.qemu.org 04/01/2014
+[ 81.362881] Call Trace:
+[ 81.362884] <TASK>
+[ 81.362886] dump_stack_lvl+0x8d/0xb0
+[ 81.362895] check_preemption_disabled+0xce/0xe0
+[ 81.362902] tracing_record_cmdline+0x10/0x40
+[ 81.362923] __blk_add_trace+0x307/0x5d0
+[ 81.362934] ? lock_acquire+0xe0/0x300
+[ 81.362940] ? iov_iter_extract_pages+0x101/0xa30
+[ 81.362959] blk_add_trace_bio+0x106/0x1e0
+[ 81.362968] submit_bio_noacct_nocheck+0x24b/0x3a0
+[ 81.362979] ? lockdep_init_map_type+0x58/0x260
+[ 81.362988] submit_bio_wait+0x56/0x90
+[ 81.363009] __blkdev_direct_IO_simple+0x16c/0x250
+[ 81.363026] ? __pfx_submit_bio_wait_endio+0x10/0x10
+[ 81.363038] ? rcu_read_lock_any_held+0x73/0xa0
+[ 81.363051] blkdev_read_iter+0xc1/0x140
+[ 81.363059] vfs_read+0x20b/0x330
+[ 81.363083] ksys_read+0x67/0xe0
+[ 81.363090] do_syscall_64+0xbf/0xf00
+[ 81.363102] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 81.363106] RIP: 0033:0x7f281906029d
+[ 81.363111] Code: 31 c0 e9 c6 fe ff ff 50 48 8d 3d 66 63 0a 00 e8 59 ff 01 00 66 0f 1f 84 00 00 00 00 00 80 3d 41 33 0e 00 00 74 17 31 c0 0f 05 <48> 3d 00 f0 ff ff 77 5b c3 66 2e 0f 1f 84 00 00 00 00 00 48 83 ec
+[ 81.363113] RSP: 002b:00007ffca127dd48 EFLAGS: 00000246 ORIG_RAX: 0000000000000000
+[ 81.363120] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f281906029d
+[ 81.363122] RDX: 0000000000001000 RSI: 0000559f8bfae000 RDI: 0000000000000000
+[ 81.363123] RBP: 0000000000001000 R08: 0000002863a10a81 R09: 00007f281915f000
+[ 81.363124] R10: 00007f2818f77b60 R11: 0000000000000246 R12: 0000559f8bfae000
+[ 81.363126] R13: 0000000000000000 R14: 0000000000000000 R15: 000000000000000a
+[ 81.363142] </TASK>
+
+The same BUG fires from blk_add_trace_plug(), blk_add_trace_unplug(),
+and blk_add_trace_rq() paths as well.
+
+The purpose of tracing_record_cmdline() is to cache the task->comm for
+a given PID so that the trace can later resolve it. It is only
+meaningful when a trace event is actually being recorded. Ring buffer
+reservation via ring_buffer_lock_reserve() disables preemption, and
+preemption remains disabled until the event is committed :-
+
+__blk_add_trace()
+ trace_buffer_lock_reserve()
+ __trace_buffer_lock_reserve()
+ ring_buffer_lock_reserve()
+ preempt_disable_notrace(); <---
+
+With this fix blktests for blktrace pass:
+
+ blktests (master) # ./check blktrace
+ blktrace/001 (blktrace zone management command tracing) [passed]
+ runtime 3.650s ... 3.647s
+ blktrace/002 (blktrace ftrace corruption with sysfs trace) [passed]
+ runtime 0.411s ... 0.384s
+
+Fixes: 7ffbd48d5cab ("tracing: Cache comms only after an event occurred")
+Reported-by: Shinichiro Kawasaki <shinichiro.kawasaki@wdc.com>
+Suggested-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/blktrace.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
+index c4db5c2e71037..0548e64b08f23 100644
+--- a/kernel/trace/blktrace.c
++++ b/kernel/trace/blktrace.c
+@@ -383,8 +383,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ cpu = raw_smp_processor_id();
+
+ if (blk_tracer) {
+- tracing_record_cmdline(current);
+-
+ buffer = blk_tr->array_buffer.buffer;
+ trace_ctx = tracing_gen_ctx_flags(0);
+ switch (bt->version) {
+@@ -419,6 +417,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
+ if (!event)
+ return;
+
++ tracing_record_cmdline(current);
+ switch (bt->version) {
+ case 1:
+ record_blktrace_event(ring_buffer_event_data(event),
+--
+2.51.0
+
--- /dev/null
+From d123dd17bbff8101aaec29021010cc343090ce6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:15:50 +0800
+Subject: block: use trylock to avoid lockdep circular dependency in sysfs
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit ce8ee8583ed83122405eabaa8fb351be4d9dc65c ]
+
+Use trylock instead of blocking lock acquisition for update_nr_hwq_lock
+in queue_requests_store() and elv_iosched_store() to avoid circular lock
+dependency with kernfs active reference during concurrent disk deletion:
+
+ update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
+ kn->active -> update_nr_hwq_lock (via sysfs write path)
+
+Return -EBUSY when the lock is not immediately available.
+
+Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/linux-block/CAHj4cs-em-4acsHabMdT=jJhXkCzjnprD-aQH1OgrZo4nTnmMw@mail.gmail.com/
+Fixes: 626ff4f8ebcb ("blk-mq: convert to serialize updating nr_requests with update_nr_hwq_lock")
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Tested-by: Yi Zhang <yi.zhang@redhat.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-sysfs.c | 8 +++++++-
+ block/elevator.c | 12 +++++++++++-
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
+index e0a70d26972b3..af12526d866a9 100644
+--- a/block/blk-sysfs.c
++++ b/block/blk-sysfs.c
+@@ -78,8 +78,14 @@ queue_requests_store(struct gendisk *disk, const char *page, size_t count)
+ /*
+ * Serialize updating nr_requests with concurrent queue_requests_store()
+ * and switching elevator.
++ *
++ * Use trylock to avoid circular lock dependency with kernfs active
++ * reference during concurrent disk deletion:
++ * update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
++ * kn->active -> update_nr_hwq_lock (via this sysfs write path)
+ */
+- down_write(&set->update_nr_hwq_lock);
++ if (!down_write_trylock(&set->update_nr_hwq_lock))
++ return -EBUSY;
+
+ if (nr == q->nr_requests)
+ goto unlock;
+diff --git a/block/elevator.c b/block/elevator.c
+index a2f8b2251dc6e..7a97998cd8bd7 100644
+--- a/block/elevator.c
++++ b/block/elevator.c
+@@ -806,7 +806,16 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ elv_iosched_load_module(ctx.name);
+ ctx.type = elevator_find_get(ctx.name);
+
+- down_read(&set->update_nr_hwq_lock);
++ /*
++ * Use trylock to avoid circular lock dependency with kernfs active
++ * reference during concurrent disk deletion:
++ * update_nr_hwq_lock -> kn->active (via del_gendisk -> kobject_del)
++ * kn->active -> update_nr_hwq_lock (via this sysfs write path)
++ */
++ if (!down_read_trylock(&set->update_nr_hwq_lock)) {
++ ret = -EBUSY;
++ goto out;
++ }
+ if (!blk_queue_no_elv_switch(q)) {
+ ret = elevator_change(q, &ctx);
+ if (!ret)
+@@ -816,6 +825,7 @@ ssize_t elv_iosched_store(struct gendisk *disk, const char *buf,
+ }
+ up_read(&set->update_nr_hwq_lock);
+
++out:
+ if (ctx.type)
+ elevator_put(ctx.type);
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 518e32bce402a9acf49dba7df522d083932f69fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:03:01 +0800
+Subject: bpf/bonding: reject vlan+srcmac xmit_hash_policy change when XDP is
+ loaded
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 479d589b40b836442bbdadc3fdb37f001bb67f26 ]
+
+bond_option_mode_set() already rejects mode changes that would make a
+loaded XDP program incompatible via bond_xdp_check(). However,
+bond_option_xmit_hash_policy_set() has no such guard.
+
+For 802.3ad and balance-xor modes, bond_xdp_check() returns false when
+xmit_hash_policy is vlan+srcmac, because the 802.1q payload is usually
+absent due to hardware offload. This means a user can:
+
+1. Attach a native XDP program to a bond in 802.3ad/balance-xor mode
+ with a compatible xmit_hash_policy (e.g. layer2+3).
+2. Change xmit_hash_policy to vlan+srcmac while XDP remains loaded.
+
+This leaves bond->xdp_prog set but bond_xdp_check() now returning false
+for the same device. When the bond is later destroyed, dev_xdp_uninstall()
+calls bond_xdp_set(dev, NULL, NULL) to remove the program, which hits
+the bond_xdp_check() guard and returns -EOPNOTSUPP, triggering:
+
+WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL))
+
+Fix this by rejecting xmit_hash_policy changes to vlan+srcmac when an
+XDP program is loaded on a bond in 802.3ad or balance-xor mode.
+
+commit 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+introduced bond_xdp_check() which returns false for 802.3ad/balance-xor
+modes when xmit_hash_policy is vlan+srcmac. The check was wired into
+bond_xdp_set() to reject XDP attachment with an incompatible policy, but
+the symmetric path -- preventing xmit_hash_policy from being changed to an
+incompatible value after XDP is already loaded -- was left unguarded in
+bond_option_xmit_hash_policy_set().
+
+Note:
+commit 094ee6017ea0 ("bonding: check xdp prog when set bond mode")
+later added a similar guard to bond_option_mode_set(), but
+bond_option_xmit_hash_policy_set() remained unprotected.
+
+Reported-by: syzbot+5a287bcdc08104bc3132@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6995aff6.050a0220.2eeac1.014e.GAE@google.com/T/
+Fixes: 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260226080306.98766-2-jiayuan.chen@linux.dev
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 9 +++++++--
+ drivers/net/bonding/bond_options.c | 2 ++
+ include/net/bonding.h | 1 +
+ 3 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 4c58d1dafcacb..739e6eea6b529 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -324,7 +324,7 @@ static bool bond_sk_check(struct bonding *bond)
+ }
+ }
+
+-bool bond_xdp_check(struct bonding *bond, int mode)
++bool __bond_xdp_check(int mode, int xmit_policy)
+ {
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN:
+@@ -335,7 +335,7 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ /* vlan+srcmac is not supported with XDP as in most cases the 802.1q
+ * payload is not in the packet due to hardware offload.
+ */
+- if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
++ if (xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return true;
+ fallthrough;
+ default:
+@@ -343,6 +343,11 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ }
+ }
+
++bool bond_xdp_check(struct bonding *bond, int mode)
++{
++ return __bond_xdp_check(mode, bond->params.xmit_policy);
++}
++
+ /*---------------------------------- VLAN -----------------------------------*/
+
+ /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index f1c6e9d8f6167..adc216df43459 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1574,6 +1574,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
+ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+ {
++ if (bond->xdp_prog && !__bond_xdp_check(BOND_MODE(bond), newval->value))
++ return -EOPNOTSUPP;
+ netdev_dbg(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.xmit_policy = newval->value;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 4620784035570..99c1bdadcd11a 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -698,6 +698,7 @@ void bond_debug_register(struct bonding *bond);
+ void bond_debug_unregister(struct bonding *bond);
+ void bond_debug_reregister(struct bonding *bond);
+ const char *bond_mode_name(int mode);
++bool __bond_xdp_check(int mode, int xmit_policy);
+ bool bond_xdp_check(struct bonding *bond, int mode);
+ void bond_setup(struct net_device *bond_dev);
+ unsigned int bond_get_num_tx_queues(void);
+--
+2.51.0
+
--- /dev/null
+From af6e6ef6a41132a89fbfe5544a1b386eb2a25761 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2026 16:02:47 -0800
+Subject: bpf: collect only live registers in linked regs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Eduard Zingerman <eddyz87@gmail.com>
+
+[ Upstream commit 2658a1720a1944fbaeda937000ad2b3c3dfaf1bb ]
+
+Fix an inconsistency between func_states_equal() and
+collect_linked_regs():
+- regsafe() uses check_ids() to verify that cached and current states
+ have identical register id mapping.
+- func_states_equal() calls regsafe() only for registers computed as
+ live by compute_live_registers().
+- clean_live_states() is supposed to remove dead registers from cached
+ states, but it can skip states belonging to an iterator-based loop.
+- collect_linked_regs() collects all registers sharing the same id,
+ ignoring the marks computed by compute_live_registers().
+ Linked registers are stored in the state's jump history.
+- backtrack_insn() marks all linked registers for an instruction
+ as precise whenever one of the linked registers is precise.
+
+The above might lead to a scenario:
+- There is an instruction I with register rY known to be dead at I.
+- Instruction I is reached via two paths: first A, then B.
+- On path A:
+ - There is an id link between registers rX and rY.
+ - Checkpoint C is created at I.
+ - Linked register set {rX, rY} is saved to the jump history.
+ - rX is marked as precise at I, causing both rX and rY
+ to be marked precise at C.
+- On path B:
+ - There is no id link between registers rX and rY,
+ otherwise register states are sub-states of those in C.
+ - Because rY is dead at I, check_ids() returns true.
+ - Current state is considered equal to checkpoint C,
+ propagate_precision() propagates spurious precision
+ mark for register rY along the path B.
+ - Depending on a program, this might hit verifier_bug()
+ in the backtrack_insn(), e.g. if rY ∈ [r1..r5]
+ and backtrack_insn() spots a function call.
+
+The reproducer program is in the next patch.
+This was hit by sched_ext scx_lavd scheduler code.
+
+Changes in tests:
+- verifier_scalar_ids.c selftests need modification to preserve
+ some registers as live for __msg() checks.
+- exceptions_assert.c adjusted to match changes in the verifier log,
+ R0 is dead after conditional instruction and thus does not get
+ range.
+- precise.c adjusted to match changes in the verifier log, register r9
+ is dead after comparison and its range is not important for test.
+
+Reported-by: Emil Tsalapatis <emil@etsalapatis.com>
+Fixes: 0fb3cf6110a5 ("bpf: use register liveness information for func_states_equal")
+Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
+Link: https://lore.kernel.org/r/20260306-linked-regs-and-propagate-precision-v1-1-18e859be570d@gmail.com
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 13 ++++-
+ .../selftests/bpf/progs/exceptions_assert.c | 34 +++++------
+ .../selftests/bpf/progs/verifier_scalar_ids.c | 56 ++++++++++++++-----
+ .../testing/selftests/bpf/verifier/precise.c | 8 +--
+ 4 files changed, 73 insertions(+), 38 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index c3b58f5d062b0..b594a065b83c4 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -16895,17 +16895,24 @@ static void __collect_linked_regs(struct linked_regs *reg_set, struct bpf_reg_st
+ * in verifier state, save R in linked_regs if R->id == id.
+ * If there are too many Rs sharing same id, reset id for leftover Rs.
+ */
+-static void collect_linked_regs(struct bpf_verifier_state *vstate, u32 id,
++static void collect_linked_regs(struct bpf_verifier_env *env,
++ struct bpf_verifier_state *vstate,
++ u32 id,
+ struct linked_regs *linked_regs)
+ {
++ struct bpf_insn_aux_data *aux = env->insn_aux_data;
+ struct bpf_func_state *func;
+ struct bpf_reg_state *reg;
++ u16 live_regs;
+ int i, j;
+
+ id = id & ~BPF_ADD_CONST;
+ for (i = vstate->curframe; i >= 0; i--) {
++ live_regs = aux[frame_insn_idx(vstate, i)].live_regs_before;
+ func = vstate->frame[i];
+ for (j = 0; j < BPF_REG_FP; j++) {
++ if (!(live_regs & BIT(j)))
++ continue;
+ reg = &func->regs[j];
+ __collect_linked_regs(linked_regs, reg, id, i, j, true);
+ }
+@@ -17113,9 +17120,9 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
+ * if parent state is created.
+ */
+ if (BPF_SRC(insn->code) == BPF_X && src_reg->type == SCALAR_VALUE && src_reg->id)
+- collect_linked_regs(this_branch, src_reg->id, &linked_regs);
++ collect_linked_regs(env, this_branch, src_reg->id, &linked_regs);
+ if (dst_reg->type == SCALAR_VALUE && dst_reg->id)
+- collect_linked_regs(this_branch, dst_reg->id, &linked_regs);
++ collect_linked_regs(env, this_branch, dst_reg->id, &linked_regs);
+ if (linked_regs.cnt > 1) {
+ err = push_jmp_history(env, this_branch, 0, linked_regs_pack(&linked_regs));
+ if (err)
+diff --git a/tools/testing/selftests/bpf/progs/exceptions_assert.c b/tools/testing/selftests/bpf/progs/exceptions_assert.c
+index a01c2736890f9..858af5988a38a 100644
+--- a/tools/testing/selftests/bpf/progs/exceptions_assert.c
++++ b/tools/testing/selftests/bpf/progs/exceptions_assert.c
+@@ -18,43 +18,43 @@
+ return *(u64 *)num; \
+ }
+
+-__msg(": R0=0xffffffff80000000")
++__msg("R{{.}}=0xffffffff80000000")
+ check_assert(s64, ==, eq_int_min, INT_MIN);
+-__msg(": R0=0x7fffffff")
++__msg("R{{.}}=0x7fffffff")
+ check_assert(s64, ==, eq_int_max, INT_MAX);
+-__msg(": R0=0")
++__msg("R{{.}}=0")
+ check_assert(s64, ==, eq_zero, 0);
+-__msg(": R0=0x8000000000000000 R1=0x8000000000000000")
++__msg("R{{.}}=0x8000000000000000")
+ check_assert(s64, ==, eq_llong_min, LLONG_MIN);
+-__msg(": R0=0x7fffffffffffffff R1=0x7fffffffffffffff")
++__msg("R{{.}}=0x7fffffffffffffff")
+ check_assert(s64, ==, eq_llong_max, LLONG_MAX);
+
+-__msg(": R0=scalar(id=1,smax=0x7ffffffe)")
++__msg("R{{.}}=scalar(id=1,smax=0x7ffffffe)")
+ check_assert(s64, <, lt_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smax=-1,umin=0x8000000000000000,var_off=(0x8000000000000000; 0x7fffffffffffffff))")
+ check_assert(s64, <, lt_zero, 0);
+-__msg(": R0=scalar(id=1,smax=0xffffffff7fffffff")
++__msg("R{{.}}=scalar(id=1,smax=0xffffffff7fffffff")
+ check_assert(s64, <, lt_neg, INT_MIN);
+
+-__msg(": R0=scalar(id=1,smax=0x7fffffff)")
++__msg("R{{.}}=scalar(id=1,smax=0x7fffffff)")
+ check_assert(s64, <=, le_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smax=0)")
++__msg("R{{.}}=scalar(id=1,smax=0)")
+ check_assert(s64, <=, le_zero, 0);
+-__msg(": R0=scalar(id=1,smax=0xffffffff80000000")
++__msg("R{{.}}=scalar(id=1,smax=0xffffffff80000000")
+ check_assert(s64, <=, le_neg, INT_MIN);
+
+-__msg(": R0=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=umin=0x80000000,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >, gt_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=umin=1,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >, gt_zero, 0);
+-__msg(": R0=scalar(id=1,smin=0xffffffff80000001")
++__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000001")
+ check_assert(s64, >, gt_neg, INT_MIN);
+
+-__msg(": R0=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=umin=0x7fffffff,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >=, ge_pos, INT_MAX);
+-__msg(": R0=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
++__msg("R{{.}}=scalar(id=1,smin=0,umax=0x7fffffffffffffff,var_off=(0x0; 0x7fffffffffffffff))")
+ check_assert(s64, >=, ge_zero, 0);
+-__msg(": R0=scalar(id=1,smin=0xffffffff80000000")
++__msg("R{{.}}=scalar(id=1,smin=0xffffffff80000000")
+ check_assert(s64, >=, ge_neg, INT_MIN);
+
+ SEC("?tc")
+diff --git a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+index c0ce690ddb68a..1fdd85b4b8443 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
++++ b/tools/testing/selftests/bpf/progs/verifier_scalar_ids.c
+@@ -40,6 +40,9 @@ __naked void linked_regs_bpf_k(void)
+ */
+ "r3 = r10;"
+ "r3 += r0;"
++ /* Mark r1 and r2 as alive. */
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -73,6 +76,9 @@ __naked void linked_regs_bpf_x_src(void)
+ */
+ "r4 = r10;"
+ "r4 += r0;"
++ /* Mark r1 and r2 as alive. */
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -106,6 +112,10 @@ __naked void linked_regs_bpf_x_dst(void)
+ */
+ "r4 = r10;"
+ "r4 += r3;"
++ /* Mark r1 and r2 as alive. */
++ "r0 = r0;"
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -143,6 +153,9 @@ __naked void linked_regs_broken_link(void)
+ */
+ "r3 = r10;"
+ "r3 += r0;"
++ /* Mark r1 and r2 as alive. */
++ "r1 = r1;"
++ "r2 = r2;"
+ "r0 = 0;"
+ "exit;"
+ :
+@@ -156,16 +169,16 @@ __naked void linked_regs_broken_link(void)
+ */
+ SEC("socket")
+ __success __log_level(2)
+-__msg("12: (0f) r2 += r1")
++__msg("17: (0f) r2 += r1")
+ /* Current state */
+-__msg("frame2: last_idx 12 first_idx 11 subseq_idx -1 ")
+-__msg("frame2: regs=r1 stack= before 11: (bf) r2 = r10")
++__msg("frame2: last_idx 17 first_idx 14 subseq_idx -1 ")
++__msg("frame2: regs=r1 stack= before 16: (bf) r2 = r10")
+ __msg("frame2: parent state regs=r1 stack=")
+ __msg("frame1: parent state regs= stack=")
+ __msg("frame0: parent state regs= stack=")
+ /* Parent state */
+-__msg("frame2: last_idx 10 first_idx 10 subseq_idx 11 ")
+-__msg("frame2: regs=r1 stack= before 10: (25) if r1 > 0x7 goto pc+0")
++__msg("frame2: last_idx 13 first_idx 13 subseq_idx 14 ")
++__msg("frame2: regs=r1 stack= before 13: (25) if r1 > 0x7 goto pc+0")
+ __msg("frame2: parent state regs=r1 stack=")
+ /* frame1.r{6,7} are marked because mark_precise_scalar_ids()
+ * looks for all registers with frame2.r1.id in the current state
+@@ -173,20 +186,20 @@ __msg("frame2: parent state regs=r1 stack=")
+ __msg("frame1: parent state regs=r6,r7 stack=")
+ __msg("frame0: parent state regs=r6 stack=")
+ /* Parent state */
+-__msg("frame2: last_idx 8 first_idx 8 subseq_idx 10")
+-__msg("frame2: regs=r1 stack= before 8: (85) call pc+1")
++__msg("frame2: last_idx 9 first_idx 9 subseq_idx 13")
++__msg("frame2: regs=r1 stack= before 9: (85) call pc+3")
+ /* frame1.r1 is marked because of backtracking of call instruction */
+ __msg("frame1: parent state regs=r1,r6,r7 stack=")
+ __msg("frame0: parent state regs=r6 stack=")
+ /* Parent state */
+-__msg("frame1: last_idx 7 first_idx 6 subseq_idx 8")
+-__msg("frame1: regs=r1,r6,r7 stack= before 7: (bf) r7 = r1")
+-__msg("frame1: regs=r1,r6 stack= before 6: (bf) r6 = r1")
++__msg("frame1: last_idx 8 first_idx 7 subseq_idx 9")
++__msg("frame1: regs=r1,r6,r7 stack= before 8: (bf) r7 = r1")
++__msg("frame1: regs=r1,r6 stack= before 7: (bf) r6 = r1")
+ __msg("frame1: parent state regs=r1 stack=")
+ __msg("frame0: parent state regs=r6 stack=")
+ /* Parent state */
+-__msg("frame1: last_idx 4 first_idx 4 subseq_idx 6")
+-__msg("frame1: regs=r1 stack= before 4: (85) call pc+1")
++__msg("frame1: last_idx 4 first_idx 4 subseq_idx 7")
++__msg("frame1: regs=r1 stack= before 4: (85) call pc+2")
+ __msg("frame0: parent state regs=r1,r6 stack=")
+ /* Parent state */
+ __msg("frame0: last_idx 3 first_idx 1 subseq_idx 4")
+@@ -204,6 +217,7 @@ __naked void precision_many_frames(void)
+ "r1 = r0;"
+ "r6 = r0;"
+ "call precision_many_frames__foo;"
++ "r6 = r6;" /* mark r6 as live */
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+@@ -220,6 +234,8 @@ void precision_many_frames__foo(void)
+ "r6 = r1;"
+ "r7 = r1;"
+ "call precision_many_frames__bar;"
++ "r6 = r6;" /* mark r6 as live */
++ "r7 = r7;" /* mark r7 as live */
+ "exit"
+ ::: __clobber_all);
+ }
+@@ -229,6 +245,8 @@ void precision_many_frames__bar(void)
+ {
+ asm volatile (
+ "if r1 > 7 goto +0;"
++ "r6 = 0;" /* mark r6 as live */
++ "r7 = 0;" /* mark r7 as live */
+ /* force r1 to be precise, this eventually marks:
+ * - bar frame r1
+ * - foo frame r{1,6,7}
+@@ -340,6 +358,8 @@ __naked void precision_two_ids(void)
+ "r3 += r7;"
+ /* force r9 to be precise, this also marks r8 */
+ "r3 += r9;"
++ "r6 = r6;" /* mark r6 as live */
++ "r8 = r8;" /* mark r8 as live */
+ "exit;"
+ :
+ : __imm(bpf_ktime_get_ns)
+@@ -353,7 +373,7 @@ __flag(BPF_F_TEST_STATE_FREQ)
+ * collect_linked_regs() can't tie more than 6 registers for a single insn.
+ */
+ __msg("8: (25) if r0 > 0x7 goto pc+0 ; R0=scalar(id=1")
+-__msg("9: (bf) r6 = r6 ; R6=scalar(id=2")
++__msg("14: (bf) r6 = r6 ; R6=scalar(id=2")
+ /* check that r{0-5} are marked precise after 'if' */
+ __msg("frame0: regs=r0 stack= before 8: (25) if r0 > 0x7 goto pc+0")
+ __msg("frame0: parent state regs=r0,r1,r2,r3,r4,r5 stack=:")
+@@ -372,6 +392,12 @@ __naked void linked_regs_too_many_regs(void)
+ "r6 = r0;"
+ /* propagate range for r{0-6} */
+ "if r0 > 7 goto +0;"
++ /* keep r{1-5} live */
++ "r1 = r1;"
++ "r2 = r2;"
++ "r3 = r3;"
++ "r4 = r4;"
++ "r5 = r5;"
+ /* make r6 appear in the log */
+ "r6 = r6;"
+ /* force r0 to be precise,
+@@ -517,7 +543,7 @@ __naked void check_ids_in_regsafe_2(void)
+ "*(u64*)(r10 - 8) = r1;"
+ /* r9 = pointer to stack */
+ "r9 = r10;"
+- "r9 += -8;"
++ "r9 += -16;"
+ /* r8 = ktime_get_ns() */
+ "call %[bpf_ktime_get_ns];"
+ "r8 = r0;"
+@@ -538,6 +564,8 @@ __naked void check_ids_in_regsafe_2(void)
+ "if r7 > 4 goto l2_%=;"
+ /* Access memory at r9[r6] */
+ "r9 += r6;"
++ "r9 += r7;"
++ "r9 += r8;"
+ "r0 = *(u8*)(r9 + 0);"
+ "l2_%=:"
+ "r0 = 0;"
+diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
+index 59a020c356474..ef3ec56672c22 100644
+--- a/tools/testing/selftests/bpf/verifier/precise.c
++++ b/tools/testing/selftests/bpf/verifier/precise.c
+@@ -44,9 +44,9 @@
+ mark_precise: frame0: regs=r2 stack= before 23\
+ mark_precise: frame0: regs=r2 stack= before 22\
+ mark_precise: frame0: regs=r2 stack= before 20\
+- mark_precise: frame0: parent state regs=r2,r9 stack=:\
++ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 10\
+- mark_precise: frame0: regs=r2,r9 stack= before 19\
++ mark_precise: frame0: regs=r2 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: regs=r0,r9 stack= before 15\
+@@ -107,9 +107,9 @@
+ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 20 first_idx 20\
+ mark_precise: frame0: regs=r2 stack= before 20\
+- mark_precise: frame0: parent state regs=r2,r9 stack=:\
++ mark_precise: frame0: parent state regs=r2 stack=:\
+ mark_precise: frame0: last_idx 19 first_idx 17\
+- mark_precise: frame0: regs=r2,r9 stack= before 19\
++ mark_precise: frame0: regs=r2 stack= before 19\
+ mark_precise: frame0: regs=r9 stack= before 18\
+ mark_precise: frame0: regs=r8,r9 stack= before 17\
+ mark_precise: frame0: parent state regs= stack=:",
+--
+2.51.0
+
--- /dev/null
+From 696c272881e7f0be9d200c62d61583f35fbb5eaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 17:52:17 +0800
+Subject: bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
+
+From: Lang Xu <xulang@uniontech.com>
+
+[ Upstream commit 56145d237385ca0e7ca9ff7b226aaf2eb8ef368b ]
+
+The root cause of this bug is that when 'bpf_link_put' reduces the
+refcount of 'shim_link->link.link' to zero, the resource is considered
+released but may still be referenced via 'tr->progs_hlist' in
+'cgroup_shim_find'. The actual cleanup of 'tr->progs_hlist' in
+'bpf_shim_tramp_link_release' is deferred. During this window, another
+process can cause a use-after-free via 'bpf_trampoline_link_cgroup_shim'.
+
+Based on Martin KaFai Lau's suggestions, I have created a simple patch.
+
+To fix this:
+ Add an atomic non-zero check in 'bpf_trampoline_link_cgroup_shim'.
+ Only increment the refcount if it is not already zero.
+
+Testing:
+ I verified the fix by adding a delay in
+ 'bpf_shim_tramp_link_release' to make the bug easier to trigger:
+
+static void bpf_shim_tramp_link_release(struct bpf_link *link)
+{
+ /* ... */
+ if (!shim_link->trampoline)
+ return;
+
++ msleep(100);
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link,
+ shim_link->trampoline, NULL));
+ bpf_trampoline_put(shim_link->trampoline);
+}
+
+Before the patch, running a PoC easily reproduced the crash(almost 100%)
+with a call trace similar to KaiyanM's report.
+After the patch, the bug no longer occurs even after millions of
+iterations.
+
+Fixes: 69fd337a975c ("bpf: per-cgroup lsm flavor")
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Closes: https://lore.kernel.org/bpf/3c4ebb0b.46ff8.19abab8abe2.Coremail.kaiyanm@hust.edu.cn/
+Signed-off-by: Lang Xu <xulang@uniontech.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/279EEE1BA1DDB49D+20260303095217.34436-1-xulang@uniontech.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/trampoline.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index b9a358d7a78f1..47c70eb451f3a 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -796,10 +796,8 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ mutex_lock(&tr->mutex);
+
+ shim_link = cgroup_shim_find(tr, bpf_func);
+- if (shim_link) {
++ if (shim_link && !IS_ERR(bpf_link_inc_not_zero(&shim_link->link.link))) {
+ /* Reusing existing shim attached by the other program. */
+- bpf_link_inc(&shim_link->link.link);
+-
+ mutex_unlock(&tr->mutex);
+ bpf_trampoline_put(tr); /* bpf_trampoline_get above */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 78c00694d0bb220295ce301691cd40d4b98bdeae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 16:39:55 +0200
+Subject: bridge: Check relevant per-VLAN options in VLAN range grouping
+
+From: Danielle Ratson <danieller@nvidia.com>
+
+[ Upstream commit 93c9475c04acad2457a7e7ea4e3ec40a6e6d94a7 ]
+
+The br_vlan_opts_eq_range() function determines if consecutive VLANs can
+be grouped together in a range for compact netlink notifications. It
+currently checks state, tunnel info, and multicast router configuration,
+but misses two categories of per-VLAN options that affect the output:
+1. User-visible priv_flags (neigh_suppress, mcast_enabled)
+2. Port multicast context (mcast_max_groups, mcast_n_groups)
+
+When VLANs have different settings for these options, they are incorrectly
+grouped into ranges, causing netlink notifications to report only one
+VLAN's settings for the entire range.
+
+Fix by checking priv_flags equality, but only for flags that affect netlink
+output (BR_VLFLAG_NEIGH_SUPPRESS_ENABLED and BR_VLFLAG_MCAST_ENABLED),
+and comparing multicast context (mcast_max_groups and mcast_n_groups).
+
+Example showing the bugs before the fix:
+
+$ bridge vlan set vid 10 dev dummy1 neigh_suppress on
+$ bridge vlan set vid 11 dev dummy1 neigh_suppress off
+$ bridge -d vlan show dev dummy1
+ port vlan-id
+ dummy1 10-11
+ ... neigh_suppress on
+
+$ bridge vlan set vid 10 dev dummy1 mcast_max_groups 100
+$ bridge vlan set vid 11 dev dummy1 mcast_max_groups 200
+$ bridge -d vlan show dev dummy1
+ port vlan-id
+ dummy1 10-11
+ ... mcast_max_groups 100
+
+After the fix, VLANs 10 and 11 are shown as separate entries with their
+correct individual settings.
+
+Fixes: a1aee20d5db2 ("net: bridge: Add netlink knobs for number / maximum MDB entries")
+Fixes: 83f6d600796c ("bridge: vlan: Allow setting VLAN neighbor suppression state")
+Signed-off-by: Danielle Ratson <danieller@nvidia.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260225143956.3995415-2-danieller@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_private.h | 10 ++++++++++
+ net/bridge/br_vlan_options.c | 26 +++++++++++++++++++++++---
+ 2 files changed, 33 insertions(+), 3 deletions(-)
+
+diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
+index b9b2981c48414..9b55d38ea9edb 100644
+--- a/net/bridge/br_private.h
++++ b/net/bridge/br_private.h
+@@ -1344,6 +1344,16 @@ br_multicast_ctx_options_equal(const struct net_bridge_mcast *brmctx1,
+ true;
+ }
+
++static inline bool
++br_multicast_port_ctx_options_equal(const struct net_bridge_mcast_port *pmctx1,
++ const struct net_bridge_mcast_port *pmctx2)
++{
++ return br_multicast_ngroups_get(pmctx1) ==
++ br_multicast_ngroups_get(pmctx2) &&
++ br_multicast_ngroups_get_max(pmctx1) ==
++ br_multicast_ngroups_get_max(pmctx2);
++}
++
+ static inline bool
+ br_multicast_ctx_matches_vlan_snooping(const struct net_bridge_mcast *brmctx)
+ {
+diff --git a/net/bridge/br_vlan_options.c b/net/bridge/br_vlan_options.c
+index 8fa89b04ee942..5514e1fc8d1fa 100644
+--- a/net/bridge/br_vlan_options.c
++++ b/net/bridge/br_vlan_options.c
+@@ -43,9 +43,29 @@ bool br_vlan_opts_eq_range(const struct net_bridge_vlan *v_curr,
+ u8 range_mc_rtr = br_vlan_multicast_router(range_end);
+ u8 curr_mc_rtr = br_vlan_multicast_router(v_curr);
+
+- return v_curr->state == range_end->state &&
+- __vlan_tun_can_enter_range(v_curr, range_end) &&
+- curr_mc_rtr == range_mc_rtr;
++ if (v_curr->state != range_end->state)
++ return false;
++
++ if (!__vlan_tun_can_enter_range(v_curr, range_end))
++ return false;
++
++ if (curr_mc_rtr != range_mc_rtr)
++ return false;
++
++ /* Check user-visible priv_flags that affect output */
++ if ((v_curr->priv_flags ^ range_end->priv_flags) &
++ (BR_VLFLAG_NEIGH_SUPPRESS_ENABLED | BR_VLFLAG_MCAST_ENABLED))
++ return false;
++
++#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
++ if (!br_vlan_is_master(v_curr) &&
++ !br_multicast_port_ctx_vlan_disabled(&v_curr->port_mcast_ctx) &&
++ !br_multicast_port_ctx_options_equal(&v_curr->port_mcast_ctx,
++ &range_end->port_mcast_ctx))
++ return false;
++#endif
++
++ return true;
+ }
+
+ bool br_vlan_opts_fill(struct sk_buff *skb, const struct net_bridge_vlan *v,
+--
+2.51.0
+
--- /dev/null
+From 19a62be6e4bb3e14a4d2c65c19b9d9623e043930 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 7eba8ae01a5b1..ba65e6e8a923a 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1170,6 +1170,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From 4357dfb07f21075cfe9aa7a56d87013333ae4b82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 11:45:40 +0100
+Subject: can: dummy_can: dummy_can_init(): fix packet statistics
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c77bfbdd6aac31b152ee81522cd90ad1de18738f ]
+
+The former implementation was only counting the tx_packets value but not
+the tx_bytes as the skb was dropped at the driver layer.
+
+Enable CAN echo support (IFF_ECHO) in dummy_can_init(), which activates the
+code for setting and retrieving the echo SKB and counts the tx_bytes
+correctly.
+
+Fixes: 816cf430e84b ("can: add dummy_can driver")
+Cc: Vincent Mailhol <mailhol@kernel.org>
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Reviewed-by: Vincent Mailhol <mailhol@kernel.org>
+Link: https://patch.msgid.link/20260126104540.21024-1-socketcan@hartkopp.net
+[mkl: make commit message imperative]
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/dummy_can.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/can/dummy_can.c b/drivers/net/can/dummy_can.c
+index 41953655e3d3c..cd23de488edce 100644
+--- a/drivers/net/can/dummy_can.c
++++ b/drivers/net/can/dummy_can.c
+@@ -241,6 +241,7 @@ static int __init dummy_can_init(void)
+
+ dev->netdev_ops = &dummy_can_netdev_ops;
+ dev->ethtool_ops = &dummy_can_ethtool_ops;
++ dev->flags |= IFF_ECHO; /* enable echo handling */
+ priv = netdev_priv(dev);
+ priv->can.bittiming_const = &dummy_can_bittiming_const;
+ priv->can.bitrate_max = 20 * MEGA /* BPS */;
+--
+2.51.0
+
--- /dev/null
+From 11983ccfe65d597e5d0c2980e599f2a80941c386 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function calls free_irq() in its error path with the
+mcp_lock mutex held. But if an interrupt has already occurred, the
+interrupt handler will be waiting for the mcp_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue, move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensures that the IRQ
+handler will exit right away once it has acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index fa97adf25b734..bb7782582f401 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1214,6 +1214,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1257,12 +1258,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f8c45cde5835fdebc845ebb843b58c89842859dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 13:30:42 +0300
+Subject: crypto: ccp - Fix use-after-free on error path
+
+From: Alper Ak <alperyasinak1@gmail.com>
+
+[ Upstream commit 889b0e2721e793eb46cf7d17b965aa3252af3ec8 ]
+
+In the error path of sev_tsm_init_locked(), the code dereferences 't'
+after it has been freed with kfree(). The pr_err() statement attempts
+to access t->tio_en and t->tio_init_done after the memory has been
+released.
+
+Move the pr_err() call before kfree(t) to access the fields while the
+memory is still valid.
+
+This issue was reported by the Smatch static analyser.
+
+Fixes: 4be423572da1 ("crypto/ccp: Implement SEV-TIO PCIe IDE (phase1)")
+Signed-off-by: Alper Ak <alperyasinak1@gmail.com>
+Acked-by: Tom Lendacky <thomas.lendacky@amd.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/ccp/sev-dev-tsm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c
+index 40d02adaf3f6d..7ad7e7a413c0f 100644
+--- a/drivers/crypto/ccp/sev-dev-tsm.c
++++ b/drivers/crypto/ccp/sev-dev-tsm.c
+@@ -378,9 +378,9 @@ void sev_tsm_init_locked(struct sev_device *sev, void *tio_status_page)
+ return;
+
+ error_exit:
+- kfree(t);
+ pr_err("Failed to enable SEV-TIO: ret=%d en=%d initdone=%d SEV=%d\n",
+ ret, t->tio_en, t->tio_init_done, boot_cpu_has(X86_FEATURE_SEV));
++ kfree(t);
+ }
+
+ void sev_tsm_uninit(struct sev_device *sev)
+--
+2.51.0
+
--- /dev/null
+From f3abe2852fa8afef180916a5ccb568071449557d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:58:12 -0800
+Subject: dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ
+ handler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 74badb9c20b1a9c02a95c735c6d3cd6121679c93 ]
+
+Commit 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ
+handler") introduces a range check for if_id to avoid an out-of-bounds
+access. If an out-of-bounds if_id is detected, the interrupt status is
+not cleared. This may result in an interrupt storm.
+
+Clear the interrupt status after detecting an out-of-bounds if_id to avoid
+the problem.
+
+Found by an experimental AI code review agent at Google.
+
+Fixes: 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ handler")
+Cc: Junrui Luo <moonafterrain@outlook.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20260227055812.1777915-1-linux@roeck-us.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 78e21b46a5ba8..e212a014c8d41 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1533,7 +1533,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+- goto out;
++ goto out_clear;
+ }
+ port_priv = ethsw->ports[if_id];
+
+@@ -1553,6 +1553,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ dpaa2_switch_port_connect_mac(port_priv);
+ }
+
++out_clear:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From d7ac67a6a5870a64a946b4454f656cf62bd45ee3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 12:30:38 -0700
+Subject: drm/amd/display: Enable DEGAMMA and reject COLOR_PIPELINE+DEGAMMA_LUT
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit a4fa2355e0add57253468ef13bd08f11285f3b6e ]
+
+[WHAT]
+Create DEGAMMA properties even if color pipeline is enabled, and enforce
+the mutual exclusion in atomic check by rejecting any commit that
+attempts to enable both COLOR_PIPELINE on the plane and DEGAMMA_LUT on
+the CRTC simultaneously.
+
+Fixes: 18a4127e9315 ("drm/amd/display: Disable CRTC degamma when color pipeline is enabled")
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4963
+Reviewed-by: Melissa Wen <mwen@igalia.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 196a6aa727f1f15eb54dda5e60a41543ea9397ee)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c | 16 ++++++++--------
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 8 ++++++++
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+index 9fcd72d87d25b..39fcbc3e702dc 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
+@@ -765,15 +765,15 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
+ dm->adev->mode_info.crtcs[crtc_index] = acrtc;
+
+ /* Don't enable DRM CRTC degamma property for
+- * 1. Degamma is replaced by color pipeline.
+- * 2. DCE since it doesn't support programmable degamma anywhere.
+- * 3. DCN401 since pre-blending degamma LUT doesn't apply to cursor.
++ * 1. DCE since it doesn't support programmable degamma anywhere.
++ * 2. DCN401 since pre-blending degamma LUT doesn't apply to cursor.
++ * Note: DEGAMMA properties are created even if the primary plane has the
++ * COLOR_PIPELINE property. User space can use either the DEGAMMA properties
++ * or the COLOR_PIPELINE property. An atomic commit which attempts to enable
++ * both is rejected.
+ */
+- if (plane->color_pipeline_property)
+- has_degamma = false;
+- else
+- has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
+- dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
++ has_degamma = dm->adev->dm.dc->caps.color.dpp.dcn_arch &&
++ dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01;
+
+ drm_crtc_enable_color_mgmt(&acrtc->base, has_degamma ? MAX_COLOR_LUT_ENTRIES : 0,
+ true, MAX_COLOR_LUT_ENTRIES);
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+index 7474f1bc1d0b8..44b9c8ca6d719 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+@@ -1256,6 +1256,14 @@ static int amdgpu_dm_plane_atomic_check(struct drm_plane *plane,
+ if (ret)
+ return ret;
+
++ /* Reject commits that attempt to use both COLOR_PIPELINE and CRTC DEGAMMA_LUT */
++ if (new_plane_state->color_pipeline && new_crtc_state->degamma_lut) {
++ drm_dbg_atomic(plane->dev,
++ "[PLANE:%d:%s] COLOR_PIPELINE and CRTC DEGAMMA_LUT cannot be enabled simultaneously\n",
++ plane->base.id, plane->name);
++ return -EINVAL;
++ }
++
+ ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info);
+ if (ret)
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From 6bac23eb9ee55991999f74a700df7ea1d9c92ec3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 12:26:04 -0700
+Subject: drm/amd/display: Use mpc.preblend flag to indicate 3D LUT
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit c28b3ec3ca034fd1abc832fef46ce36eb13f8fad ]
+
+[WHAT]
+New ASIC's 3D LUT is indicated by mpc.preblend.
+
+Fixes: 0de2b1afea8d ("drm/amd/display: add 3D LUT colorop")
+Reviewed-by: Melissa Wen <mwen@igalia.com>
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 43175f6164d32cb96362d16e357689f74298145c)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 6 ++++--
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c | 3 ++-
+ 2 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+index 20a76d81d532d..12c52bffe9964 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+@@ -1706,6 +1706,7 @@ __set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ struct dc_transfer_func *tf = &dc_plane_state->in_shaper_func;
+ struct drm_atomic_state *state = plane_state->state;
+ const struct amdgpu_device *adev = drm_to_adev(colorop->dev);
++ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
+ const struct drm_device *dev = colorop->dev;
+ const struct drm_color_lut32 *lut3d;
+ uint32_t lut3d_size;
+@@ -1722,7 +1723,7 @@ __set_dm_plane_colorop_3dlut(struct drm_plane_state *plane_state,
+ }
+
+ if (colorop_state && !colorop_state->bypass && colorop->type == DRM_COLOROP_3D_LUT) {
+- if (!adev->dm.dc->caps.color.dpp.hw_3d_lut) {
++ if (!has_3dlut) {
+ drm_dbg(dev, "3D LUT is not supported by hardware\n");
+ return -EINVAL;
+ }
+@@ -1875,6 +1876,7 @@ amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ struct drm_colorop *colorop = plane_state->color_pipeline;
+ struct drm_device *dev = plane_state->plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
++ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
+ int ret;
+
+ /* 1D Curve - DEGAM TF */
+@@ -1907,7 +1909,7 @@ amdgpu_dm_plane_set_colorop_properties(struct drm_plane_state *plane_state,
+ if (ret)
+ return ret;
+
+- if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
++ if (has_3dlut) {
+ /* 1D Curve & LUT - SHAPER TF & LUT */
+ colorop = colorop->next;
+ if (!colorop) {
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
+index a2de3bba83464..cc124ab6aa7f7 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_colorop.c
+@@ -60,6 +60,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
+ struct drm_colorop *ops[MAX_COLOR_PIPELINE_OPS];
+ struct drm_device *dev = plane->dev;
+ struct amdgpu_device *adev = drm_to_adev(dev);
++ bool has_3dlut = adev->dm.dc->caps.color.dpp.hw_3d_lut || adev->dm.dc->caps.color.mpc.preblend;
+ int ret;
+ int i = 0;
+
+@@ -112,7 +113,7 @@ int amdgpu_dm_initialize_default_pipeline(struct drm_plane *plane, struct drm_pr
+
+ i++;
+
+- if (adev->dm.dc->caps.color.dpp.hw_3d_lut) {
++ if (has_3dlut) {
+ /* 1D curve - SHAPER TF */
+ ops[i] = kzalloc(sizeof(*ops[0]), GFP_KERNEL);
+ if (!ops[i]) {
+--
+2.51.0
+
--- /dev/null
+From f2dd96089786b4aaaf3cdc698251a1ca4acfbbca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 16:24:52 +0800
+Subject: drm/sched: Fix kernel-doc warning for drm_sched_job_done()
+
+From: Yujie Liu <yujie.liu@intel.com>
+
+[ Upstream commit 61ded1083b264ff67ca8c2de822c66b6febaf9a8 ]
+
+There is a kernel-doc warning for the scheduler:
+
+Warning: drivers/gpu/drm/scheduler/sched_main.c:367 function parameter 'result' not described in 'drm_sched_job_done'
+
+Fix the warning by describing the undocumented error code.
+
+Fixes: 539f9ee4b52a ("drm/scheduler: properly forward fence errors")
+Signed-off-by: Yujie Liu <yujie.liu@intel.com>
+[phasta: Flesh out commit message]
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://patch.msgid.link/20260227082452.1802922-1-yujie.liu@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/scheduler/sched_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 1d4f1b822e7b7..2d70c06113cfe 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -361,6 +361,7 @@ static void drm_sched_run_free_queue(struct drm_gpu_scheduler *sched)
+ /**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
++ * @result: 0 on success, -ERRNO on error
+ *
+ * Finish the job's fence and resubmit the work items.
+ */
+--
+2.51.0
+
--- /dev/null
+From dffbf2057cdee1eb362a98785b5066400ece9792 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:09:32 +0100
+Subject: drm/solomon: Fix page start when updating rectangle in page
+ addressing mode
+
+From: Francesco Lavra <flavra@baylibre.com>
+
+[ Upstream commit 36d9579fed6c9429aa172f77bd28c58696ce8e2b ]
+
+In page addressing mode, the pixel values of a dirty rectangle must be sent
+to the display controller one page at a time. The range of pages
+corresponding to a given rectangle is being incorrectly calculated as if
+the Y value of the top left coordinate of the rectangle was 0. This can
+result in rectangle updates being displayed on wrong parts of the screen.
+
+Fix the above issue by consolidating the start page calculation in a single
+place at the beginning of the update_rect function, and using the
+calculated value for all addressing modes.
+
+Fixes: b0daaa5cfaa5 ("drm/ssd130x: Support page addressing mode")
+Signed-off-by: Francesco Lavra <flavra@baylibre.com>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Link: https://patch.msgid.link/20260210180932.736502-1-flavra@baylibre.com
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index 96cf393201372..33ceed86ed362 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -737,6 +737,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+ unsigned int page_height = SSD130X_PAGE_HEIGHT;
++ u8 page_start = ssd130x->page_offset + y / page_height;
+ unsigned int pages = DIV_ROUND_UP(height, page_height);
+ struct drm_device *drm = &ssd130x->drm;
+ u32 array_idx = 0;
+@@ -774,14 +775,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+
+ if (!ssd130x->page_address_mode) {
+- u8 page_start;
+-
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ return ret;
+
+- page_start = ssd130x->page_offset + y / page_height;
+ ret = ssd130x_set_page_range(ssd130x, page_start, pages);
+ if (ret < 0)
+ return ret;
+@@ -813,7 +811,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+ if (ssd130x->page_address_mode) {
+ ret = ssd130x_set_page_pos(ssd130x,
+- ssd130x->page_offset + i,
++ page_start + i,
+ ssd130x->col_offset + x);
+ if (ret < 0)
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From b1d73ed27e2f8e7ecf46b90ef5c031c9e4b1a2c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 13:34:42 +0100
+Subject: drm/syncobj: Fix handle <-> fd ioctls with dirty stack
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Julian Orth <ju.orth@gmail.com>
+
+[ Upstream commit 2e3649e237237258a08d75afef96648dd2b379f7 ]
+
+Consider the following application:
+
+ #include <fcntl.h>
+ #include <string.h>
+ #include <drm/drm.h>
+ #include <sys/ioctl.h>
+
+ int main(void) {
+ int fd = open("/dev/dri/renderD128", O_RDWR);
+ struct drm_syncobj_create arg1;
+ ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &arg1);
+ struct drm_syncobj_handle arg2;
+ memset(&arg2, 1, sizeof(arg2)); // simulate dirty stack
+ arg2.handle = arg1.handle;
+ arg2.flags = 0;
+ arg2.fd = 0;
+ arg2.pad = 0;
+ // arg2.point = 0; // userspace is required to set point to 0
+ ioctl(fd, DRM_IOCTL_SYNCOBJ_HANDLE_TO_FD, &arg2);
+ }
+
+The last ioctl returns EINVAL because args->point is not 0. However,
+userspace developed against older kernel versions is not aware of the
+new point field and might therefore not initialize it.
+
+The correct check would be
+
+ if (args->flags & DRM_SYNCOBJ_FD_TO_HANDLE_FLAGS_TIMELINE)
+ return -EINVAL;
+
+However, there might already be userspace that relies on this not
+returning an error as long as point == 0. Therefore use the more lenient
+check.
+
+Fixes: c2d3a7300695 ("drm/syncobj: Extend EXPORT_SYNC_FILE for timeline syncobjs")
+Signed-off-by: Julian Orth <ju.orth@gmail.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://lore.kernel.org/r/20260301-point-v1-1-21fc5fd98614@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/drm_syncobj.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
+index e1b0fa4000cdd..7eb2cdbc574a0 100644
+--- a/drivers/gpu/drm/drm_syncobj.c
++++ b/drivers/gpu/drm/drm_syncobj.c
+@@ -900,7 +900,7 @@ drm_syncobj_handle_to_fd_ioctl(struct drm_device *dev, void *data,
+ return drm_syncobj_export_sync_file(file_private, args->handle,
+ point, &args->fd);
+
+- if (args->point)
++ if (point)
+ return -EINVAL;
+
+ return drm_syncobj_handle_to_fd(file_private, args->handle,
+@@ -934,7 +934,7 @@ drm_syncobj_fd_to_handle_ioctl(struct drm_device *dev, void *data,
+ args->handle,
+ point);
+
+- if (args->point)
++ if (point)
+ return -EINVAL;
+
+ return drm_syncobj_fd_to_handle(file_private, args->fd,
+--
+2.51.0
+
--- /dev/null
+From a417f3594d66e6939f44272d201789d7f92d085b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 01:34:49 +0000
+Subject: drm/xe/configfs: Free ctx_restore_mid_bb in release
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit e377182f0266f46f02d01838e6bde67b9dac0d66 ]
+
+ctx_restore_mid_bb memory is allocated in wa_bb_store(), but
+xe_config_device_release() only frees ctx_restore_post_bb.
+
+Free ctx_restore_mid_bb[0].cs as well to avoid leaking the allocation
+when the configfs device is removed.
+
+Fixes: b30d5de3d40c ("drm/xe/configfs: Add mid context restore bb")
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Nitin Gote <nitin.r.gote@intel.com>
+Link: https://patch.msgid.link/20260225013448.3547687-2-shuicheng.lin@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit a235e7d0098337c3f2d1e8f3610c719a589e115f)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_configfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c
+index 82edd04660055..4afdfd69899aa 100644
+--- a/drivers/gpu/drm/xe/xe_configfs.c
++++ b/drivers/gpu/drm/xe/xe_configfs.c
+@@ -830,6 +830,7 @@ static void xe_config_device_release(struct config_item *item)
+
+ mutex_destroy(&dev->lock);
+
++ kfree(dev->config.ctx_restore_mid_bb[0].cs);
+ kfree(dev->config.ctx_restore_post_bb[0].cs);
+ kfree(dev);
+ }
+--
+2.51.0
+
--- /dev/null
+From bcfdf0590ed0eba3886325866c8113e1eb355097 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Jan 2026 16:45:46 -0800
+Subject: drm/xe: Do not preempt fence signaling CS instructions
+
+From: Matthew Brost <matthew.brost@intel.com>
+
+[ Upstream commit cdc8a1e11f4d5b480ec750e28010c357185b95a6 ]
+
+If a batch buffer is complete, it makes little sense to preempt the
+fence signaling instructions in the ring, as the largest portion of the
+work (the batch buffer) is already done and fence signaling consists of
+only a few instructions. If these instructions are preempted, the GuC
+would need to perform a context switch just to signal the fence, which
+is costly and delays fence signaling. Avoid this scenario by disabling
+preemption immediately after the BB start instruction and re-enabling it
+after executing the fence signaling instructions.
+
+Fixes: dd08ebf6c352 ("drm/xe: Introduce a new DRM driver for Intel GPUs")
+Cc: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Cc: Carlos Santa <carlos.santa@intel.com>
+Signed-off-by: Matthew Brost <matthew.brost@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Link: https://patch.msgid.link/20260115004546.58060-1-matthew.brost@intel.com
+(cherry picked from commit 2bcbf2dcde0c839a73af664a3c77d4e77d58a3eb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_ring_ops.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_ring_ops.c b/drivers/gpu/drm/xe/xe_ring_ops.c
+index ac0c6dcffe156..803c652f5af91 100644
+--- a/drivers/gpu/drm/xe/xe_ring_ops.c
++++ b/drivers/gpu/drm/xe/xe_ring_ops.c
+@@ -267,6 +267,9 @@ static void __emit_job_gen12_simple(struct xe_sched_job *job, struct xe_lrc *lrc
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+@@ -332,6 +335,9 @@ static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ if (job->user_fence.used) {
+ i = emit_flush_dw(dw, i);
+ i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
+@@ -384,6 +390,9 @@ static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
+
+ i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);
+
++ /* Don't preempt fence signaling */
++ dw[i++] = MI_ARB_ON_OFF | MI_ARB_DISABLE;
++
+ i = emit_render_cache_flush(job, dw, i);
+
+ if (job->user_fence.used)
+--
+2.51.0
+
--- /dev/null
+From 11f4c62d37ed6514c1de81bfc579ccba99f2c1c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Feb 2026 17:53:08 -0500
+Subject: drm/xe/gsc: Fix GSC proxy cleanup on early initialization failure
+
+From: Zhanjun Dong <zhanjun.dong@intel.com>
+
+[ Upstream commit b3368ecca9538b88ddf982ea99064860fd5add97 ]
+
+xe_gsc_proxy_remove undoes what is done in both xe_gsc_proxy_init and
+xe_gsc_proxy_start; however, if we fail between those 2 calls, it is
+possible that the HW forcewake access hasn't been initialized yet and so
+we hit errors when the cleanup code tries to write GSC register. To
+avoid that, split the cleanup in 2 functions so that the HW cleanup is
+only called if the HW setup was completed successfully.
+
+Since the HW cleanup (interrupt disabling) is now removed from
+xe_gsc_proxy_remove, the cleanup on error paths in xe_gsc_proxy_start
+must be updated to disable interrupts before returning.
+
+Fixes: ff6cd29b690b ("drm/xe: Cleanup unwind of gt initialization")
+Signed-off-by: Zhanjun Dong <zhanjun.dong@intel.com>
+Reviewed-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Signed-off-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
+Link: https://patch.msgid.link/20260220225308.101469-1-zhanjun.dong@intel.com
+(cherry picked from commit 2b37c401b265c07b46408b5cb36a4b757c9b5060)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_gsc_proxy.c | 43 +++++++++++++++++++++++++------
+ drivers/gpu/drm/xe/xe_gsc_types.h | 2 ++
+ 2 files changed, 37 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_gsc_proxy.c b/drivers/gpu/drm/xe/xe_gsc_proxy.c
+index 464282a89eef3..a6f6f0ea56526 100644
+--- a/drivers/gpu/drm/xe/xe_gsc_proxy.c
++++ b/drivers/gpu/drm/xe/xe_gsc_proxy.c
+@@ -435,16 +435,12 @@ static int proxy_channel_alloc(struct xe_gsc *gsc)
+ return 0;
+ }
+
+-static void xe_gsc_proxy_remove(void *arg)
++static void xe_gsc_proxy_stop(struct xe_gsc *gsc)
+ {
+- struct xe_gsc *gsc = arg;
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int fw_ref = 0;
+
+- if (!gsc->proxy.component_added)
+- return;
+-
+ /* disable HECI2 IRQs */
+ xe_pm_runtime_get(xe);
+ fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+@@ -458,6 +454,30 @@ static void xe_gsc_proxy_remove(void *arg)
+ xe_pm_runtime_put(xe);
+
+ xe_gsc_wait_for_worker_completion(gsc);
++ gsc->proxy.started = false;
++}
++
++static void xe_gsc_proxy_remove(void *arg)
++{
++ struct xe_gsc *gsc = arg;
++ struct xe_gt *gt = gsc_to_gt(gsc);
++ struct xe_device *xe = gt_to_xe(gt);
++
++ if (!gsc->proxy.component_added)
++ return;
++
++ /*
++ * GSC proxy start is an async process that can be ongoing during
++ * Xe module load/unload. Using devm managed action to register
++ * xe_gsc_proxy_stop could cause issues if Xe module unload has
++ * already started when the action is registered, potentially leading
++ * to the cleanup being called at the wrong time. Therefore, instead
++ * of registering a separate devm action to undo what is done in
++ * proxy start, we call it from here, but only if the start has
++ * completed successfully (tracked with the 'started' flag).
++ */
++ if (gsc->proxy.started)
++ xe_gsc_proxy_stop(gsc);
+
+ component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
+ gsc->proxy.component_added = false;
+@@ -513,6 +533,7 @@ int xe_gsc_proxy_init(struct xe_gsc *gsc)
+ */
+ int xe_gsc_proxy_start(struct xe_gsc *gsc)
+ {
++ struct xe_gt *gt = gsc_to_gt(gsc);
+ int err;
+
+ /* enable the proxy interrupt in the GSC shim layer */
+@@ -524,12 +545,18 @@ int xe_gsc_proxy_start(struct xe_gsc *gsc)
+ */
+ err = xe_gsc_proxy_request_handler(gsc);
+ if (err)
+- return err;
++ goto err_irq_disable;
+
+ if (!xe_gsc_proxy_init_done(gsc)) {
+- xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
+- return -EIO;
++ xe_gt_err(gt, "GSC FW reports proxy init not completed\n");
++ err = -EIO;
++ goto err_irq_disable;
+ }
+
++ gsc->proxy.started = true;
+ return 0;
++
++err_irq_disable:
++ gsc_proxy_irq_toggle(gsc, false);
++ return err;
+ }
+diff --git a/drivers/gpu/drm/xe/xe_gsc_types.h b/drivers/gpu/drm/xe/xe_gsc_types.h
+index 97c056656df05..5aaa2a75861fd 100644
+--- a/drivers/gpu/drm/xe/xe_gsc_types.h
++++ b/drivers/gpu/drm/xe/xe_gsc_types.h
+@@ -58,6 +58,8 @@ struct xe_gsc {
+ struct mutex mutex;
+ /** @proxy.component_added: whether the component has been added */
+ bool component_added;
++ /** @proxy.started: whether the proxy has been started */
++ bool started;
+ /** @proxy.bo: object to store message to and from the GSC */
+ struct xe_bo *bo;
+ /** @proxy.to_gsc: map of the memory used to send messages to the GSC */
+--
+2.51.0
+
--- /dev/null
+From 7d48362005a28325d4ffc7b3cca8182ded3b8c59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 22:26:58 +0100
+Subject: drm/xe/queue: Call fini on exec queue creation fail
+
+From: Tomasz Lis <tomasz.lis@intel.com>
+
+[ Upstream commit 99f9b5343cae80eb0dfe050baf6c86d722b3ba2e ]
+
+Every call to queue init should have a corresponding fini call.
+Skipping this would mean skipping removal of the queue from GuC list
+(which is part of guc_id allocation). A damaged queue stored in
+exec_queue_lookup list would lead to invalid memory reference,
+sooner or later.
+
+Call fini to free guc_id. This must be done before any internal
+LRCs are freed.
+
+Since the finalization with this extra call became very similar to
+__xe_exec_queue_fini(), reuse that. To make this reuse possible,
+alter xe_lrc_put() so it can survive NULL parameters, like other
+similar functions.
+
+v2: Reuse _xe_exec_queue_fini(). Make xe_lrc_put() aware of NULLs.
+
+Fixes: 3c1fa4aa60b1 ("drm/xe: Move queue init before LRC creation")
+Signed-off-by: Tomasz Lis <tomasz.lis@intel.com>
+Reviewed-by: Matthew Brost <matthew.brost@intel.com> (v1)
+Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Link: https://patch.msgid.link/20260226212701.2937065-2-tomasz.lis@intel.com
+(cherry picked from commit 393e5fea6f7d7054abc2c3d97a4cfe8306cd6079)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_exec_queue.c | 23 +++++++++++------------
+ drivers/gpu/drm/xe/xe_lrc.h | 3 ++-
+ 2 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
+index 779d7e7e2d2ec..1e774fa1fa190 100644
+--- a/drivers/gpu/drm/xe/xe_exec_queue.c
++++ b/drivers/gpu/drm/xe/xe_exec_queue.c
+@@ -185,6 +185,16 @@ static struct xe_exec_queue *__xe_exec_queue_alloc(struct xe_device *xe,
+ return q;
+ }
+
++static void __xe_exec_queue_fini(struct xe_exec_queue *q)
++{
++ int i;
++
++ q->ops->fini(q);
++
++ for (i = 0; i < q->width; ++i)
++ xe_lrc_put(q->lrc[i]);
++}
++
+ static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
+ {
+ int i, err;
+@@ -239,21 +249,10 @@ static int __xe_exec_queue_init(struct xe_exec_queue *q, u32 exec_queue_flags)
+ return 0;
+
+ err_lrc:
+- for (i = i - 1; i >= 0; --i)
+- xe_lrc_put(q->lrc[i]);
++ __xe_exec_queue_fini(q);
+ return err;
+ }
+
+-static void __xe_exec_queue_fini(struct xe_exec_queue *q)
+-{
+- int i;
+-
+- q->ops->fini(q);
+-
+- for (i = 0; i < q->width; ++i)
+- xe_lrc_put(q->lrc[i]);
+-}
+-
+ struct xe_exec_queue *xe_exec_queue_create(struct xe_device *xe, struct xe_vm *vm,
+ u32 logical_mask, u16 width,
+ struct xe_hw_engine *hwe, u32 flags,
+diff --git a/drivers/gpu/drm/xe/xe_lrc.h b/drivers/gpu/drm/xe/xe_lrc.h
+index 2fb628da5c433..96ae31df3359f 100644
+--- a/drivers/gpu/drm/xe/xe_lrc.h
++++ b/drivers/gpu/drm/xe/xe_lrc.h
+@@ -73,7 +73,8 @@ static inline struct xe_lrc *xe_lrc_get(struct xe_lrc *lrc)
+ */
+ static inline void xe_lrc_put(struct xe_lrc *lrc)
+ {
+- kref_put(&lrc->refcount, xe_lrc_destroy);
++ if (lrc)
++ kref_put(&lrc->refcount, xe_lrc_destroy);
+ }
+
+ /**
+--
+2.51.0
+
--- /dev/null
+From 0d3d2dfcfe854b0d958413b609c42cac0c81a521 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Feb 2026 17:28:11 +0000
+Subject: drm/xe/reg_sr: Fix leak on xa_store failure
+
+From: Shuicheng Lin <shuicheng.lin@intel.com>
+
+[ Upstream commit 3091723785def05ebfe6a50866f87a044ae314ba ]
+
+Free the newly allocated entry when xa_store() fails to avoid a memory
+leak on the error path.
+
+v2: use goto fail_free. (Bala)
+
+Fixes: e5283bd4dfec ("drm/xe/reg_sr: Remove register pool")
+Cc: Balasubramani Vivekanandan <balasubramani.vivekanandan@intel.com>
+Cc: Matt Roper <matthew.d.roper@intel.com>
+Signed-off-by: Shuicheng Lin <shuicheng.lin@intel.com>
+Reviewed-by: Matt Roper <matthew.d.roper@intel.com>
+Link: https://patch.msgid.link/20260204172810.1486719-2-shuicheng.lin@intel.com
+Signed-off-by: Matt Roper <matthew.d.roper@intel.com>
+(cherry picked from commit 6bc6fec71ac45f52db609af4e62bdb96b9f5fadb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_reg_sr.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/xe/xe_reg_sr.c b/drivers/gpu/drm/xe/xe_reg_sr.c
+index fc8447a838c4f..6b9edc7ca4115 100644
+--- a/drivers/gpu/drm/xe/xe_reg_sr.c
++++ b/drivers/gpu/drm/xe/xe_reg_sr.c
+@@ -101,10 +101,12 @@ int xe_reg_sr_add(struct xe_reg_sr *sr,
+ *pentry = *e;
+ ret = xa_err(xa_store(&sr->xa, idx, pentry, GFP_KERNEL));
+ if (ret)
+- goto fail;
++ goto fail_free;
+
+ return 0;
+
++fail_free:
++ kfree(pentry);
+ fail:
+ xe_gt_err(gt,
+ "discarding save-restore reg %04lx (clear: %08x, set: %08x, masked: %s, mcr: %s): ret=%d\n",
+--
+2.51.0
+
--- /dev/null
+From c961e8e1b9e7ef7998fe8c8aa23bbf83e8ab7cd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 16:14:20 +0200
+Subject: e1000e: clear DPG_EN after reset to avoid autonomous power-gating
+
+From: Vitaly Lifshits <vitaly.lifshits@intel.com>
+
+[ Upstream commit 0942fc6d324eb9c6b16187b2aa994c0823557f06 ]
+
+Panther Lake systems introduced an autonomous power gating feature for
+the integrated Gigabit Ethernet in shutdown state (S5) state. As part of
+it, the reset value of DPG_EN bit was changed to 1. Clear this bit after
+performing hardware reset to avoid errors such as Tx/Rx hangs, or packet
+loss/corruption.
+
+Fixes: 0c9183ce61bc ("e1000e: Add support for the next LOM generation")
+Signed-off-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Avigail Dahan <avigailx.dahan@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e1000e/defines.h | 1 +
+ drivers/net/ethernet/intel/e1000e/ich8lan.c | 9 +++++++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
+index ba331899d1861..d4a1041e456dc 100644
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -33,6 +33,7 @@
+
+ /* Extended Device Control */
+ #define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
++#define E1000_CTRL_EXT_DPG_EN 0x00000008 /* Dynamic Power Gating Enable */
+ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+ #define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index 0ff8688ac3b84..2dceb5548a786 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -4932,6 +4932,15 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, reg);
+
++ /* The hardware reset value of the DPG_EN bit is 1.
++ * Clear DPG_EN to prevent unexpected autonomous power gating.
++ */
++ if (hw->mac.type >= e1000_pch_ptp) {
++ reg = er32(CTRL_EXT);
++ reg &= ~E1000_CTRL_EXT_DPG_EN;
++ ew32(CTRL_EXT, reg);
++ }
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1dcd85a7e7d44021b329006d086e8aeb776b641b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Feb 2026 13:51:51 +0200
+Subject: HID: multitouch: new class MT_CLS_EGALAX_P80H84
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit a2e70a89fa58133521b2deae4427d35776bda935 ]
+
+Fixes: f9e82295eec1 ("HID: multitouch: add eGalaxTouch P80H84 support")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-multitouch.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index dde15d131a73e..b8a748bbf0fd8 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -235,6 +235,7 @@ static void mt_post_parse(struct mt_device *td, struct mt_application *app);
+ #define MT_CLS_SMART_TECH 0x0113
+ #define MT_CLS_APPLE_TOUCHBAR 0x0114
+ #define MT_CLS_YOGABOOK9I 0x0115
++#define MT_CLS_EGALAX_P80H84 0x0116
+ #define MT_CLS_SIS 0x0457
+
+ #define MT_DEFAULT_MAXCONTACT 10
+@@ -449,6 +450,11 @@ static const struct mt_class mt_classes[] = {
+ MT_QUIRK_YOGABOOK9I,
+ .export_all_inputs = true
+ },
++ { .name = MT_CLS_EGALAX_P80H84,
++ .quirks = MT_QUIRK_ALWAYS_VALID |
++ MT_QUIRK_IGNORE_DUPLICATES |
++ MT_QUIRK_CONTACT_CNT_ACCURATE,
++ },
+ { }
+ };
+
+@@ -2233,8 +2239,9 @@ static const struct hid_device_id mt_devices[] = {
+ { .driver_data = MT_CLS_EGALAX_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C000) },
+- { .driver_data = MT_CLS_EGALAX,
+- MT_USB_DEVICE(USB_VENDOR_ID_DWAV,
++ { .driver_data = MT_CLS_EGALAX_P80H84,
++ HID_DEVICE(HID_BUS_ANY, HID_GROUP_MULTITOUCH_WIN_8,
++ USB_VENDOR_ID_DWAV,
+ USB_DEVICE_ID_DWAV_EGALAX_MULTITOUCH_C002) },
+
+ /* Elan devices */
+--
+2.51.0
+
--- /dev/null
+From 14bf95cd4052daef3aaffa171fd83843fce892fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 01:03:31 +0800
+Subject: hwmon: (aht10) Fix initialization commands for AHT20
+
+From: Hao Yu <haoyufine@gmail.com>
+
+[ Upstream commit b7497b5a99f54ab8dcda5b14a308385b2fb03d8d ]
+
+According to the AHT20 datasheet (updated to V1.0 after the 2023.09
+version), the initialization command for AHT20 is 0b10111110 (0xBE).
+The previous sequence (0xE1) used in earlier versions is no longer
+compatible with newer AHT20 sensors. Update the initialization
+command to ensure the sensor is properly initialized.
+
+While at it, use binary notation for DHT20_CMD_INIT to match the notation
+used in the datasheet.
+
+Fixes: d2abcb5cc885 ("hwmon: (aht10) Add support for compatible aht20")
+Signed-off-by: Hao Yu <haoyufine@gmail.com>
+Link: https://lore.kernel.org/r/20260222170332.1616-3-haoyufine@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/aht10.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index 007befdba9776..4ce019d2cc80e 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,7 +37,9 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
+-#define DHT20_CMD_INIT 0x71
++#define AHT20_CMD_INIT 0b10111110
++
++#define DHT20_CMD_INIT 0b01110001
+
+ /*
+ * Flags in the answer byte/command
+@@ -341,7 +343,7 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
+- data->init_cmd = AHT10_CMD_INIT;
++ data->init_cmd = AHT20_CMD_INIT;
+ break;
+ case dht20:
+ data->meas_size = AHT20_MEAS_SIZE;
+--
+2.51.0
+
--- /dev/null
+From 9edafab0d6933979be130dc4c3f90f0b9568c97e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:14 -0800
+Subject: hwmon: (it87) Check the it87_lock() return value
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 07ed4f05bbfd2bc014974dcc4297fd3aa1cb88c0 ]
+
+Return early in it87_resume() if it87_lock() fails instead of ignoring the
+return value of that function. This patch suppresses a Clang thread-safety
+warning.
+
+Cc: Frank Crawford <frank@crawford.emu.id.au>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Jean Delvare <jdelvare@suse.com>
+Cc: linux-hwmon@vger.kernel.org
+Fixes: 376e1a937b30 ("hwmon: (it87) Add calls to smbus_enable/smbus_disable as required")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20260223220102.2158611-15-bart.vanassche@linux.dev
+[groeck: Declare 'ret' at the beginning of it87_resume()]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/it87.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index e233aafa8856c..5cfb98a0512f0 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -3590,10 +3590,13 @@ static int it87_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct it87_data *data = dev_get_drvdata(dev);
++ int err;
+
+ it87_resume_sio(pdev);
+
+- it87_lock(data);
++ err = it87_lock(data);
++ if (err)
++ return err;
+
+ it87_check_pwm(dev);
+ it87_check_limit_regs(data);
+--
+2.51.0
+
--- /dev/null
+From 5c024df3e5647061b7ac2db6072eef018f37febd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jan 2026 09:51:11 -0800
+Subject: hwmon: (macsmc) Fix overflows, underflows, and sign extension
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 579b86f3c26fee97996e68c1cbfb7461711f3de3 ]
+
+The macsmc-hwmon driver experienced several issues related to value
+scaling and type conversion:
+
+1. macsmc_hwmon_read_f32_scaled() clipped values to INT_MAX/INT_MIN.
+ On 64-bit systems, hwmon supports long values, so clipping to
+ 32-bit range was premature and caused loss of range for high-power
+ sensors. Changed it to use long and clip to LONG_MAX/LONG_MIN.
+2. The overflow check in macsmc_hwmon_read_f32_scaled() used 1UL,
+ which is 32-bit on some platforms. Switched to 1ULL.
+3. macsmc_hwmon_read_key() used a u32 temporary variable for f32
+ values. When assigned to a 64-bit long, negative values were
+ zero-extended instead of sign-extended, resulting in large
+ positive numbers.
+4. macsmc_hwmon_read_ioft_scaled() used mult_frac() which could
+ overflow during intermediate multiplication. Switched to
+ mul_u64_u32_div() to handle the 64-bit multiplication safely.
+5. ioft values (unsigned 48.16) could overflow long when scaled
+ by 1,000,000. Added explicit clipping to LONG_MAX in the caller.
+6. macsmc_hwmon_write_f32() truncated its long argument to int,
+ potentially causing issues for large values.
+
+Fix these issues by using appropriate types and helper functions.
+
+Fixes: 785205fd8139 ("hwmon: Add Apple Silicon SMC hwmon driver")
+Cc: James Calligeros <jcalligeros99@gmail.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Neal Gompa <neal@gompa.dev>
+Cc: Janne Grunau <j@jannau.net>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Link: https://lore.kernel.org/r/20260129175112.3751907-3-linux@roeck-us.net
+Reviewed-by: James Calligeros <jcalligeros99@gmail.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/macsmc-hwmon.c | 28 ++++++++++++++++------------
+ 1 file changed, 16 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/hwmon/macsmc-hwmon.c b/drivers/hwmon/macsmc-hwmon.c
+index 40d25c81b4435..1500ec2cc9f83 100644
+--- a/drivers/hwmon/macsmc-hwmon.c
++++ b/drivers/hwmon/macsmc-hwmon.c
+@@ -22,6 +22,7 @@
+
+ #include <linux/bitfield.h>
+ #include <linux/hwmon.h>
++#include <linux/math64.h>
+ #include <linux/mfd/macsmc.h>
+ #include <linux/module.h>
+ #include <linux/of.h>
+@@ -130,7 +131,7 @@ static int macsmc_hwmon_read_ioft_scaled(struct apple_smc *smc, smc_key key,
+ if (ret < 0)
+ return ret;
+
+- *p = mult_frac(val, scale, 65536);
++ *p = mul_u64_u32_div(val, scale, 65536);
+
+ return 0;
+ }
+@@ -140,7 +141,7 @@ static int macsmc_hwmon_read_ioft_scaled(struct apple_smc *smc, smc_key key,
+ * them.
+ */
+ static int macsmc_hwmon_read_f32_scaled(struct apple_smc *smc, smc_key key,
+- int *p, int scale)
++ long *p, int scale)
+ {
+ u32 fval;
+ u64 val;
+@@ -162,21 +163,21 @@ static int macsmc_hwmon_read_f32_scaled(struct apple_smc *smc, smc_key key,
+ val = 0;
+ else if (exp < 0)
+ val >>= -exp;
+- else if (exp != 0 && (val & ~((1UL << (64 - exp)) - 1))) /* overflow */
++ else if (exp != 0 && (val & ~((1ULL << (64 - exp)) - 1))) /* overflow */
+ val = U64_MAX;
+ else
+ val <<= exp;
+
+ if (fval & FLT_SIGN_MASK) {
+- if (val > (-(s64)INT_MIN))
+- *p = INT_MIN;
++ if (val > (u64)LONG_MAX + 1)
++ *p = LONG_MIN;
+ else
+- *p = -val;
++ *p = -(long)val;
+ } else {
+- if (val > INT_MAX)
+- *p = INT_MAX;
++ if (val > (u64)LONG_MAX)
++ *p = LONG_MAX;
+ else
+- *p = val;
++ *p = (long)val;
+ }
+
+ return 0;
+@@ -195,7 +196,7 @@ static int macsmc_hwmon_read_key(struct apple_smc *smc,
+ switch (sensor->info.type_code) {
+ /* 32-bit IEEE 754 float */
+ case __SMC_KEY('f', 'l', 't', ' '): {
+- u32 flt_ = 0;
++ long flt_ = 0;
+
+ ret = macsmc_hwmon_read_f32_scaled(smc, sensor->macsmc_key,
+ &flt_, scale);
+@@ -214,7 +215,10 @@ static int macsmc_hwmon_read_key(struct apple_smc *smc,
+ if (ret)
+ return ret;
+
+- *val = (long)ioft;
++ if (ioft > LONG_MAX)
++ *val = LONG_MAX;
++ else
++ *val = (long)ioft;
+ break;
+ }
+ default:
+@@ -224,7 +228,7 @@ static int macsmc_hwmon_read_key(struct apple_smc *smc,
+ return 0;
+ }
+
+-static int macsmc_hwmon_write_f32(struct apple_smc *smc, smc_key key, int value)
++static int macsmc_hwmon_write_f32(struct apple_smc *smc, smc_key key, long value)
+ {
+ u64 val;
+ u32 fval = 0;
+--
+2.51.0
+
--- /dev/null
+From 2883ff02f27401daa3c0649ea278f911923de393 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jan 2026 09:51:10 -0800
+Subject: hwmon: (macsmc) Fix regressions in Apple Silicon SMC hwmon driver
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 5dd69b864911ae3847365e8bafe7854e79fbeecb ]
+
+The recently added macsmc-hwmon driver contained several critical
+bugs in its sensor population logic and float conversion routines.
+
+Specifically:
+- The voltage sensor population loop used the wrong prefix ("volt-"
+ instead of "voltage-") and incorrectly assigned sensors to the
+ temperature sensor array (hwmon->temp.sensors) instead of the
+ voltage sensor array (hwmon->volt.sensors). This would lead to
+ out-of-bounds memory access or data corruption when both temperature
+ and voltage sensors were present.
+- The float conversion in macsmc_hwmon_write_f32() had flawed exponent
+ logic for values >= 2^24 and lacked masking for the mantissa, which
+ could lead to incorrect values being written to the SMC.
+
+Fix these issues to ensure correct sensor registration and reliable
+manual fan control.
+
+Confirm that the reported overflow in FIELD_PREP is fixed by declaring
+macsmc_hwmon_write_f32() as __always_inline for a compile test.
+
+Fixes: 785205fd8139 ("hwmon: Add Apple Silicon SMC hwmon driver")
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Closes: https://lore.kernel.org/linux-hwmon/20260119195817.GA1035354@ax162/
+Cc: James Calligeros <jcalligeros99@gmail.com>
+Cc: Nathan Chancellor <nathan@kernel.org>
+Cc: Neal Gompa <neal@gompa.dev>
+Cc: Janne Grunau <j@jannau.net>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Nathan Chancellor <nathan@kernel.org> # build only
+Link: https://lore.kernel.org/r/20260129175112.3751907-2-linux@roeck-us.net
+Reviewed-by: James Calligeros <jcalligeros99@gmail.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/macsmc-hwmon.c | 25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/hwmon/macsmc-hwmon.c b/drivers/hwmon/macsmc-hwmon.c
+index 1c0bbec7e8ebc..40d25c81b4435 100644
+--- a/drivers/hwmon/macsmc-hwmon.c
++++ b/drivers/hwmon/macsmc-hwmon.c
+@@ -228,25 +228,22 @@ static int macsmc_hwmon_write_f32(struct apple_smc *smc, smc_key key, int value)
+ {
+ u64 val;
+ u32 fval = 0;
+- int exp = 0, neg;
++ int exp, neg;
+
++ neg = value < 0;
+ val = abs(value);
+- neg = val != value;
+
+ if (val) {
+- int msb = __fls(val) - exp;
+-
+- if (msb > 23) {
+- val >>= msb - FLT_MANT_BIAS;
+- exp -= msb - FLT_MANT_BIAS;
+- } else if (msb < 23) {
+- val <<= FLT_MANT_BIAS - msb;
+- exp += msb;
+- }
++ exp = __fls(val);
++
++ if (exp > 23)
++ val >>= exp - 23;
++ else
++ val <<= 23 - exp;
+
+ fval = FIELD_PREP(FLT_SIGN_MASK, neg) |
+ FIELD_PREP(FLT_EXP_MASK, exp + FLT_EXP_BIAS) |
+- FIELD_PREP(FLT_MANT_MASK, val);
++ FIELD_PREP(FLT_MANT_MASK, val & FLT_MANT_MASK);
+ }
+
+ return apple_smc_write_u32(smc, key, fval);
+@@ -663,8 +660,8 @@ static int macsmc_hwmon_populate_sensors(struct macsmc_hwmon *hwmon,
+ if (!hwmon->volt.sensors)
+ return -ENOMEM;
+
+- for_each_child_of_node_with_prefix(hwmon_node, key_node, "volt-") {
+- sensor = &hwmon->temp.sensors[hwmon->temp.count];
++ for_each_child_of_node_with_prefix(hwmon_node, key_node, "voltage-") {
++ sensor = &hwmon->volt.sensors[hwmon->volt.count];
+ if (!macsmc_hwmon_create_sensor(hwmon->dev, hwmon->smc, key_node, sensor)) {
+ sensor->attrs = HWMON_I_INPUT;
+
+--
+2.51.0
+
--- /dev/null
+From bc1a2d32e3210d23b04567c1c14040d4619c4ac9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 22:20:39 +0100
+Subject: hwmon: (max6639) fix inverted polarity
+
+From: Olivier Sobrie <olivier@sobrie.be>
+
+[ Upstream commit 170a4b21f49b3dcff3115b4c90758f0a0d77375a ]
+
+According to MAX6639 documentation:
+
+ D1: PWM Output Polarity. PWM output is low at
+ 100% duty cycle when this bit is set to zero. PWM
+ output is high at 100% duty cycle when this bit is set
+ to 1.
+
+Up to commit 0f33272b60ed ("hwmon: (max6639) : Update hwmon init using
+info structure"), the polarity was set to high (0x2) when no platform
+data was set. After the patch, the polarity register wasn't set anymore
+if no platform data was specified. Nowadays, since commit 7506ebcd662b
+("hwmon: (max6639) : Configure based on DT property"), it is always set
+to low which doesn't match with the comment above and change the
+behavior compared to versions prior 0f33272b60ed.
+
+Fixes: 0f33272b60ed ("hwmon: (max6639) : Update hwmon init using info structure")
+Signed-off-by: Olivier Sobrie <olivier@sobrie.be>
+Link: https://lore.kernel.org/r/20260304212039.570274-1-olivier@sobrie.be
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/max6639.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c
+index 99140a2ca9955..48fde4f1a1561 100644
+--- a/drivers/hwmon/max6639.c
++++ b/drivers/hwmon/max6639.c
+@@ -610,7 +610,7 @@ static int max6639_init_client(struct i2c_client *client,
+ return err;
+
+ /* Fans PWM polarity high by default */
+- err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x00);
++ err = regmap_write(data->regmap, MAX6639_REG_FAN_CONFIG2a(i), 0x02);
+ if (err)
+ return err;
+
+--
+2.51.0
+
--- /dev/null
+From e7b6bc2b4d9330285a280948709cfa8b88a01723 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 18:41:15 -0700
+Subject: i2c: i801: Revert "i2c: i801: replace acpi_lock with I2C bus lock"
+
+From: Charles Haithcock <chaithco@redhat.com>
+
+[ Upstream commit cfc69c2e6c699c96949f7b0455195b0bfb7dc715 ]
+
+This reverts commit f707d6b9e7c18f669adfdb443906d46cfbaaa0c1.
+
+Under rare circumstances, multiple udev threads can collect i801 device
+info on boot and walk i801_acpi_io_handler somewhat concurrently. The
+first will note the area is reserved by acpi to prevent further touches.
+This ultimately causes the area to be deregistered. The second will
+enter i801_acpi_io_handler after the area is unregistered but before a
+check can be made that the area is unregistered. i2c_lock_bus relies on
+the now unregistered area containing lock_ops to lock the bus. The end
+result is a kernel panic on boot with the following backtrace;
+
+[ 14.971872] ioatdma 0000:09:00.2: enabling device (0100 -> 0102)
+[ 14.971873] BUG: kernel NULL pointer dereference, address: 0000000000000000
+[ 14.971880] #PF: supervisor read access in kernel mode
+[ 14.971884] #PF: error_code(0x0000) - not-present page
+[ 14.971887] PGD 0 P4D 0
+[ 14.971894] Oops: 0000 [#1] PREEMPT SMP PTI
+[ 14.971900] CPU: 5 PID: 956 Comm: systemd-udevd Not tainted 5.14.0-611.5.1.el9_7.x86_64 #1
+[ 14.971905] Hardware name: XXXXXXXXXXXXXXXXXXXXXXX BIOS 1.20.10.SV91 01/30/2023
+[ 14.971908] RIP: 0010:i801_acpi_io_handler+0x2d/0xb0 [i2c_i801]
+[ 14.971929] Code: 00 00 49 8b 40 20 41 57 41 56 4d 8b b8 30 04 00 00 49 89 ce 41 55 41 89 d5 41 54 49 89 f4 be 02 00 00 00 55 4c 89 c5 53 89 fb <48> 8b 00 4c 89 c7 e8 18 61 54 e9 80 bd 80 04 00 00 00 75 09 4c 3b
+[ 14.971933] RSP: 0018:ffffbaa841483838 EFLAGS: 00010282
+[ 14.971938] RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffff9685e01ba568
+[ 14.971941] RDX: 0000000000000008 RSI: 0000000000000002 RDI: 0000000000000000
+[ 14.971944] RBP: ffff9685ca22f028 R08: ffff9685ca22f028 R09: ffff9685ca22f028
+[ 14.971948] R10: 000000000000000b R11: 0000000000000580 R12: 0000000000000580
+[ 14.971951] R13: 0000000000000008 R14: ffff9685e01ba568 R15: ffff9685c222f000
+[ 14.971954] FS: 00007f8287c0ab40(0000) GS:ffff96a47f940000(0000) knlGS:0000000000000000
+[ 14.971959] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 14.971963] CR2: 0000000000000000 CR3: 0000000168090001 CR4: 00000000003706f0
+[ 14.971966] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 14.971968] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 14.971972] Call Trace:
+[ 14.971977] <TASK>
+[ 14.971981] ? show_trace_log_lvl+0x1c4/0x2df
+[ 14.971994] ? show_trace_log_lvl+0x1c4/0x2df
+[ 14.972003] ? acpi_ev_address_space_dispatch+0x16e/0x3c0
+[ 14.972014] ? __die_body.cold+0x8/0xd
+[ 14.972021] ? page_fault_oops+0x132/0x170
+[ 14.972028] ? exc_page_fault+0x61/0x150
+[ 14.972036] ? asm_exc_page_fault+0x22/0x30
+[ 14.972045] ? i801_acpi_io_handler+0x2d/0xb0 [i2c_i801]
+[ 14.972061] acpi_ev_address_space_dispatch+0x16e/0x3c0
+[ 14.972069] ? __pfx_i801_acpi_io_handler+0x10/0x10 [i2c_i801]
+[ 14.972085] acpi_ex_access_region+0x5b/0xd0
+[ 14.972093] acpi_ex_field_datum_io+0x73/0x2e0
+[ 14.972100] acpi_ex_read_data_from_field+0x8e/0x230
+[ 14.972106] acpi_ex_resolve_node_to_value+0x23d/0x310
+[ 14.972114] acpi_ds_evaluate_name_path+0xad/0x110
+[ 14.972121] acpi_ds_exec_end_op+0x321/0x510
+[ 14.972127] acpi_ps_parse_loop+0xf7/0x680
+[ 14.972136] acpi_ps_parse_aml+0x17a/0x3d0
+[ 14.972143] acpi_ps_execute_method+0x137/0x270
+[ 14.972150] acpi_ns_evaluate+0x1f4/0x2e0
+[ 14.972158] acpi_evaluate_object+0x134/0x2f0
+[ 14.972164] acpi_evaluate_integer+0x50/0xe0
+[ 14.972173] ? vsnprintf+0x24b/0x570
+[ 14.972181] acpi_ac_get_state.part.0+0x23/0x70
+[ 14.972189] get_ac_property+0x4e/0x60
+[ 14.972195] power_supply_show_property+0x90/0x1f0
+[ 14.972205] add_prop_uevent+0x29/0x90
+[ 14.972213] power_supply_uevent+0x109/0x1d0
+[ 14.972222] dev_uevent+0x10e/0x2f0
+[ 14.972228] uevent_show+0x8e/0x100
+[ 14.972236] dev_attr_show+0x19/0x40
+[ 14.972246] sysfs_kf_seq_show+0x9b/0x100
+[ 14.972253] seq_read_iter+0x120/0x4b0
+[ 14.972262] ? selinux_file_permission+0x106/0x150
+[ 14.972273] vfs_read+0x24f/0x3a0
+[ 14.972284] ksys_read+0x5f/0xe0
+[ 14.972291] do_syscall_64+0x5f/0xe0
+...
+
+The kernel panic is mitigated by setting limiting the count of udev
+children to 1. Revert to using the acpi_lock to continue protecting
+marking the area as owned by firmware without relying on a lock in
+a potentially unmapped region of memory.
+
+Fixes: f707d6b9e7c1 ("i2c: i801: replace acpi_lock with I2C bus lock")
+Signed-off-by: Charles Haithcock <chaithco@redhat.com>
+[wsa: added Fixes-tag and updated comment stating the importance of the lock]
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/busses/i2c-i801.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
+index 9e1789725edf7..32a3cef02c7b5 100644
+--- a/drivers/i2c/busses/i2c-i801.c
++++ b/drivers/i2c/busses/i2c-i801.c
+@@ -310,9 +310,10 @@ struct i801_priv {
+
+ /*
+ * If set to true the host controller registers are reserved for
+- * ACPI AML use.
++ * ACPI AML use. Needs extra protection by acpi_lock.
+ */
+ bool acpi_reserved;
++ struct mutex acpi_lock;
+ };
+
+ #define FEATURE_SMBUS_PEC BIT(0)
+@@ -894,8 +895,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ int hwpec, ret;
+ struct i801_priv *priv = i2c_get_adapdata(adap);
+
+- if (priv->acpi_reserved)
++ mutex_lock(&priv->acpi_lock);
++ if (priv->acpi_reserved) {
++ mutex_unlock(&priv->acpi_lock);
+ return -EBUSY;
++ }
+
+ pm_runtime_get_sync(&priv->pci_dev->dev);
+
+@@ -935,6 +939,7 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
+ iowrite8(SMBHSTSTS_INUSE_STS | STATUS_FLAGS, SMBHSTSTS(priv));
+
+ pm_runtime_put_autosuspend(&priv->pci_dev->dev);
++ mutex_unlock(&priv->acpi_lock);
+ return ret;
+ }
+
+@@ -1465,7 +1470,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ * further access from the driver itself. This device is now owned
+ * by the system firmware.
+ */
+- i2c_lock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
++ mutex_lock(&priv->acpi_lock);
+
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
+ priv->acpi_reserved = true;
+@@ -1485,7 +1490,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
+ else
+ status = acpi_os_write_port(address, (u32)*value, bits);
+
+- i2c_unlock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
++ mutex_unlock(&priv->acpi_lock);
+
+ return status;
+ }
+@@ -1545,6 +1550,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ priv->adapter.dev.parent = &dev->dev;
+ acpi_use_parent_companion(&priv->adapter.dev);
+ priv->adapter.retries = 3;
++ mutex_init(&priv->acpi_lock);
+
+ priv->pci_dev = dev;
+ priv->features = id->driver_data;
+--
+2.51.0
+
--- /dev/null
+From 75f05ee4cfc146312b584668bdba8d925cd6db56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 7 Feb 2026 11:50:23 +0100
+Subject: i40e: Fix preempt count leak in napi poll tracepoint
+
+From: Thomas Gleixner <tglx@kernel.org>
+
+[ Upstream commit 4b3d54a85bd37ebf2d9836f0d0de775c0ff21af9 ]
+
+Using get_cpu() in the tracepoint assignment causes an obvious preempt
+count leak because nothing invokes put_cpu() to undo it:
+
+ softirq: huh, entered softirq 3 NET_RX with preempt_count 00000100, exited with 00000101?
+
+This clearly has seen a lot of testing in the last 3+ years...
+
+Use smp_processor_id() instead.
+
+Fixes: 6d4d584a7ea8 ("i40e: Add i40e_napi_poll tracepoint")
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Cc: Tony Nguyen <anthony.l.nguyen@intel.com>
+Cc: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Cc: intel-wired-lan@lists.osuosl.org
+Cc: netdev@vger.kernel.org
+Reviewed-by: Joe Damato <joe@dama.to>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_trace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h
+index 759f3d1c4c8f0..dde0ccd789ed1 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_trace.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h
+@@ -88,7 +88,7 @@ TRACE_EVENT(i40e_napi_poll,
+ __entry->rx_clean_complete = rx_clean_complete;
+ __entry->tx_clean_complete = tx_clean_complete;
+ __entry->irq_num = q->irq_num;
+- __entry->curr_cpu = get_cpu();
++ __entry->curr_cpu = smp_processor_id();
+ __assign_str(qname);
+ __assign_str(dev_name);
+ __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask),
+--
+2.51.0
+
--- /dev/null
+From 3aa5d391eba6e52028d05948b5e40d72b77299f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:46 +0100
+Subject: i40e: fix registering XDP RxQ info
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8f497dc8a61429cc004720aa8e713743355d80cf ]
+
+Current way of handling XDP RxQ info in i40e has a problem, where frag_size
+is not updated when xsk_buff_pool is detached or when MTU is changed, this
+leads to growing tail always failing for multi-buffer packets.
+
+Couple XDP RxQ info registering with buffer allocations and unregistering
+with cleaning the ring.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-6-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 34 ++++++++++++---------
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +--
+ 2 files changed, 22 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 02de186dcc8f5..bc00bd4f439be 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3583,18 +3583,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+- err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->queue_index,
+- ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
+- if (err)
+- return err;
+- }
+-
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+@@ -3606,17 +3596,23 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ }
+
+ skip:
+@@ -3654,7 +3650,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* set the context in the HMC */
+@@ -3663,7 +3660,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* configure Rx buffer alignment */
+@@ -3671,7 +3669,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_info(&vsi->back->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto unreg_xdp;
+ }
+ clear_ring_build_skb_enabled(ring);
+ } else {
+@@ -3701,6 +3700,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ return 0;
++unreg_xdp:
++ if (ring->vsi->type == I40E_VSI_MAIN)
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
++
++ return err;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index cc0b9efc2637a..816179c7e2712 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1470,6 +1470,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ if (!rx_ring->rx_bi)
+ return;
+
++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++
+ if (rx_ring->xsk_pool) {
+ i40e_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+@@ -1527,8 +1530,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+ {
+ i40e_clean_rx_ring(rx_ring);
+- if (rx_ring->vsi->type == I40E_VSI_MAIN)
+- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+--
+2.51.0
+
--- /dev/null
+From 7dcd847c14ca03d4a6fabb7e285ada5b54adaef2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:47 +0100
+Subject: i40e: use xdp.frame_sz as XDP RxQ info frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit c69d22c6c46a1d792ba8af3d8d6356fdc0e6f538 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects whole buffer size instead
+of DMA write size. Different assumptions in i40e driver configuration lead
+to negative tailroom.
+
+Set frag_size to the same value as frame_sz in shared pages mode, use new
+helper to set frag_size when AF_XDP ZC is active.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-7-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index bc00bd4f439be..598739220dfb9 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3569,6 +3569,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
++ u32 xdp_frame_sz;
+ int err = 0;
+ bool ok;
+
+@@ -3578,6 +3579,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
++ xdp_frame_sz = i40e_rx_pg_size(ring) / 2;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+@@ -3585,11 +3587,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
++ xdp_frame_sz = xsk_pool_get_rx_frag_step(ring->xsk_pool);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3605,7 +3608,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3616,7 +3619,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ skip:
+- xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
++ xdp_init_buff(&ring->xdp, xdp_frame_sz, &ring->xdp_rxq);
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+--
+2.51.0
+
--- /dev/null
+From c18686dd4537b24542ce3df3d40a1c1110250b79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 15:57:14 +0000
+Subject: iavf: fix netdev->max_mtu to respect actual hardware limit
+
+From: Kohei Enju <kohei@enjuk.jp>
+
+[ Upstream commit b84852170153671bb0fa6737a6e48370addd8e1a ]
+
+iavf sets LIBIE_MAX_MTU as netdev->max_mtu, ignoring vf_res->max_mtu
+from PF [1]. This allows setting an MTU beyond the actual hardware
+limit, causing TX queue timeouts [2].
+
+Set correct netdev->max_mtu using vf_res->max_mtu from the PF.
+
+Note that currently PF drivers such as ice/i40e set the frame size in
+vf_res->max_mtu, not MTU. Convert vf_res->max_mtu to MTU before setting
+netdev->max_mtu.
+
+[1]
+ # ip -j -d link show $DEV | jq '.[0].max_mtu'
+ 16356
+
+[2]
+ iavf 0000:00:05.0 enp0s5: NETDEV WATCHDOG: CPU: 1: transmit queue 0 timed out 5692 ms
+ iavf 0000:00:05.0 enp0s5: NIC Link is Up Speed is 10 Gbps Full Duplex
+ iavf 0000:00:05.0 enp0s5: NETDEV WATCHDOG: CPU: 6: transmit queue 3 timed out 5312 ms
+ iavf 0000:00:05.0 enp0s5: NIC Link is Up Speed is 10 Gbps Full Duplex
+ ...
+
+Fixes: 5fa4caff59f2 ("iavf: switch to Page Pool")
+Signed-off-by: Kohei Enju <kohei@enjuk.jp>
+Reviewed-by: Alexander Lobakin <aleksander.lobakin@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/iavf/iavf_main.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
+index 4b0fc8f354bc9..53a0366fbf998 100644
+--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
++++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
+@@ -2797,7 +2797,22 @@ static void iavf_init_config_adapter(struct iavf_adapter *adapter)
+ netdev->watchdog_timeo = 5 * HZ;
+
+ netdev->min_mtu = ETH_MIN_MTU;
+- netdev->max_mtu = LIBIE_MAX_MTU;
++
++ /* PF/VF API: vf_res->max_mtu is max frame size (not MTU).
++ * Convert to MTU.
++ */
++ if (!adapter->vf_res->max_mtu) {
++ netdev->max_mtu = LIBIE_MAX_MTU;
++ } else if (adapter->vf_res->max_mtu < LIBETH_RX_LL_LEN + ETH_MIN_MTU ||
++ adapter->vf_res->max_mtu >
++ LIBETH_RX_LL_LEN + LIBIE_MAX_MTU) {
++ netdev_warn_once(adapter->netdev,
++ "invalid max frame size %d from PF, using default MTU %d",
++ adapter->vf_res->max_mtu, LIBIE_MAX_MTU);
++ netdev->max_mtu = LIBIE_MAX_MTU;
++ } else {
++ netdev->max_mtu = adapter->vf_res->max_mtu - LIBETH_RX_LL_LEN;
++ }
+
+ if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
+--
+2.51.0
+
--- /dev/null
+From 4cd5ae5dd07966d9e7b131537e79d70ee55e0c5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:45 +0100
+Subject: ice: change XDP RxQ frag_size from DMA write length to xdp.frame_sz
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit e142dc4ef0f451b7ef99d09aaa84e9389af629d7 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects whole buff size instead
+of DMA write size. Different assumptions in ice driver configuration lead
+to negative tailroom.
+
+This allows to trigger kernel panic, when using
+XDP_ADJUST_TAIL_GROW_MULTI_BUFF xskxceiver test and changing packet size to
+6912 and the requested offset to a huge value, e.g.
+XSK_UMEM__MAX_FRAME_SIZE * 100.
+
+Due to other quirks of the ZC configuration in ice, panic is not observed
+in ZC mode, but tailroom growing still fails when it should not.
+
+Use fill queue buffer truesize instead of DMA write size in XDP RxQ info.
+Fix ZC mode too by using the new helper.
+
+Fixes: 2fba7dc5157b ("ice: Add support for XDP multi-buffer on Rx side")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-5-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 2c117ca7c76aa..5a6da2d501213 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -661,7 +661,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ {
+ struct device *dev = ice_pf_to_dev(ring->vsi->back);
+ u32 num_bufs = ICE_DESC_UNUSED(ring);
+- u32 rx_buf_len;
+ int err;
+
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF ||
+@@ -672,12 +671,12 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ return err;
+
+ if (ring->xsk_pool) {
+- rx_buf_len =
+- xsk_pool_get_rx_frame_size(ring->xsk_pool);
++ u32 frag_size =
++ xsk_pool_get_rx_frag_step(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+- rx_buf_len);
++ frag_size);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -697,7 +696,7 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ ring->truesize);
+ if (err)
+ goto err_destroy_fq;
+
+--
+2.51.0
+
--- /dev/null
+From 092cee93c9a0cfd6c6b1fed279464607ed2159ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Dec 2025 14:29:48 +0100
+Subject: ice: fix adding AQ LLDP filter for VF
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit eef33aa44935d001747ca97703c08dd6f9031162 ]
+
+The referenced commit came from a misunderstanding of the FW LLDP filter
+AQ (Admin Queue) command due to the error in the internal documentation.
+Contrary to the assumptions in the original commit, VFs can be added and
+deleted from this filter without any problems. Introduced dev_info message
+proved to be useful, so reverting the whole commit does not make sense.
+
+Without this fix, trusted VFs do not receive LLDP traffic, if there is an
+AQ LLDP filter on PF. When trusted VF attempts to add an LLDP multicast
+MAC address, the following message can be seen in dmesg on host:
+
+ice 0000:33:00.0: Failed to add Rx LLDP rule on VSI 20 error: -95
+
+Revert checking VSI type when adding LLDP filter through AQ.
+
+Fixes: 4d5a1c4e6d49 ("ice: do not add LLDP-specific filter if not necessary")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
+index 785bf5cc1b25e..a400bf4f239aa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_common.c
++++ b/drivers/net/ethernet/intel/ice/ice_common.c
+@@ -6429,7 +6429,7 @@ int ice_lldp_fltr_add_remove(struct ice_hw *hw, struct ice_vsi *vsi, bool add)
+ struct ice_aqc_lldp_filter_ctrl *cmd;
+ struct libie_aq_desc desc;
+
+- if (vsi->type != ICE_VSI_PF || !ice_fw_supports_lldp_fltr_ctrl(hw))
++ if (!ice_fw_supports_lldp_fltr_ctrl(hw))
+ return -EOPNOTSUPP;
+
+ cmd = libie_aq_raw(&desc);
+--
+2.51.0
+
--- /dev/null
+From 29aca02471d69dccbb3fc5090c7a8333b5037bcd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Feb 2026 11:17:54 +0100
+Subject: ice: fix crash in ethtool offline loopback test
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit a9c354e656597aededa027d63d2ff0973f6b033f ]
+
+Since the conversion of ice to page pool, the ethtool loopback test
+crashes:
+
+ BUG: kernel NULL pointer dereference, address: 000000000000000c
+ #PF: supervisor write access in kernel mode
+ #PF: error_code(0x0002) - not-present page
+ PGD 1100f1067 P4D 0
+ Oops: Oops: 0002 [#1] SMP NOPTI
+ CPU: 23 UID: 0 PID: 5904 Comm: ethtool Kdump: loaded Not tainted 6.19.0-0.rc7.260128g1f97d9dcf5364.49.eln154.x86_64 #1 PREEMPT(lazy)
+ Hardware name: [...]
+ RIP: 0010:ice_alloc_rx_bufs+0x1cd/0x310 [ice]
+ Code: 83 6c 24 30 01 66 41 89 47 08 0f 84 c0 00 00 00 41 0f b7 dc 48 8b 44 24 18 48 c1 e3 04 41 bb 00 10 00 00 48 8d 2c 18 8b 04 24 <89> 45 0c 41 8b 4d 00 49 d3 e3 44 3b 5c 24 24 0f 83 ac fe ff ff 44
+ RSP: 0018:ff7894738aa1f768 EFLAGS: 00010246
+ RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: 0000000000000700 RDI: 0000000000000000
+ RBP: 0000000000000000 R08: ff16dcae79880200 R09: 0000000000000019
+ R10: 0000000000000001 R11: 0000000000001000 R12: 0000000000000000
+ R13: 0000000000000000 R14: 0000000000000000 R15: ff16dcae6c670000
+ FS: 00007fcf428850c0(0000) GS:ff16dcb149710000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000000000000000c CR3: 0000000121227005 CR4: 0000000000773ef0
+ PKRU: 55555554
+ Call Trace:
+ <TASK>
+ ice_vsi_cfg_rxq+0xca/0x460 [ice]
+ ice_vsi_cfg_rxqs+0x54/0x70 [ice]
+ ice_loopback_test+0xa9/0x520 [ice]
+ ice_self_test+0x1b9/0x280 [ice]
+ ethtool_self_test+0xe5/0x200
+ __dev_ethtool+0x1106/0x1a90
+ dev_ethtool+0xbe/0x1a0
+ dev_ioctl+0x258/0x4c0
+ sock_do_ioctl+0xe3/0x130
+ __x64_sys_ioctl+0xb9/0x100
+ do_syscall_64+0x7c/0x700
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+ [...]
+
+It crashes because we have not initialized libeth for the rx ring.
+
+Fix it by treating ICE_VSI_LB VSIs slightly more like normal PF VSIs and
+letting them have a q_vector. It's just a dummy, because the loopback
+test does not use interrupts, but it contains a napi struct that can be
+passed to libeth_rx_fq_create() called from ice_vsi_cfg_rxq() ->
+ice_rxq_pp_create().
+
+Fixes: 93f53db9f9dc ("ice: switch to Page Pool")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 5 ++++-
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 4 ++++
+ drivers/net/ethernet/intel/ice/ice_lib.c | 15 ++++++++++-----
+ 3 files changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index eadb1e3d12b3a..f0da50df6791c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -124,6 +124,8 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, u16 v_idx)
+ if (vsi->type == ICE_VSI_VF) {
+ ice_calc_vf_reg_idx(vsi->vf, q_vector);
+ goto out;
++ } else if (vsi->type == ICE_VSI_LB) {
++ goto skip_alloc;
+ } else if (vsi->type == ICE_VSI_CTRL && vsi->vf) {
+ struct ice_vsi *ctrl_vsi = ice_get_vf_ctrl_vsi(pf, vsi);
+
+@@ -662,7 +664,8 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ u32 rx_buf_len;
+ int err;
+
+- if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
++ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF ||
++ ring->vsi->type == ICE_VSI_LB) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->q_index,
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 3565a5d96c6d1..e9f2618950c80 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -1289,6 +1289,10 @@ static u64 ice_loopback_test(struct net_device *netdev)
+ test_vsi->netdev = netdev;
+ tx_ring = test_vsi->tx_rings[0];
+ rx_ring = test_vsi->rx_rings[0];
++ /* Dummy q_vector and napi. Fill the minimum required for
++ * ice_rxq_pp_create().
++ */
++ rx_ring->q_vector->napi.dev = netdev;
+
+ if (ice_lbtest_prepare_rings(test_vsi)) {
+ ret = 2;
+diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
+index d47af94f31a99..bad67e4dc044f 100644
+--- a/drivers/net/ethernet/intel/ice/ice_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_lib.c
+@@ -107,10 +107,6 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
+ if (!vsi->rxq_map)
+ goto err_rxq_map;
+
+- /* There is no need to allocate q_vectors for a loopback VSI. */
+- if (vsi->type == ICE_VSI_LB)
+- return 0;
+-
+ /* allocate memory for q_vector pointers */
+ vsi->q_vectors = devm_kcalloc(dev, vsi->num_q_vectors,
+ sizeof(*vsi->q_vectors), GFP_KERNEL);
+@@ -239,6 +235,8 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi)
+ case ICE_VSI_LB:
+ vsi->alloc_txq = 1;
+ vsi->alloc_rxq = 1;
++ /* A dummy q_vector, no actual IRQ. */
++ vsi->num_q_vectors = 1;
+ break;
+ default:
+ dev_warn(ice_pf_to_dev(pf), "Unknown VSI type %d\n", vsi_type);
+@@ -2424,14 +2422,21 @@ static int ice_vsi_cfg_def(struct ice_vsi *vsi)
+ }
+ break;
+ case ICE_VSI_LB:
+- ret = ice_vsi_alloc_rings(vsi);
++ ret = ice_vsi_alloc_q_vectors(vsi);
+ if (ret)
+ goto unroll_vsi_init;
+
++ ret = ice_vsi_alloc_rings(vsi);
++ if (ret)
++ goto unroll_alloc_q_vector;
++
+ ret = ice_vsi_alloc_ring_stats(vsi);
+ if (ret)
+ goto unroll_vector_base;
+
++ /* Simply map the dummy q_vector to the only rx_ring */
++ vsi->rx_rings[0]->q_vector = vsi->q_vectors[0];
++
+ break;
+ default:
+ /* clean up the resources and exit */
+--
+2.51.0
+
--- /dev/null
+From a3ca954cf9e68f63028ea23bcd52ebe42bd86fc7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jan 2026 03:26:44 +0000
+Subject: ice: Fix memory leak in ice_set_ringparam()
+
+From: Zilin Guan <zilin@seu.edu.cn>
+
+[ Upstream commit fe868b499d16f55bbeea89992edb98043c9de416 ]
+
+In ice_set_ringparam, tx_rings and xdp_rings are allocated before
+rx_rings. If the allocation of rx_rings fails, the code jumps to
+the done label leaking both tx_rings and xdp_rings. Furthermore, if
+the setup of an individual Rx ring fails during the loop, the code jumps
+to the free_tx label which releases tx_rings but leaks xdp_rings.
+
+Fix this by introducing a free_xdp label and updating the error paths to
+ensure both xdp_rings and tx_rings are properly freed if rx_rings
+allocation or setup fails.
+
+Compile tested only. Issue found using a prototype static analysis tool
+and code review.
+
+Fixes: fcea6f3da546 ("ice: Add stats and ethtool support")
+Fixes: efc2214b6047 ("ice: Add support for XDP")
+Signed-off-by: Zilin Guan <zilin@seu.edu.cn>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index e9f2618950c80..5377550a2b6e1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3322,7 +3322,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ rx_rings = kcalloc(vsi->num_rxq, sizeof(*rx_rings), GFP_KERNEL);
+ if (!rx_rings) {
+ err = -ENOMEM;
+- goto done;
++ goto free_xdp;
+ }
+
+ ice_for_each_rxq(vsi, i) {
+@@ -3349,7 +3349,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ }
+ kfree(rx_rings);
+ err = -ENOMEM;
+- goto free_tx;
++ goto free_xdp;
+ }
+ }
+
+@@ -3402,6 +3402,13 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ }
+ goto done;
+
++free_xdp:
++ if (xdp_rings) {
++ ice_for_each_xdp_txq(vsi, i)
++ ice_free_tx_ring(&xdp_rings[i]);
++ kfree(xdp_rings);
++ }
++
+ free_tx:
+ /* error cleanup if the Rx allocations failed after getting Tx */
+ if (tx_rings) {
+--
+2.51.0
+
--- /dev/null
+From 4f43baa138fe69903ca998545ac470d4e4ad99d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:44 +0100
+Subject: ice: fix rxq info registering in mbuf packets
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 02852b47c706772af795d3e28fca99fc9b923b2c ]
+
+XDP RxQ info contains frag_size, which depends on the MTU. This makes the
+old way of registering RxQ info before calculating new buffer sizes
+invalid. Currently, it leads to frag_size being outdated, making it
+sometimes impossible to grow tailroom in a mbuf packet. E.g. fragments are
+actually 3K+, but frag size is still as if MTU was 1500.
+
+Always register new XDP RxQ info after reconfiguring memory pools.
+
+Fixes: 2fba7dc5157b ("ice: Add support for XDP multi-buffer on Rx side")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-4-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_base.c | 26 ++++++--------------
+ drivers/net/ethernet/intel/ice/ice_ethtool.c | 1 +
+ drivers/net/ethernet/intel/ice/ice_txrx.c | 4 ++-
+ drivers/net/ethernet/intel/ice/ice_xsk.c | 3 +++
+ 4 files changed, 14 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index f0da50df6791c..2c117ca7c76aa 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -666,23 +666,12 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF ||
+ ring->vsi->type == ICE_VSI_LB) {
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+- err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->q_index,
+- ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
+- if (err)
+- return err;
+- }
+-
+ ice_rx_xsk_pool(ring);
+ err = ice_realloc_rx_xdp_bufs(ring, ring->xsk_pool);
+ if (err)
+ return err;
+
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg(&ring->xdp_rxq);
+-
+ rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+@@ -705,14 +694,13 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ if (err)
+ return err;
+
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+- err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->q_index,
+- ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
+- if (err)
+- goto err_destroy_fq;
+- }
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->q_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ goto err_destroy_fq;
++
+ xdp_rxq_info_attach_page_pool(&ring->xdp_rxq,
+ ring->pp);
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+index 5377550a2b6e1..1b343c53874e1 100644
+--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
++++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
+@@ -3332,6 +3332,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring,
+ rx_rings[i].cached_phctime = pf->ptp.cached_phc_time;
+ rx_rings[i].desc = NULL;
+ rx_rings[i].xdp_buf = NULL;
++ rx_rings[i].xdp_rxq = (struct xdp_rxq_info){ };
+
+ /* this is to allow wr32 to have something to write to
+ * during early allocation of Rx buffers
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index ad76768a42323..f47b96ceb9a47 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -560,7 +560,9 @@ void ice_clean_rx_ring(struct ice_rx_ring *rx_ring)
+ i = 0;
+ }
+
+- if (rx_ring->vsi->type == ICE_VSI_PF &&
++ if ((rx_ring->vsi->type == ICE_VSI_PF ||
++ rx_ring->vsi->type == ICE_VSI_SF ||
++ rx_ring->vsi->type == ICE_VSI_LB) &&
+ xdp_rxq_info_is_reg(&rx_ring->xdp_rxq)) {
+ xdp_rxq_info_detach_mem_model(&rx_ring->xdp_rxq);
+ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
+index 989ff1fd91103..102631398af3c 100644
+--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
++++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
+@@ -900,6 +900,9 @@ void ice_xsk_clean_rx_ring(struct ice_rx_ring *rx_ring)
+ u16 ntc = rx_ring->next_to_clean;
+ u16 ntu = rx_ring->next_to_use;
+
++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++
+ while (ntc != ntu) {
+ struct xdp_buff *xdp = *ice_xdp_buf(rx_ring, ntc);
+
+--
+2.51.0
+
--- /dev/null
+From 3dd8fa27638895f3fc8bb55854644b4b5efb8fea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Jan 2026 12:00:26 +0800
+Subject: ice: recap the VSI and QoS info after rebuild
+
+From: Aaron Ma <aaron.ma@canonical.com>
+
+[ Upstream commit 6aa07e23dd3ccd35a0100c06fcb6b6c3b01e7965 ]
+
+Fix IRDMA hardware initialization timeout (-110) after resume by
+separating VSI-dependent configuration from RDMA resource allocation,
+ensuring VSI is rebuilt before IRDMA accesses it.
+
+After resume from suspend, IRDMA hardware initialization fails:
+ ice: IRDMA hardware initialization FAILED init_state=4 status=-110
+
+Separate RDMA initialization into two phases:
+1. ice_init_rdma() - Allocate resources only (no VSI/QoS access, no plug)
+2. ice_rdma_finalize_setup() - Assign VSI/QoS info and plug device
+
+This allows:
+- ice_init_rdma() to stay in ice_resume() (mirrors ice_deinit_rdma()
+ in ice_suspend())
+- VSI assignment deferred until after ice_vsi_rebuild() completes
+- QoS info updated after ice_dcb_rebuild() completes
+- Device plugged only when control queues, VSI, and DCB are all ready
+
+Fixes: bc69ad74867db ("ice: avoid IRQ collision to fix init failure on ACPI S3 resume")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Aaron Ma <aaron.ma@canonical.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 1 +
+ drivers/net/ethernet/intel/ice/ice_idc.c | 44 +++++++++++++++++------
+ drivers/net/ethernet/intel/ice/ice_main.c | 7 +++-
+ 3 files changed, 41 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 00f75d87c73f9..15a7fcd888b26 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -990,6 +990,7 @@ int ice_schedule_reset(struct ice_pf *pf, enum ice_reset_req reset);
+ void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
+ int ice_plug_aux_dev(struct ice_pf *pf);
+ void ice_unplug_aux_dev(struct ice_pf *pf);
++void ice_rdma_finalize_setup(struct ice_pf *pf);
+ int ice_init_rdma(struct ice_pf *pf);
+ void ice_deinit_rdma(struct ice_pf *pf);
+ bool ice_is_wol_supported(struct ice_hw *hw);
+diff --git a/drivers/net/ethernet/intel/ice/ice_idc.c b/drivers/net/ethernet/intel/ice/ice_idc.c
+index 420d45c2558b6..ded029aa71d7d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_idc.c
++++ b/drivers/net/ethernet/intel/ice/ice_idc.c
+@@ -360,6 +360,39 @@ void ice_unplug_aux_dev(struct ice_pf *pf)
+ auxiliary_device_uninit(adev);
+ }
+
++/**
++ * ice_rdma_finalize_setup - Complete RDMA setup after VSI is ready
++ * @pf: ptr to ice_pf
++ *
++ * Sets VSI-dependent information and plugs aux device.
++ * Must be called after ice_init_rdma(), ice_vsi_rebuild(), and
++ * ice_dcb_rebuild() complete.
++ */
++void ice_rdma_finalize_setup(struct ice_pf *pf)
++{
++ struct device *dev = ice_pf_to_dev(pf);
++ struct iidc_rdma_priv_dev_info *privd;
++ int ret;
++
++ if (!ice_is_rdma_ena(pf) || !pf->cdev_info)
++ return;
++
++ privd = pf->cdev_info->iidc_priv;
++ if (!privd || !pf->vsi || !pf->vsi[0] || !pf->vsi[0]->netdev)
++ return;
++
++ /* Assign VSI info now that VSI is valid */
++ privd->netdev = pf->vsi[0]->netdev;
++ privd->vport_id = pf->vsi[0]->vsi_num;
++
++ /* Update QoS info after DCB has been rebuilt */
++ ice_setup_dcb_qos_info(pf, &privd->qos_info);
++
++ ret = ice_plug_aux_dev(pf);
++ if (ret)
++ dev_warn(dev, "Failed to plug RDMA aux device: %d\n", ret);
++}
++
+ /**
+ * ice_init_rdma - initializes PF for RDMA use
+ * @pf: ptr to ice_pf
+@@ -398,22 +431,14 @@ int ice_init_rdma(struct ice_pf *pf)
+ }
+
+ cdev->iidc_priv = privd;
+- privd->netdev = pf->vsi[0]->netdev;
+
+ privd->hw_addr = (u8 __iomem *)pf->hw.hw_addr;
+ cdev->pdev = pf->pdev;
+- privd->vport_id = pf->vsi[0]->vsi_num;
+
+ pf->cdev_info->rdma_protocol |= IIDC_RDMA_PROTOCOL_ROCEV2;
+- ice_setup_dcb_qos_info(pf, &privd->qos_info);
+- ret = ice_plug_aux_dev(pf);
+- if (ret)
+- goto err_plug_aux_dev;
++
+ return 0;
+
+-err_plug_aux_dev:
+- pf->cdev_info->adev = NULL;
+- xa_erase(&ice_aux_id, pf->aux_idx);
+ err_alloc_xa:
+ kfree(privd);
+ err_privd_alloc:
+@@ -432,7 +457,6 @@ void ice_deinit_rdma(struct ice_pf *pf)
+ if (!ice_is_rdma_ena(pf))
+ return;
+
+- ice_unplug_aux_dev(pf);
+ xa_erase(&ice_aux_id, pf->aux_idx);
+ kfree(pf->cdev_info->iidc_priv);
+ kfree(pf->cdev_info);
+diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
+index d04605d3e61af..dddf1ae31952d 100644
+--- a/drivers/net/ethernet/intel/ice/ice_main.c
++++ b/drivers/net/ethernet/intel/ice/ice_main.c
+@@ -5138,6 +5138,9 @@ int ice_load(struct ice_pf *pf)
+ if (err)
+ goto err_init_rdma;
+
++ /* Finalize RDMA: VSI already created, assign info and plug device */
++ ice_rdma_finalize_setup(pf);
++
+ ice_service_task_restart(pf);
+
+ clear_bit(ICE_DOWN, pf->state);
+@@ -5169,6 +5172,7 @@ void ice_unload(struct ice_pf *pf)
+
+ devl_assert_locked(priv_to_devlink(pf));
+
++ ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
+ ice_deinit_features(pf);
+ ice_tc_indir_block_unregister(vsi);
+@@ -5595,6 +5599,7 @@ static int ice_suspend(struct device *dev)
+ */
+ disabled = ice_service_task_stop(pf);
+
++ ice_unplug_aux_dev(pf);
+ ice_deinit_rdma(pf);
+
+ /* Already suspended?, then there is nothing to do */
+@@ -7803,7 +7808,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
+
+ ice_health_clear(pf);
+
+- ice_plug_aux_dev(pf);
++ ice_rdma_finalize_setup(pf);
+ if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
+ ice_lag_rebuild(pf);
+
+--
+2.51.0
+
--- /dev/null
+From 219fb250926dd6380f22ddbb2a9d25b94b1c9b18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 26 Jan 2026 21:55:59 +0000
+Subject: idpf: change IRQ naming to match netdev and ethtool queue numbering
+
+From: Brian Vazquez <brianvv@google.com>
+
+[ Upstream commit 1500a8662d2d41d6bb03e034de45ddfe6d7d362d ]
+
+The code uses the vidx for the IRQ name but that doesn't match ethtool
+reporting nor netdev naming, this makes it hard to tune the device and
+associate queues with IRQs. Sequentially requesting irqs starting from
+'0' makes the output consistent.
+
+This commit changes the interrupt numbering but preserves the name
+format, maintaining ABI compatibility. Existing tools relying on the old
+numbering are already non-functional, as they lack a useful correlation
+to the interrupts.
+
+Before:
+
+ethtool -L eth1 tx 1 combined 3
+
+grep . /proc/irq/*/*idpf*/../smp_affinity_list
+/proc/irq/67/idpf-Mailbox-0/../smp_affinity_list:0-55,112-167
+/proc/irq/68/idpf-eth1-TxRx-1/../smp_affinity_list:0
+/proc/irq/70/idpf-eth1-TxRx-3/../smp_affinity_list:1
+/proc/irq/71/idpf-eth1-TxRx-4/../smp_affinity_list:2
+/proc/irq/72/idpf-eth1-Tx-5/../smp_affinity_list:3
+
+ethtool -S eth1 | grep -v ': 0'
+NIC statistics:
+ tx_q-0_pkts: 1002
+ tx_q-1_pkts: 2679
+ tx_q-2_pkts: 1113
+ tx_q-3_pkts: 1192 <----- tx_q-3 vs idpf-eth1-Tx-5
+ rx_q-0_pkts: 1143
+ rx_q-1_pkts: 3172
+ rx_q-2_pkts: 1074
+
+After:
+
+ethtool -L eth1 tx 1 combined 3
+
+grep . /proc/irq/*/*idpf*/../smp_affinity_list
+
+/proc/irq/67/idpf-Mailbox-0/../smp_affinity_list:0-55,112-167
+/proc/irq/68/idpf-eth1-TxRx-0/../smp_affinity_list:0
+/proc/irq/70/idpf-eth1-TxRx-1/../smp_affinity_list:1
+/proc/irq/71/idpf-eth1-TxRx-2/../smp_affinity_list:2
+/proc/irq/72/idpf-eth1-Tx-3/../smp_affinity_list:3
+
+ethtool -S eth1 | grep -v ': 0'
+NIC statistics:
+ tx_q-0_pkts: 118
+ tx_q-1_pkts: 134
+ tx_q-2_pkts: 228
+ tx_q-3_pkts: 138 <--- tx_q-3 matches idpf-eth1-Tx-3
+ rx_q-0_pkts: 111
+ rx_q-1_pkts: 366
+ rx_q-2_pkts: 120
+
+Fixes: d4d558718266 ("idpf: initialize interrupts and enable vport")
+Signed-off-by: Brian Vazquez <brianvv@google.com>
+Reviewed-by: Brett Creeley <brett.creeley@amd.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Tested-by: Samuel Salin <Samuel.salin@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index c558bb9c4dcbb..d365564831b0b 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -4038,7 +4038,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport)
+ continue;
+
+ name = kasprintf(GFP_KERNEL, "%s-%s-%s-%d", drv_name, if_name,
+- vec_name, vidx);
++ vec_name, vector);
+
+ err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0,
+ name, q_vector);
+--
+2.51.0
+
--- /dev/null
+From 4212684e67f3b8ba8efb85ea0bdc2dac68cf883c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Jan 2026 12:01:13 -0600
+Subject: idpf: Fix flow rule delete failure due to invalid validation
+
+From: Sreedevi Joshi <sreedevi.joshi@intel.com>
+
+[ Upstream commit 2c31557336a8e4d209ed8d4513cef2c0f15e7ef4 ]
+
+When deleting a flow rule using "ethtool -N <dev> delete <location>",
+idpf_sideband_action_ena() incorrectly validates fsp->ring_cookie even
+though ethtool doesn't populate this field for delete operations. The
+uninitialized ring_cookie may randomly match RX_CLS_FLOW_DISC or
+RX_CLS_FLOW_WAKE, causing validation to fail and preventing legitimate
+rule deletions. Remove the unnecessary sideband action enable check and
+ring_cookie validation during delete operations since action validation
+is not required when removing existing rules.
+
+Fixes: ada3e24b84a0 ("idpf: add flow steering support")
+Signed-off-by: Sreedevi Joshi <sreedevi.joshi@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_ethtool.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+index 2efa3c08aba5c..49cefb973f4da 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+@@ -307,9 +307,6 @@ static int idpf_del_flow_steer(struct net_device *netdev,
+ vport_config = vport->adapter->vport_config[np->vport_idx];
+ user_config = &vport_config->user_config;
+
+- if (!idpf_sideband_action_ena(vport, fsp))
+- return -EOPNOTSUPP;
+-
+ rule = kzalloc(struct_size(rule, rule_info, 1), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+--
+2.51.0
+
--- /dev/null
+From 1b0c3a251eb8e3dae34dd5436789353a169b980f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jan 2026 06:47:28 +0000
+Subject: idpf: increment completion queue next_to_clean in sw marker wait
+ routine
+
+From: Li Li <boolli@google.com>
+
+[ Upstream commit 712896ac4bce38a965a1c175f6e7804ed0381334 ]
+
+Currently, in idpf_wait_for_sw_marker_completion(), when an
+IDPF_TXD_COMPLT_SW_MARKER packet is found, the routine breaks out of
+the for loop and does not increment the next_to_clean counter. This
+causes the subsequent NAPI polls to run into the same
+IDPF_TXD_COMPLT_SW_MARKER packet again and print out the following:
+
+ [ 23.261341] idpf 0000:05:00.0 eth1: Unknown TX completion type: 5
+
+Instead, we should increment next_to_clean regardless when an
+IDPF_TXD_COMPLT_SW_MARKER packet is found.
+
+Tested: with the patch applied, we do not see the errors above from NAPI
+polls anymore.
+
+Fixes: 9d39447051a0 ("idpf: remove SW marker handling from NAPI")
+Signed-off-by: Li Li <boolli@google.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/idpf/idpf_txrx.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+index f58f616d87fc4..c558bb9c4dcbb 100644
+--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
++++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+@@ -2326,7 +2326,7 @@ void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+
+ do {
+ struct idpf_splitq_4b_tx_compl_desc *tx_desc;
+- struct idpf_tx_queue *target;
++ struct idpf_tx_queue *target = NULL;
+ u32 ctype_gen, id;
+
+ tx_desc = flow ? &complq->comp[ntc].common :
+@@ -2346,14 +2346,14 @@ void idpf_wait_for_sw_marker_completion(const struct idpf_tx_queue *txq)
+ target = complq->txq_grp->txqs[id];
+
+ idpf_queue_clear(SW_MARKER, target);
+- if (target == txq)
+- break;
+
+ next:
+ if (unlikely(++ntc == complq->desc_count)) {
+ ntc = 0;
+ gen_flag = !gen_flag;
+ }
++ if (target == txq)
++ break;
+ } while (time_before(jiffies, timeout));
+
+ idpf_queue_assign(GEN_CHK, complq, gen_flag);
+--
+2.51.0
+
--- /dev/null
+From bd91ea0e7ee8b87891634b1f28ac90e513fc3709 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 22 Jan 2026 15:16:52 +0100
+Subject: igb: Fix trigger of incorrect irq in igb_xsk_wakeup
+
+From: Vivek Behera <vivek.behera@siemens.com>
+
+[ Upstream commit d4c13ab36273a8c318ba06799793cc1f5d9c6fa1 ]
+
+The current implementation in the igb_xsk_wakeup expects
+the Rx and Tx queues to share the same irq. This would lead
+to triggering of incorrect irq in split irq configuration.
+This patch addresses this issue, which could impact environments
+with 2 active CPU cores or when the number of queues is reduced
+to 2 or less.
+
+cat /proc/interrupts | grep eno2
+ 167: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 0-edge eno2
+ 168: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 1-edge eno2-rx-0
+ 169: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 2-edge eno2-rx-1
+ 170: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 3-edge eno2-tx-0
+ 171: 0 0 0 0 IR-PCI-MSIX-0000:08:00.0
+ 4-edge eno2-tx-1
+
+Furthermore it uses the flags input argument to trigger either rx, tx or
+both rx and tx irqs as specified in the ndo_xsk_wakeup api documentation
+
+Fixes: 80f6ccf9f116 ("igb: Introduce XSK data structures and helpers")
+Signed-off-by: Vivek Behera <vivek.behera@siemens.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Suggested-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Tested-by: Saritha Sanigani <sarithax.sanigani@intel.com> (A Contingent Worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igb/igb_xsk.c | 38 +++++++++++++++++++-----
+ 1 file changed, 30 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igb/igb_xsk.c b/drivers/net/ethernet/intel/igb/igb_xsk.c
+index 30ce5fbb5b776..ce4a7b58cad2f 100644
+--- a/drivers/net/ethernet/intel/igb/igb_xsk.c
++++ b/drivers/net/ethernet/intel/igb/igb_xsk.c
+@@ -524,6 +524,16 @@ bool igb_xmit_zc(struct igb_ring *tx_ring, struct xsk_buff_pool *xsk_pool)
+ return nb_pkts < budget;
+ }
+
++static u32 igb_sw_irq_prep(struct igb_q_vector *q_vector)
++{
++ u32 eics = 0;
++
++ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
++ eics = q_vector->eims_value;
++
++ return eics;
++}
++
+ int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+ {
+ struct igb_adapter *adapter = netdev_priv(dev);
+@@ -542,20 +552,32 @@ int igb_xsk_wakeup(struct net_device *dev, u32 qid, u32 flags)
+
+ ring = adapter->tx_ring[qid];
+
+- if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
+- return -ENETDOWN;
+-
+ if (!READ_ONCE(ring->xsk_pool))
+ return -EINVAL;
+
+- if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) {
++ if (flags & XDP_WAKEUP_TX) {
++ if (test_bit(IGB_RING_FLAG_TX_DISABLED, &ring->flags))
++ return -ENETDOWN;
++
++ eics |= igb_sw_irq_prep(ring->q_vector);
++ }
++
++ if (flags & XDP_WAKEUP_RX) {
++ /* If IGB_FLAG_QUEUE_PAIRS is active, the q_vector
++ * and NAPI is shared between RX and TX.
++ * If NAPI is already running it would be marked as missed
++ * from the TX path, making this RX call a NOP
++ */
++ ring = adapter->rx_ring[qid];
++ eics |= igb_sw_irq_prep(ring->q_vector);
++ }
++
++ if (eics) {
+ /* Cause software interrupt */
+- if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+- eics |= ring->q_vector->eims_value;
++ if (adapter->flags & IGB_FLAG_HAS_MSIX)
+ wr32(E1000_EICS, eics);
+- } else {
++ else
+ wr32(E1000_ICS, E1000_ICS_RXDMT0);
+- }
+ }
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 6a42ed6259d00e904c3ddb21cf6741daf638b434 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 20 Jan 2026 08:52:16 +0100
+Subject: igc: Fix trigger of incorrect irq in igc_xsk_wakeup function
+
+From: Vivek Behera <vivek.behera@siemens.com>
+
+[ Upstream commit 554a1c34c11a057d01819ce9bb04653a8ffc8071 ]
+
+This patch addresses the issue where the igc_xsk_wakeup function
+was triggering an incorrect IRQ for tx-0 when the i226 is configured
+with only 2 combined queues or in an environment with 2 active CPU cores.
+This prevented XDP Zero-copy send functionality in such split IRQ
+configurations.
+
+The fix implements the correct logic for extracting q_vectors saved
+during rx and tx ring allocation and utilizes flags provided by the
+ndo_xsk_wakeup API to trigger the appropriate IRQ.
+
+Fixes: fc9df2a0b520 ("igc: Enable RX via AF_XDP zero-copy")
+Fixes: 15fd021bc427 ("igc: Add Tx hardware timestamp request for AF_XDP zero-copy packet")
+Signed-off-by: Vivek Behera <vivek.behera@siemens.com>
+Reviewed-by: Jacob Keller <jacob.keller@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Piotr Kwapulinski <piotr.kwapulinski@intel.com>
+Reviewed-by: Song Yoong Siang <yoong.siang.song@intel.com>
+Tested-by: Avigail Dahan <avigailx.dahan@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc_main.c | 34 ++++++++++++++++-------
+ drivers/net/ethernet/intel/igc/igc_ptp.c | 3 +-
+ 2 files changed, 26 insertions(+), 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 89a321a344d26..4439eeb378c1f 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -6908,28 +6908,29 @@ static int igc_xdp_xmit(struct net_device *dev, int num_frames,
+ return nxmit;
+ }
+
+-static void igc_trigger_rxtxq_interrupt(struct igc_adapter *adapter,
+- struct igc_q_vector *q_vector)
++static u32 igc_sw_irq_prep(struct igc_q_vector *q_vector)
+ {
+- struct igc_hw *hw = &adapter->hw;
+ u32 eics = 0;
+
+- eics |= q_vector->eims_value;
+- wr32(IGC_EICS, eics);
++ if (!napi_if_scheduled_mark_missed(&q_vector->napi))
++ eics = q_vector->eims_value;
++
++ return eics;
+ }
+
+ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+ {
+ struct igc_adapter *adapter = netdev_priv(dev);
+- struct igc_q_vector *q_vector;
++ struct igc_hw *hw = &adapter->hw;
+ struct igc_ring *ring;
++ u32 eics = 0;
+
+ if (test_bit(__IGC_DOWN, &adapter->state))
+ return -ENETDOWN;
+
+ if (!igc_xdp_is_enabled(adapter))
+ return -ENXIO;
+-
++ /* Check if queue_id is valid. Tx and Rx queue numbers are always same */
+ if (queue_id >= adapter->num_rx_queues)
+ return -EINVAL;
+
+@@ -6938,9 +6939,22 @@ int igc_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
+ if (!ring->xsk_pool)
+ return -ENXIO;
+
+- q_vector = adapter->q_vector[queue_id];
+- if (!napi_if_scheduled_mark_missed(&q_vector->napi))
+- igc_trigger_rxtxq_interrupt(adapter, q_vector);
++ if (flags & XDP_WAKEUP_RX)
++ eics |= igc_sw_irq_prep(ring->q_vector);
++
++ if (flags & XDP_WAKEUP_TX) {
++ /* If IGC_FLAG_QUEUE_PAIRS is active, the q_vector
++ * and NAPI is shared between RX and TX.
++ * If NAPI is already running it would be marked as missed
++ * from the RX path, making this TX call a NOP
++ */
++ ring = adapter->tx_ring[queue_id];
++ eics |= igc_sw_irq_prep(ring->q_vector);
++ }
++
++ if (eics)
++ /* Cause software interrupt */
++ wr32(IGC_EICS, eics);
+
+ return 0;
+ }
+diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c
+index 7aae83c108fd7..44ee193867661 100644
+--- a/drivers/net/ethernet/intel/igc/igc_ptp.c
++++ b/drivers/net/ethernet/intel/igc/igc_ptp.c
+@@ -550,7 +550,8 @@ static void igc_ptp_free_tx_buffer(struct igc_adapter *adapter,
+ tstamp->buffer_type = 0;
+
+ /* Trigger txrx interrupt for transmit completion */
+- igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index, 0);
++ igc_xsk_wakeup(adapter->netdev, tstamp->xsk_queue_index,
++ XDP_WAKEUP_TX);
+
+ return;
+ }
+--
+2.51.0
+
--- /dev/null
+From a20eff34b3d6fa97b3ae2a25fa3ca091d06b1ee6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index 35227d47cfc98..dc272b514a01b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From 2c7bf029ba0a8346e3eb8fc9691ca12146652652 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:35:45 +0000
+Subject: inet: annotate data-races around isk->inet_num
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 29252397bcc1e0a1f85e5c3bee59c325f5c26341 ]
+
+UDP/TCP lookups are using RCU, thus isk->inet_num accesses
+should use READ_ONCE() and WRITE_ONCE() where needed.
+
+Fixes: 3ab5aee7fe84 ("net: Convert TCP & DCCP hash tables to use RCU / hlist_nulls")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260225203545.1512417-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/inet6_hashtables.h | 2 +-
+ include/net/inet_hashtables.h | 2 +-
+ include/net/ip.h | 2 +-
+ net/ipv4/inet_hashtables.c | 8 ++++----
+ net/ipv4/tcp_diag.c | 2 +-
+ net/ipv6/inet6_hashtables.c | 3 ++-
+ 6 files changed, 10 insertions(+), 9 deletions(-)
+
+diff --git a/include/net/inet6_hashtables.h b/include/net/inet6_hashtables.h
+index 282e29237d936..c16de5b7963fd 100644
+--- a/include/net/inet6_hashtables.h
++++ b/include/net/inet6_hashtables.h
+@@ -175,7 +175,7 @@ static inline bool inet6_match(const struct net *net, const struct sock *sk,
+ {
+ if (!net_eq(sock_net(sk), net) ||
+ sk->sk_family != AF_INET6 ||
+- sk->sk_portpair != ports ||
++ READ_ONCE(sk->sk_portpair) != ports ||
+ !ipv6_addr_equal(&sk->sk_v6_daddr, saddr) ||
+ !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return false;
+diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
+index ac05a52d9e138..5a979dcab5383 100644
+--- a/include/net/inet_hashtables.h
++++ b/include/net/inet_hashtables.h
+@@ -345,7 +345,7 @@ static inline bool inet_match(const struct net *net, const struct sock *sk,
+ int dif, int sdif)
+ {
+ if (!net_eq(sock_net(sk), net) ||
+- sk->sk_portpair != ports ||
++ READ_ONCE(sk->sk_portpair) != ports ||
+ sk->sk_addrpair != cookie)
+ return false;
+
+diff --git a/include/net/ip.h b/include/net/ip.h
+index 69d5cef460040..7f9abd457e018 100644
+--- a/include/net/ip.h
++++ b/include/net/ip.h
+@@ -101,7 +101,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+
+ ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
+ ipcm->addr = inet->inet_saddr;
+- ipcm->protocol = inet->inet_num;
++ ipcm->protocol = READ_ONCE(inet->inet_num);
+ }
+
+ #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
+diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
+index f5826ec4bcaa8..46817b4c141b6 100644
+--- a/net/ipv4/inet_hashtables.c
++++ b/net/ipv4/inet_hashtables.c
+@@ -200,7 +200,7 @@ static bool inet_bind2_bucket_addr_match(const struct inet_bind2_bucket *tb2,
+ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
+ struct inet_bind2_bucket *tb2, unsigned short port)
+ {
+- inet_sk(sk)->inet_num = port;
++ WRITE_ONCE(inet_sk(sk)->inet_num, port);
+ inet_csk(sk)->icsk_bind_hash = tb;
+ inet_csk(sk)->icsk_bind2_hash = tb2;
+ sk_add_bind_node(sk, &tb2->owners);
+@@ -224,7 +224,7 @@ static void __inet_put_port(struct sock *sk)
+ spin_lock(&head->lock);
+ tb = inet_csk(sk)->icsk_bind_hash;
+ inet_csk(sk)->icsk_bind_hash = NULL;
+- inet_sk(sk)->inet_num = 0;
++ WRITE_ONCE(inet_sk(sk)->inet_num, 0);
+ sk->sk_userlocks &= ~SOCK_CONNECT_BIND;
+
+ spin_lock(&head2->lock);
+@@ -352,7 +352,7 @@ static inline int compute_score(struct sock *sk, const struct net *net,
+ {
+ int score = -1;
+
+- if (net_eq(sock_net(sk), net) && sk->sk_num == hnum &&
++ if (net_eq(sock_net(sk), net) && READ_ONCE(sk->sk_num) == hnum &&
+ !ipv6_only_sock(sk)) {
+ if (sk->sk_rcv_saddr != daddr)
+ return -1;
+@@ -1206,7 +1206,7 @@ int __inet_hash_connect(struct inet_timewait_death_row *death_row,
+
+ sk->sk_hash = 0;
+ inet_sk(sk)->inet_sport = 0;
+- inet_sk(sk)->inet_num = 0;
++ WRITE_ONCE(inet_sk(sk)->inet_num, 0);
+
+ if (tw)
+ inet_twsk_bind_unhash(tw, hinfo);
+diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
+index d83efd91f461c..7935702e394b2 100644
+--- a/net/ipv4/tcp_diag.c
++++ b/net/ipv4/tcp_diag.c
+@@ -509,7 +509,7 @@ static void tcp_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
+ if (r->sdiag_family != AF_UNSPEC &&
+ sk->sk_family != r->sdiag_family)
+ goto next_normal;
+- if (r->id.idiag_sport != htons(sk->sk_num) &&
++ if (r->id.idiag_sport != htons(READ_ONCE(sk->sk_num)) &&
+ r->id.idiag_sport)
+ goto next_normal;
+ if (r->id.idiag_dport != sk->sk_dport &&
+diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
+index 5e1da088d8e11..182d38e6d6d8d 100644
+--- a/net/ipv6/inet6_hashtables.c
++++ b/net/ipv6/inet6_hashtables.c
+@@ -95,7 +95,8 @@ static inline int compute_score(struct sock *sk, const struct net *net,
+ {
+ int score = -1;
+
+- if (net_eq(sock_net(sk), net) && inet_sk(sk)->inet_num == hnum &&
++ if (net_eq(sock_net(sk), net) &&
++ READ_ONCE(inet_sk(sk)->inet_num) == hnum &&
+ sk->sk_family == PF_INET6) {
+ if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, daddr))
+ return -1;
+--
+2.51.0
+
--- /dev/null
+From 03bc9e79049b149a0a2e96efdf0ab3418e3dc4d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index cd229974b7974..e7d90a28948a4 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1063,7 +1063,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From 1c1292fcf84906bb2e31a96ad6ce223f697a2cf3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:14:10 +0545
+Subject: kunit: tool: copy caller args in run_kernel to prevent mutation
+
+From: Shuvam Pandey <shuvampandey1@gmail.com>
+
+[ Upstream commit 40804c4974b8df2adab72f6475d343eaff72b7f6 ]
+
+run_kernel() appended KUnit flags directly to the caller-provided args
+list. When exec_tests() calls run_kernel() repeatedly (e.g. with
+--run_isolated), each call mutated the same list, causing later runs
+to inherit stale filter_glob values and duplicate kunit.enable flags.
+
+Fix this by copying args at the start of run_kernel(). Add a regression
+test that calls run_kernel() twice with the same list and verifies the
+original remains unchanged.
+
+Fixes: ff9e09a3762f ("kunit: tool: support running each suite/test separately")
+Signed-off-by: Shuvam Pandey <shuvampandey1@gmail.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_kernel.py | 6 ++++--
+ tools/testing/kunit/kunit_tool_test.py | 26 ++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index 260d8d9aa1db4..2998e1bc088b2 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -346,8 +346,10 @@ class LinuxSourceTree:
+ return self.validate_config(build_dir)
+
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+- if not args:
+- args = []
++ # Copy to avoid mutating the caller-supplied list. exec_tests() reuses
++ # the same args across repeated run_kernel() calls (e.g. --run_isolated),
++ # so appending to the original would accumulate stale flags on each call.
++ args = list(args) if args else []
+ if filter_glob:
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index bbba921e0eacb..ed45bac1548d9 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -489,6 +489,32 @@ class LinuxSourceTreeTest(unittest.TestCase):
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
++ def test_run_kernel_args_not_mutated(self):
++ """Verify run_kernel() copies args so callers can reuse them."""
++ start_calls = []
++
++ def fake_start(start_args, unused_build_dir):
++ start_calls.append(list(start_args))
++ return subprocess.Popen(['printf', 'KTAP version 1\n'],
++ text=True, stdout=subprocess.PIPE)
++
++ with tempfile.TemporaryDirectory('') as build_dir:
++ tree = kunit_kernel.LinuxSourceTree(build_dir,
++ kunitconfig_paths=[os.devnull])
++ with mock.patch.object(tree._ops, 'start', side_effect=fake_start), \
++ mock.patch.object(kunit_kernel.subprocess, 'call'):
++ kernel_args = ['mem=1G']
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test1'):
++ pass
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test2'):
++ pass
++ self.assertEqual(kernel_args, ['mem=1G'],
++ 'run_kernel() should not modify caller args')
++ self.assertIn('kunit.filter_glob=suite.test1', start_calls[0])
++ self.assertIn('kunit.filter_glob=suite.test2', start_calls[1])
++
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+--
+2.51.0
+
--- /dev/null
+From b05124120ccdf1ae91dc12139b00d117abc84fec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Feb 2026 10:11:40 +0100
+Subject: libie: don't unroll if fwlog isn't supported
+
+From: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+
+[ Upstream commit 636cc3bd12f499c74eaf5dc9a7d5b832f1bb24ed ]
+
+The libie_fwlog_deinit() function can be called during driver unload
+even when firmware logging was never properly initialized. This led to call
+trace:
+
+[ 148.576156] Oops: Oops: 0000 [#1] SMP NOPTI
+[ 148.576167] CPU: 80 UID: 0 PID: 12843 Comm: rmmod Kdump: loaded Not tainted 6.17.0-rc7next-queue-3oct-01915-g06d79d51cf51 #1 PREEMPT(full)
+[ 148.576177] Hardware name: HPE ProLiant DL385 Gen10 Plus/ProLiant DL385 Gen10 Plus, BIOS A42 07/18/2020
+[ 148.576182] RIP: 0010:__dev_printk+0x16/0x70
+[ 148.576196] Code: 1f 44 00 00 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 90 0f 1f 44 00 00 41 55 41 54 49 89 d4 55 48 89 fd 53 48 85 f6 74 3c <4c> 8b 6e 50 48 89 f3 4d 85 ed 75 03 4c 8b 2e 48 89 df e8 f3 27 98
+[ 148.576204] RSP: 0018:ffffd2fd7ea17a48 EFLAGS: 00010202
+[ 148.576211] RAX: ffffd2fd7ea17aa0 RBX: ffff8eb288ae2000 RCX: 0000000000000000
+[ 148.576217] RDX: ffffd2fd7ea17a70 RSI: 00000000000000c8 RDI: ffffffffb68d3d88
+[ 148.576222] RBP: ffffffffb68d3d88 R08: 0000000000000000 R09: 0000000000000000
+[ 148.576227] R10: 00000000000000c8 R11: ffff8eb2b1a49400 R12: ffffd2fd7ea17a70
+[ 148.576231] R13: ffff8eb3141fb000 R14: ffffffffc1215b48 R15: ffffffffc1215bd8
+[ 148.576236] FS: 00007f5666ba6740(0000) GS:ffff8eb2472b9000(0000) knlGS:0000000000000000
+[ 148.576242] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 148.576247] CR2: 0000000000000118 CR3: 000000011ad17000 CR4: 0000000000350ef0
+[ 148.576252] Call Trace:
+[ 148.576258] <TASK>
+[ 148.576269] _dev_warn+0x7c/0x96
+[ 148.576290] libie_fwlog_deinit+0x112/0x117 [libie_fwlog]
+[ 148.576303] ixgbe_remove+0x63/0x290 [ixgbe]
+[ 148.576342] pci_device_remove+0x42/0xb0
+[ 148.576354] device_release_driver_internal+0x19c/0x200
+[ 148.576365] driver_detach+0x48/0x90
+[ 148.576372] bus_remove_driver+0x6d/0xf0
+[ 148.576383] pci_unregister_driver+0x2e/0xb0
+[ 148.576393] ixgbe_exit_module+0x1c/0xd50 [ixgbe]
+[ 148.576430] __do_sys_delete_module.isra.0+0x1bc/0x2e0
+[ 148.576446] do_syscall_64+0x7f/0x980
+
+It can be reproduced by trying to unload ixgbe driver in recovery mode.
+
+Fix that by checking if fwlog is supported before doing unroll.
+
+Fixes: 641585bc978e ("ixgbe: fwlog support for e610")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/libie/fwlog.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/libie/fwlog.c b/drivers/net/ethernet/intel/libie/fwlog.c
+index f39cc11cb7c56..5d890d9d3c4d5 100644
+--- a/drivers/net/ethernet/intel/libie/fwlog.c
++++ b/drivers/net/ethernet/intel/libie/fwlog.c
+@@ -1051,6 +1051,10 @@ void libie_fwlog_deinit(struct libie_fwlog *fwlog)
+ {
+ int status;
+
++ /* if FW logging isn't supported it means no configuration was done */
++ if (!libie_fwlog_supported(fwlog))
++ return;
++
+ /* make sure FW logging is disabled to not put the FW in a weird state
+ * for the next driver load
+ */
+--
+2.51.0
+
--- /dev/null
+From 1f3b622d00f6b8c951958ded77335762951b8dce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 7 Jan 2026 13:22:57 +0100
+Subject: module: Remove duplicate freeing of lockdep classes
+
+From: Petr Pavlu <petr.pavlu@suse.com>
+
+[ Upstream commit a7b4bc094fbaa7dc7b7b91ae33549bbd7eefaac1 ]
+
+In the error path of load_module(), under the free_module label, the
+code calls lockdep_free_key_range() to release lock classes associated
+with the MOD_DATA, MOD_RODATA and MOD_RO_AFTER_INIT module regions, and
+subsequently invokes module_deallocate().
+
+Since commit ac3b43283923 ("module: replace module_layout with
+module_memory"), the module_deallocate() function calls free_mod_mem(),
+which releases the lock classes as well and considers all module
+regions.
+
+Attempting to free these classes twice is unnecessary. Remove the
+redundant code in load_module().
+
+Fixes: ac3b43283923 ("module: replace module_layout with module_memory")
+Signed-off-by: Petr Pavlu <petr.pavlu@suse.com>
+Reviewed-by: Daniel Gomez <da.gomez@samsung.com>
+Reviewed-by: Aaron Tomlin <atomlin@atomlin.com>
+Acked-by: Song Liu <song@kernel.org>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Sami Tolvanen <samitolvanen@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/module/main.c | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/kernel/module/main.c b/kernel/module/main.c
+index 710ee30b3beab..bcd259505c8b3 100644
+--- a/kernel/module/main.c
++++ b/kernel/module/main.c
+@@ -3544,12 +3544,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
+ mutex_unlock(&module_mutex);
+ free_module:
+ mod_stat_bump_invalid(info, flags);
+- /* Free lock-classes; relies on the preceding sync_rcu() */
+- for_class_mod_mem_type(type, core_data) {
+- lockdep_free_key_range(mod->mem[type].base,
+- mod->mem[type].size);
+- }
+-
+ module_memory_restore_rox(mod);
+ module_deallocate(mod, info);
+ free_copy:
+--
+2.51.0
+
--- /dev/null
+From 29b02b7d21bfcc516480e49a78b37f202d3a565a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 13:15:47 +0000
+Subject: net: annotate data-races around sk->sk_{data_ready,write_space}
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 2ef2b20cf4e04ac8a6ba68493f8780776ff84300 ]
+
+skmsg (and probably other layers) are changing these pointers
+while other cpus might read them concurrently.
+
+Add corresponding READ_ONCE()/WRITE_ONCE() annotations
+for UDP, TCP and AF_UNIX.
+
+Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface")
+Reported-by: syzbot+87f770387a9e5dc6b79b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/699ee9fc.050a0220.1cd54b.0009.GAE@google.com/
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: Jakub Sitnicki <jakub@cloudflare.com>
+Cc: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260225131547.1085509-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/skmsg.c | 14 +++++++-------
+ net/ipv4/tcp.c | 4 ++--
+ net/ipv4/tcp_bpf.c | 2 +-
+ net/ipv4/tcp_input.c | 14 ++++++++------
+ net/ipv4/tcp_minisocks.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ net/ipv4/udp_bpf.c | 2 +-
+ net/unix/af_unix.c | 8 ++++----
+ 8 files changed, 25 insertions(+), 23 deletions(-)
+
+diff --git a/net/core/skmsg.c b/net/core/skmsg.c
+index ddde93dd8bc6d..12fbb0545c712 100644
+--- a/net/core/skmsg.c
++++ b/net/core/skmsg.c
+@@ -1205,8 +1205,8 @@ void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock)
+ return;
+
+ psock->saved_data_ready = sk->sk_data_ready;
+- sk->sk_data_ready = sk_psock_strp_data_ready;
+- sk->sk_write_space = sk_psock_write_space;
++ WRITE_ONCE(sk->sk_data_ready, sk_psock_strp_data_ready);
++ WRITE_ONCE(sk->sk_write_space, sk_psock_write_space);
+ }
+
+ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+@@ -1216,8 +1216,8 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock)
+ if (!psock->saved_data_ready)
+ return;
+
+- sk->sk_data_ready = psock->saved_data_ready;
+- psock->saved_data_ready = NULL;
++ WRITE_ONCE(sk->sk_data_ready, psock->saved_data_ready);
++ WRITE_ONCE(psock->saved_data_ready, NULL);
+ strp_stop(&psock->strp);
+ }
+
+@@ -1296,8 +1296,8 @@ void sk_psock_start_verdict(struct sock *sk, struct sk_psock *psock)
+ return;
+
+ psock->saved_data_ready = sk->sk_data_ready;
+- sk->sk_data_ready = sk_psock_verdict_data_ready;
+- sk->sk_write_space = sk_psock_write_space;
++ WRITE_ONCE(sk->sk_data_ready, sk_psock_verdict_data_ready);
++ WRITE_ONCE(sk->sk_write_space, sk_psock_write_space);
+ }
+
+ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
+@@ -1308,6 +1308,6 @@ void sk_psock_stop_verdict(struct sock *sk, struct sk_psock *psock)
+ if (!psock->saved_data_ready)
+ return;
+
+- sk->sk_data_ready = psock->saved_data_ready;
++ WRITE_ONCE(sk->sk_data_ready, psock->saved_data_ready);
+ psock->saved_data_ready = NULL;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 81666571ecfb5..699212cd6c226 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -1397,7 +1397,7 @@ int tcp_sendmsg_locked(struct sock *sk, struct msghdr *msg, size_t size)
+ err = sk_stream_error(sk, flags, err);
+ /* make sure we wake any epoll edge trigger waiter */
+ if (unlikely(tcp_rtx_and_write_queues_empty(sk) && err == -EAGAIN)) {
+- sk->sk_write_space(sk);
++ READ_ONCE(sk->sk_write_space)(sk);
+ tcp_chrono_stop(sk, TCP_CHRONO_SNDBUF_LIMITED);
+ }
+ if (binding)
+@@ -4131,7 +4131,7 @@ int do_tcp_setsockopt(struct sock *sk, int level, int optname,
+ break;
+ case TCP_NOTSENT_LOWAT:
+ WRITE_ONCE(tp->notsent_lowat, val);
+- sk->sk_write_space(sk);
++ READ_ONCE(sk->sk_write_space)(sk);
+ break;
+ case TCP_INQ:
+ if (val > 1 || val < 0)
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index ca8a5cb8e569d..d3d6a47af5270 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -725,7 +725,7 @@ int tcp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ WRITE_ONCE(sk->sk_prot->unhash, psock->saved_unhash);
+ tcp_update_ulp(sk, psock->sk_proto, psock->saved_write_space);
+ } else {
+- sk->sk_write_space = psock->saved_write_space;
++ WRITE_ONCE(sk->sk_write_space, psock->saved_write_space);
+ /* Pairs with lockless read in sk_clone_lock() */
+ sock_replace_proto(sk, psock->sk_proto);
+ }
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index aa4f5bf765596..adec44313772b 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5167,7 +5167,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+
+ if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ tcp_drop_reason(sk, skb, SKB_DROP_REASON_PROTO_MEM);
+ return;
+ }
+@@ -5377,7 +5377,7 @@ int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
+ void tcp_data_ready(struct sock *sk)
+ {
+ if (tcp_epollin_ready(sk, sk->sk_rcvlowat) || sock_flag(sk, SOCK_DONE))
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ }
+
+ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+@@ -5433,7 +5433,7 @@ static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
+ inet_csk(sk)->icsk_ack.pending |=
+ (ICSK_ACK_NOMEM | ICSK_ACK_NOW);
+ inet_csk_schedule_ack(sk);
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+
+ if (skb_queue_len(&sk->sk_receive_queue) && skb->len) {
+ reason = SKB_DROP_REASON_PROTO_MEM;
+@@ -5875,7 +5875,9 @@ static void tcp_new_space(struct sock *sk)
+ tp->snd_cwnd_stamp = tcp_jiffies32;
+ }
+
+- INDIRECT_CALL_1(sk->sk_write_space, sk_stream_write_space, sk);
++ INDIRECT_CALL_1(READ_ONCE(sk->sk_write_space),
++ sk_stream_write_space,
++ sk);
+ }
+
+ /* Caller made space either from:
+@@ -6091,7 +6093,7 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, const struct tcphdr *t
+ BUG();
+ WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp);
+ if (!sock_flag(sk, SOCK_DEAD))
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ }
+ }
+ }
+@@ -7557,7 +7559,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ sock_put(fastopen_sk);
+ goto drop_and_free;
+ }
+- sk->sk_data_ready(sk);
++ READ_ONCE(sk->sk_data_ready)(sk);
+ bh_unlock_sock(fastopen_sk);
+ sock_put(fastopen_sk);
+ } else {
+diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
+index 0742a41687ffc..12f69cc285577 100644
+--- a/net/ipv4/tcp_minisocks.c
++++ b/net/ipv4/tcp_minisocks.c
+@@ -988,7 +988,7 @@ enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
+ reason = tcp_rcv_state_process(child, skb);
+ /* Wakeup parent, send SIGIO */
+ if (state == TCP_SYN_RECV && child->sk_state != state)
+- parent->sk_data_ready(parent);
++ READ_ONCE(parent->sk_data_ready)(parent);
+ } else {
+ /* Alas, it is possible again, because we do lookup
+ * in main socket hash table and lock on listening
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index ee63af0ef42cc..37258b54a357e 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1786,7 +1786,7 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
+ * using prepare_to_wait_exclusive().
+ */
+ while (nb) {
+- INDIRECT_CALL_1(sk->sk_data_ready,
++ INDIRECT_CALL_1(READ_ONCE(sk->sk_data_ready),
+ sock_def_readable, sk);
+ nb--;
+ }
+diff --git a/net/ipv4/udp_bpf.c b/net/ipv4/udp_bpf.c
+index 91233e37cd97a..779a3a03762f1 100644
+--- a/net/ipv4/udp_bpf.c
++++ b/net/ipv4/udp_bpf.c
+@@ -158,7 +158,7 @@ int udp_bpf_update_proto(struct sock *sk, struct sk_psock *psock, bool restore)
+ int family = sk->sk_family == AF_INET ? UDP_BPF_IPV4 : UDP_BPF_IPV6;
+
+ if (restore) {
+- sk->sk_write_space = psock->saved_write_space;
++ WRITE_ONCE(sk->sk_write_space, psock->saved_write_space);
+ sock_replace_proto(sk, psock->sk_proto);
+ return 0;
+ }
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index f6d56e70c7a2c..6965b9a49d68a 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1785,7 +1785,7 @@ static int unix_stream_connect(struct socket *sock, struct sockaddr_unsized *uad
+ __skb_queue_tail(&other->sk_receive_queue, skb);
+ spin_unlock(&other->sk_receive_queue.lock);
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+ sock_put(other);
+ return 0;
+
+@@ -2278,7 +2278,7 @@ static int unix_dgram_sendmsg(struct socket *sock, struct msghdr *msg,
+ scm_stat_add(other, skb);
+ skb_queue_tail(&other->sk_receive_queue, skb);
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+ sock_put(other);
+ scm_destroy(&scm);
+ return len;
+@@ -2351,7 +2351,7 @@ static int queue_oob(struct sock *sk, struct msghdr *msg, struct sock *other,
+
+ sk_send_sigurg(other);
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+
+ return 0;
+ out_unlock:
+@@ -2477,7 +2477,7 @@ static int unix_stream_sendmsg(struct socket *sock, struct msghdr *msg,
+ spin_unlock(&other->sk_receive_queue.lock);
+
+ unix_state_unlock(other);
+- other->sk_data_ready(other);
++ READ_ONCE(other->sk_data_ready)(other);
+ sent += size;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From a9875f3d27d5a11edc5ac156d143f84815a7cc84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index a818fdc22da9a..525d4eccd194a 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -74,7 +74,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 1405f1061a549..2cbae0f9ae1f0 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -170,7 +170,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From 5d202c434b247a1c8a949a50bbaf979668daf504 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 16:32:56 -0800
+Subject: net: devmem: use READ_ONCE/WRITE_ONCE on binding->dev
+
+From: Bobby Eshleman <bobbyeshleman@meta.com>
+
+[ Upstream commit 40bf00ec2ee271df5ba67593991760adf8b5d0ed ]
+
+binding->dev is protected on the write-side in
+mp_dmabuf_devmem_uninstall() against concurrent writes, but due to the
+concurrent bare reads in net_devmem_get_binding() and
+validate_xmit_unreadable_skb() it should be wrapped in a
+READ_ONCE/WRITE_ONCE pair to make sure no compiler optimizations play
+with the underlying register in unforeseen ways.
+
+Doesn't present a critical bug because the known compiler optimizations
+don't result in bad behavior. There is no tearing on u64, and load
+omissions/invented loads would only break if additional binding->dev
+references were inlined together (they aren't right now).
+
+This just more strictly follows the linux memory model (i.e.,
+"Lock-Protected Writes With Lockless Reads" in
+tools/memory-model/Documentation/access-marking.txt).
+
+Fixes: bd61848900bf ("net: devmem: Implement TX path")
+Signed-off-by: Bobby Eshleman <bobbyeshleman@meta.com>
+Link: https://patch.msgid.link/20260302-devmem-membar-fix-v2-1-5b33c9cbc28b@meta.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/dev.c | 2 +-
+ net/core/devmem.c | 6 ++++--
+ 2 files changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/core/dev.c b/net/core/dev.c
+index 062415cc3e5a4..d45be2357a5ce 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -3983,7 +3983,7 @@ static struct sk_buff *validate_xmit_unreadable_skb(struct sk_buff *skb,
+ if (shinfo->nr_frags > 0) {
+ niov = netmem_to_net_iov(skb_frag_netmem(&shinfo->frags[0]));
+ if (net_is_devmem_iov(niov) &&
+- net_devmem_iov_binding(niov)->dev != dev)
++ READ_ONCE(net_devmem_iov_binding(niov)->dev) != dev)
+ goto out_free;
+ }
+
+diff --git a/net/core/devmem.c b/net/core/devmem.c
+index ec4217d6c0b4f..e9c5d75091800 100644
+--- a/net/core/devmem.c
++++ b/net/core/devmem.c
+@@ -387,7 +387,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
+ * net_device.
+ */
+ dst_dev = dst_dev_rcu(dst);
+- if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
++ if (unlikely(!dst_dev) ||
++ unlikely(dst_dev != READ_ONCE(binding->dev))) {
+ err = -ENODEV;
+ goto out_unlock;
+ }
+@@ -504,7 +505,8 @@ static void mp_dmabuf_devmem_uninstall(void *mp_priv,
+ xa_erase(&binding->bound_rxqs, xa_idx);
+ if (xa_empty(&binding->bound_rxqs)) {
+ mutex_lock(&binding->lock);
+- binding->dev = NULL;
++ ASSERT_EXCLUSIVE_WRITER(binding->dev);
++ WRITE_ONCE(binding->dev, NULL);
+ mutex_unlock(&binding->lock);
+ }
+ break;
+--
+2.51.0
+
--- /dev/null
+From 7c5fd49f69637a6371e99f0e1397c1928102e9d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 18:13:14 -0300
+Subject: net: dsa: realtek: rtl8365mb: fix rtl8365mb_phy_ocp_write return
+ value
+
+From: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+
+[ Upstream commit 7cbe98f7bef965241a5908d50d557008cf998aee ]
+
+Function rtl8365mb_phy_ocp_write() always returns 0, even when an error
+occurs during register access. This patch fixes the return value to
+propagate the actual error code from regmap operations.
+
+Link: https://lore.kernel.org/netdev/a2dfde3c-d46f-434b-9d16-1e251e449068@yahoo.com/
+Fixes: 2796728460b8 ("net: dsa: realtek: rtl8365mb: serialize indirect PHY register access")
+Signed-off-by: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Link: https://patch.msgid.link/20260301-realtek_namiltd_fix1-v1-1-43a6bb707f9c@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/rtl8365mb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index c575e164368c8..f938a3f701cc9 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -769,7 +769,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
+ out:
+ rtl83xx_unlock(priv);
+
+- return 0;
++ return ret;
+ }
+
+ static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+--
+2.51.0
+
--- /dev/null
+From 0d34858dcfc7ce2e212287f71931a7f57e7a3cfb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:49 +0100
+Subject: net: enetc: use truesize as XDP RxQ info frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit f8e18abf183dbd636a8725532c7f5aa58957de84 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects truesize instead of DMA
+write size. Different assumptions in enetc driver configuration lead to
+negative tailroom.
+
+Set frag_size to the same value as frame_sz.
+
+Fixes: 2768b2e2f7d2 ("net: enetc: register XDP RX queues with frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-9-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c
+index e380a4f398556..9fdd448e602f1 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc.c
++++ b/drivers/net/ethernet/freescale/enetc/enetc.c
+@@ -3468,7 +3468,7 @@ static int enetc_int_vector_init(struct enetc_ndev_priv *priv, int i,
+ priv->rx_ring[i] = bdr;
+
+ err = __xdp_rxq_info_reg(&bdr->xdp.rxq, priv->ndev, i, 0,
+- ENETC_RXB_DMA_SIZE_XDP);
++ ENETC_RXB_TRUESIZE);
+ if (err)
+ goto free_vector;
+
+--
+2.51.0
+
--- /dev/null
+From 39545695d6f3fccb08bbde458b24df053a2de392 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 18:56:39 +0100
+Subject: net: ethernet: mtk_eth_soc: Reset prog ptr to old_prog in case of
+ error in mtk_xdp_setup()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 0abc73c8a40fd64ac1739c90bb4f42c418d27a5e ]
+
+Reset the eBPF program pointer to old_prog and do not decrease its ref-count
+if the mtk_open routine in mtk_xdp_setup() fails.
+
+Fixes: 7c26c20da5d42 ("net: ethernet: mtk_eth_soc: add basic XDP support")
+Suggested-by: Paolo Valerio <pvalerio@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260303-mtk-xdp-prog-ptr-fix-v2-1-97b6dbbe240f@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index e68997a29191b..8d3e15bc867d2 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3749,12 +3749,21 @@ static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
++
++ if (netif_running(dev) && need_update) {
++ int err;
++
++ err = mtk_open(dev);
++ if (err) {
++ rcu_assign_pointer(eth->prog, old_prog);
++
++ return err;
++ }
++ }
++
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+- if (netif_running(dev) && need_update)
+- return mtk_open(dev);
+-
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 69d427991555f8f6d79d0cb19ecbb4d55b5d43dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only the host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 5924db6be3fea..9679180504330 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -391,7 +391,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index fbe35af615a6f..9632ad3741de1 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -455,14 +455,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From 5bb7a9c07dd48ef69d9fbcd074af44185cd62b2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 14:02:47 +0800
+Subject: net: ipv4: fix ARM64 alignment fault in multipath hash seed
+
+From: Yung Chih Su <yuuchihsu@gmail.com>
+
+[ Upstream commit 4ee7fa6cf78ff26d783d39e2949d14c4c1cd5e7f ]
+
+`struct sysctl_fib_multipath_hash_seed` contains two u32 fields
+(user_seed and mp_seed), making it an 8-byte structure with a 4-byte
+alignment requirement.
+
+In `fib_multipath_hash_from_keys()`, the code evaluates the entire
+struct atomically via `READ_ONCE()`:
+
+ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
+
+While this silently works on GCC by falling back to unaligned regular
+loads which the ARM64 kernel tolerates, it causes a fatal kernel panic
+when compiled with Clang and LTO enabled.
+
+Commit e35123d83ee3 ("arm64: lto: Strengthen READ_ONCE() to acquire
+when CONFIG_LTO=y") strengthens `READ_ONCE()` to use Load-Acquire
+instructions (`ldar` / `ldapr`) to prevent compiler reordering bugs
+under Clang LTO. Since the macro evaluates the full 8-byte struct,
+Clang emits a 64-bit `ldar` instruction. ARM64 architecture strictly
+requires `ldar` to be naturally aligned, thus executing it on a 4-byte
+aligned address triggers a strict Alignment Fault (FSC = 0x21).
+
+Fix the read side by moving the `READ_ONCE()` directly to the `u32`
+member, which emits a safe 32-bit `ldar Wn`.
+
+Furthermore, Eric Dumazet pointed out that `WRITE_ONCE()` on the entire
+struct in `proc_fib_multipath_hash_set_seed()` is also flawed. Analysis
+shows that Clang splits this 8-byte write into two separate 32-bit
+`str` instructions. While this avoids an alignment fault, it destroys
+atomicity and exposes a tear-write vulnerability. Fix this by
+explicitly splitting the write into two 32-bit `WRITE_ONCE()`
+operations.
+
+Finally, add the missing `READ_ONCE()` when reading `user_seed` in
+`proc_fib_multipath_hash_seed()` to ensure proper pairing and
+concurrency safety.
+
+Fixes: 4ee2a8cace3f ("net: ipv4: Add a sysctl to set multipath hash seed")
+Signed-off-by: Yung Chih Su <yuuchihsu@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260302060247.7066-1-yuuchihsu@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/ip_fib.h | 2 +-
+ net/ipv4/sysctl_net_ipv4.c | 5 +++--
+ 2 files changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
+index b4495c38e0a01..318593743b6e1 100644
+--- a/include/net/ip_fib.h
++++ b/include/net/ip_fib.h
+@@ -559,7 +559,7 @@ static inline u32 fib_multipath_hash_from_keys(const struct net *net,
+ siphash_aligned_key_t hash_key;
+ u32 mp_seed;
+
+- mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
++ mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed);
+ fib_multipath_hash_construct_key(&hash_key, mp_seed);
+
+ return flow_hash_from_keys_seed(keys, &hash_key);
+diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
+index a1a50a5c80dc1..a96875e32050a 100644
+--- a/net/ipv4/sysctl_net_ipv4.c
++++ b/net/ipv4/sysctl_net_ipv4.c
+@@ -486,7 +486,8 @@ static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed)
+ proc_fib_multipath_hash_rand_seed),
+ };
+
+- WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new);
++ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.user_seed, new.user_seed);
++ WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed.mp_seed, new.mp_seed);
+ }
+
+ static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write,
+@@ -500,7 +501,7 @@ static int proc_fib_multipath_hash_seed(const struct ctl_table *table, int write
+ int ret;
+
+ mphs = &net->ipv4.sysctl_fib_multipath_hash_seed;
+- user_seed = mphs->user_seed;
++ user_seed = READ_ONCE(mphs->user_seed);
+
+ tmp = *table;
+ tmp.data = &user_seed;
+--
+2.51.0
+
--- /dev/null
+From 5d25eba73f37b2432be0af5f3310cc7c6fc89f7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index e7d90a28948a4..e01331d965313 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3584,7 +3584,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3626,11 +3625,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 48476a67b63003bf4f2fcc1ee64d8501a927df42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index e419e020a70a3..46681bdaeabff 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1482,10 +1482,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From a6da7e36f89c32963e3eeee3f1282433b313b874 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 17:26:31 +0100
+Subject: net: Provide a PREEMPT_RT specific check for netdev_queue::_xmit_lock
+
+From: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+
+[ Upstream commit b824c3e16c1904bf80df489e293d1e3cbf98896d ]
+
+After acquiring netdev_queue::_xmit_lock the number of the CPU owning
+the lock is recorded in netdev_queue::xmit_lock_owner. This works as
+long as the BH context is not preemptible.
+
+On PREEMPT_RT the softirq context is preemptible and without the
+softirq-lock it is possible to have multiple user in __dev_queue_xmit()
+submitting a skb on the same CPU. This is fine in general but it also means
+that the current CPU is recorded as netdev_queue::xmit_lock_owner.
+This in turn leads to the recursion alert and the skb is dropped.
+
+Instead of checking the CPU number that owns the lock, PREEMPT_RT can
+check if the lock owner matches the current task.
+
+Add netif_tx_owned() which returns true if the current context owns the
+lock by comparing the provided CPU number with the recorded number. This
+resembles the current check by negating the condition (the current check
+returns true if the lock is not owned).
+On PREEMPT_RT use rt_mutex_owner() to return the lock owner and compare
+the current task against it.
+Use the new helper in __dev_queue_xmit() and netif_local_xmit_active()
+which provides a similar check.
+Update comments regarding pairing READ_ONCE().
+
+Reported-by: Bert Karwatzki <spasswolf@web.de>
+Closes: https://lore.kernel.org/all/20260216134333.412332-1-spasswolf@web.de
+Fixes: 3253cb49cbad4 ("softirq: Allow to drop the softirq-BKL lock on PREEMPT_RT")
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Reported-by: Bert Karwatzki <spasswolf@web.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+Link: https://patch.msgid.link/20260302162631.uGUyIqDT@linutronix.de
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/netdevice.h | 27 ++++++++++++++++++++++-----
+ net/core/dev.c | 5 +----
+ net/core/netpoll.c | 2 +-
+ 3 files changed, 24 insertions(+), 10 deletions(-)
+
+diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
+index d99b0fbc1942a..6655b0c6e42b4 100644
+--- a/include/linux/netdevice.h
++++ b/include/linux/netdevice.h
+@@ -4708,7 +4708,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits)
+ static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu)
+ {
+ spin_lock(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, cpu);
+ }
+
+@@ -4726,7 +4726,7 @@ static inline void __netif_tx_release(struct netdev_queue *txq)
+ static inline void __netif_tx_lock_bh(struct netdev_queue *txq)
+ {
+ spin_lock_bh(&txq->_xmit_lock);
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
+
+@@ -4735,7 +4735,7 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+ bool ok = spin_trylock(&txq->_xmit_lock);
+
+ if (likely(ok)) {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, smp_processor_id());
+ }
+ return ok;
+@@ -4743,14 +4743,14 @@ static inline bool __netif_tx_trylock(struct netdev_queue *txq)
+
+ static inline void __netif_tx_unlock(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
+ spin_unlock(&txq->_xmit_lock);
+ }
+
+ static inline void __netif_tx_unlock_bh(struct netdev_queue *txq)
+ {
+- /* Pairs with READ_ONCE() in __dev_queue_xmit() */
++ /* Pairs with READ_ONCE() in netif_tx_owned() */
+ WRITE_ONCE(txq->xmit_lock_owner, -1);
+ spin_unlock_bh(&txq->_xmit_lock);
+ }
+@@ -4843,6 +4843,23 @@ static inline void netif_tx_disable(struct net_device *dev)
+ local_bh_enable();
+ }
+
++#ifndef CONFIG_PREEMPT_RT
++static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
++{
++ /* Other cpus might concurrently change txq->xmit_lock_owner
++ * to -1 or to their cpu id, but not to our id.
++ */
++ return READ_ONCE(txq->xmit_lock_owner) == cpu;
++}
++
++#else
++static inline bool netif_tx_owned(struct netdev_queue *txq, unsigned int cpu)
++{
++ return rt_mutex_owner(&txq->_xmit_lock.lock) == current;
++}
++
++#endif
++
+ static inline void netif_addr_lock(struct net_device *dev)
+ {
+ unsigned char nest_level = 0;
+diff --git a/net/core/dev.c b/net/core/dev.c
+index d45be2357a5ce..994e21a697c39 100644
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -4814,10 +4814,7 @@ int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev)
+ if (dev->flags & IFF_UP) {
+ int cpu = smp_processor_id(); /* ok because BHs are off */
+
+- /* Other cpus might concurrently change txq->xmit_lock_owner
+- * to -1 or to their cpu id, but not to our id.
+- */
+- if (READ_ONCE(txq->xmit_lock_owner) != cpu) {
++ if (!netif_tx_owned(txq, cpu)) {
+ bool is_list = false;
+
+ if (dev_xmit_recursion())
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 09f72f10813cc..5af14f14a3623 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -132,7 +132,7 @@ static int netif_local_xmit_active(struct net_device *dev)
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
+
+- if (READ_ONCE(txq->xmit_lock_owner) == smp_processor_id())
++ if (netif_tx_owned(txq, smp_processor_id()))
+ return 1;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3a4d47d1d6069ecfe5b7e3888c2daa0492b6e986 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 13:23:36 -0700
+Subject: net/rds: Fix circular locking dependency in rds_tcp_tune
+
+From: Allison Henderson <achender@kernel.org>
+
+[ Upstream commit 6a877ececd6daa002a9a0002cd0fbca6592a9244 ]
+
+syzbot reported a circular locking dependency in rds_tcp_tune() where
+sk_net_refcnt_upgrade() is called while holding the socket lock:
+
+======================================================
+WARNING: possible circular locking dependency detected
+======================================================
+kworker/u10:8/15040 is trying to acquire lock:
+ffffffff8e9aaf80 (fs_reclaim){+.+.}-{0:0},
+at: __kmalloc_cache_noprof+0x4b/0x6f0
+
+but task is already holding lock:
+ffff88805a3c1ce0 (k-sk_lock-AF_INET6){+.+.}-{0:0},
+at: rds_tcp_tune+0xd7/0x930
+
+The issue occurs because sk_net_refcnt_upgrade() performs memory
+allocation (via get_net_track() -> ref_tracker_alloc()) while the
+socket lock is held, creating a circular dependency with fs_reclaim.
+
+Fix this by moving sk_net_refcnt_upgrade() outside the socket lock
+critical section. This is safe because the fields modified by the
+sk_net_refcnt_upgrade() call (sk_net_refcnt, ns_tracker) are not
+accessed by any concurrent code path at this point.
+
+v2:
+ - Corrected fixes tag
+ - check patch line wrap nits
+ - ai commentary nits
+
+Reported-by: syzbot+2e2cf5331207053b8106@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2e2cf5331207053b8106
+Fixes: 3a58f13a881e ("net: rds: acquire refcount on TCP sockets")
+Signed-off-by: Allison Henderson <achender@kernel.org>
+Link: https://patch.msgid.link/20260227202336.167757-1-achender@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rds/tcp.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 3cc2f303bf786..b66dfcc3efaa0 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -495,18 +495,24 @@ bool rds_tcp_tune(struct socket *sock)
+ struct rds_tcp_net *rtn;
+
+ tcp_sock_set_nodelay(sock->sk);
+- lock_sock(sk);
+ /* TCP timer functions might access net namespace even after
+ * a process which created this net namespace terminated.
+ */
+ if (!sk->sk_net_refcnt) {
+- if (!maybe_get_net(net)) {
+- release_sock(sk);
++ if (!maybe_get_net(net))
+ return false;
+- }
++ /*
++ * sk_net_refcnt_upgrade() must be called before lock_sock()
++ * because it does a GFP_KERNEL allocation, which can trigger
++ * fs_reclaim and create a circular lock dependency with the
++ * socket lock. The fields it modifies (sk_net_refcnt,
++ * ns_tracker) are not accessed by any concurrent code path
++ * at this point.
++ */
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
+ }
++ lock_sock(sk);
+ rtn = net_generic(net, rds_tcp_netid);
+ if (rtn->sndbuf_size > 0) {
+ sk->sk_sndbuf = rtn->sndbuf_size;
+--
+2.51.0
+
--- /dev/null
+From 5a32a279368ce57795b5b4cac6433ee4443853ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 09:06:02 -0500
+Subject: net/sched: act_ife: Fix metalist update behavior
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit e2cedd400c3ec0302ffca2490e8751772906ac23 ]
+
+Whenever an ife action replace changes the metalist, instead of
+replacing the old data on the metalist, the current ife code is appending
+the new metadata. Aside from being innapropriate behavior, this may lead
+to an unbounded addition of metadata to the metalist which might cause an
+out of bounds error when running the encode op:
+
+[ 138.423369][ C1] ==================================================================
+[ 138.424317][ C1] BUG: KASAN: slab-out-of-bounds in ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.424906][ C1] Write of size 4 at addr ffff8880077f4ffe by task ife_out_out_bou/255
+[ 138.425778][ C1] CPU: 1 UID: 0 PID: 255 Comm: ife_out_out_bou Not tainted 7.0.0-rc1-00169-gfbdfa8da05b6 #624 PREEMPT(full)
+[ 138.425795][ C1] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[ 138.425800][ C1] Call Trace:
+[ 138.425804][ C1] <IRQ>
+[ 138.425808][ C1] dump_stack_lvl (lib/dump_stack.c:122)
+[ 138.425828][ C1] print_report (mm/kasan/report.c:379 mm/kasan/report.c:482)
+[ 138.425839][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425844][ C1] ? __virt_addr_valid (./arch/x86/include/asm/preempt.h:95 (discriminator 1) ./include/linux/rcupdate.h:975 (discriminator 1) ./include/linux/mmzone.h:2207 (discriminator 1) arch/x86/mm/physaddr.c:54 (discriminator 1))
+[ 138.425853][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425859][ C1] kasan_report (mm/kasan/report.c:221 mm/kasan/report.c:597)
+[ 138.425868][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425878][ C1] kasan_check_range (mm/kasan/generic.c:186 (discriminator 1) mm/kasan/generic.c:200 (discriminator 1))
+[ 138.425884][ C1] __asan_memset (mm/kasan/shadow.c:84 (discriminator 2))
+[ 138.425889][ C1] ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425893][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:171)
+[ 138.425898][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425903][ C1] ife_encode_meta_u16 (net/sched/act_ife.c:57)
+[ 138.425910][ C1] ? __pfx_do_raw_spin_lock (kernel/locking/spinlock_debug.c:114)
+[ 138.425916][ C1] ? __asan_memcpy (mm/kasan/shadow.c:105 (discriminator 3))
+[ 138.425921][ C1] ? __pfx_ife_encode_meta_u16 (net/sched/act_ife.c:45)
+[ 138.425927][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425931][ C1] tcf_ife_act (net/sched/act_ife.c:847 net/sched/act_ife.c:879)
+
+To solve this issue, fix the replace behavior by adding the metalist to
+the ife rcu data structure.
+
+Fixes: aa9fd9a325d51 ("sched: act: ife: update parameters via rcu handling")
+Reported-by: Ruitong Liu <cnitlrt@gmail.com>
+Tested-by: Ruitong Liu <cnitlrt@gmail.com>
+Co-developed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20260304140603.76500-1-jhs@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tc_act/tc_ife.h | 4 +-
+ net/sched/act_ife.c | 93 ++++++++++++++++++-------------------
+ 2 files changed, 45 insertions(+), 52 deletions(-)
+
+diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
+index c7f24a2da1cad..24d4d5a62b3c2 100644
+--- a/include/net/tc_act/tc_ife.h
++++ b/include/net/tc_act/tc_ife.h
+@@ -13,15 +13,13 @@ struct tcf_ife_params {
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+-
++ struct list_head metalist;
+ struct rcu_head rcu;
+ };
+
+ struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
+- /* list of metaids allowed */
+- struct list_head metalist;
+ };
+ #define to_ife(a) ((struct tcf_ife_info *)a)
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 8e8f6af731d51..4ad01d4e820db 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -293,8 +293,8 @@ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
+ /* called when adding new meta information
+ */
+ static int __add_metainfo(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool atomic, bool exists)
++ struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len, bool atomic)
+ {
+ struct tcf_meta_info *mi = NULL;
+ int ret = 0;
+@@ -313,45 +313,40 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
+ }
+ }
+
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+- list_add_tail(&mi->metalist, &ife->metalist);
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
++ list_add_tail(&mi->metalist, &p->metalist);
+
+ return ret;
+ }
+
+ static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid,
+- bool exists)
++ struct tcf_ife_params *p, u32 metaid)
+ {
+ int ret;
+
+ if (!try_module_get(ops->owner))
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++ ret = __add_metainfo(ops, p, metaid, NULL, 0, true);
+ if (ret)
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool exists)
++static int add_metainfo(struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len)
+ {
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+
+ if (!ops)
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++ ret = __add_metainfo(ops, p, metaid, metaval, len, false);
+ if (ret)
+ /*put back what find_ife_oplist took */
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
++static int use_all_metadata(struct tcf_ife_params *p)
+ {
+ struct tcf_meta_ops *o;
+ int rc = 0;
+@@ -359,7 +354,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+- rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
++ rc = add_metainfo_and_get_ops(o, p, o->metaid);
+ if (rc == 0)
+ installed += 1;
+ }
+@@ -371,7 +366,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ return -EINVAL;
+ }
+
+-static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int dump_metalist(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+ struct tcf_meta_info *e;
+ struct nlattr *nest;
+@@ -379,14 +374,14 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ int total_encoded = 0;
+
+ /*can only happen on decode */
+- if (list_empty(&ife->metalist))
++ if (list_empty(&p->metalist))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
+ if (!nest)
+ goto out_nlmsg_trim;
+
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry(e, &p->metalist, metalist) {
+ if (!e->ops->get(skb, e))
+ total_encoded += 1;
+ }
+@@ -403,13 +398,11 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ return -1;
+ }
+
+-/* under ife->tcf_lock */
+-static void _tcf_ife_cleanup(struct tc_action *a)
++static void __tcf_ife_cleanup(struct tcf_ife_params *p)
+ {
+- struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_meta_info *e, *n;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_safe(e, n, &p->metalist, metalist) {
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+@@ -422,18 +415,23 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ }
+ }
+
++static void tcf_ife_cleanup_params(struct rcu_head *head)
++{
++ struct tcf_ife_params *p = container_of(head, struct tcf_ife_params,
++ rcu);
++
++ __tcf_ife_cleanup(p);
++ kfree(p);
++}
++
+ static void tcf_ife_cleanup(struct tc_action *a)
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_ife_params *p;
+
+- spin_lock_bh(&ife->tcf_lock);
+- _tcf_ife_cleanup(a);
+- spin_unlock_bh(&ife->tcf_lock);
+-
+ p = rcu_dereference_protected(ife->params, 1);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+ }
+
+ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+@@ -455,8 +453,7 @@ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+ return 0;
+ }
+
+-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+- bool exists, bool rtnl_held)
++static int populate_metalist(struct tcf_ife_params *p, struct nlattr **tb)
+ {
+ int len = 0;
+ int rc = 0;
+@@ -468,7 +465,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+- rc = add_metainfo(ife, i, val, len, exists);
++ rc = add_metainfo(p, i, val, len);
+ if (rc)
+ return rc;
+ }
+@@ -523,6 +520,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
++ INIT_LIST_HEAD(&p->metalist);
+
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+@@ -567,8 +565,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ ife = to_ife(*a);
+- if (ret == ACT_P_CREATED)
+- INIT_LIST_HEAD(&ife->metalist);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+@@ -600,8 +596,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (tb[TCA_IFE_METALST]) {
+- err = populate_metalist(ife, tb2, exists,
+- !(flags & TCA_ACT_FLAGS_NO_RTNL));
++ err = populate_metalist(p, tb2);
+ if (err)
+ goto metadata_parse_err;
+ } else {
+@@ -610,7 +605,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ * as we can. You better have at least one else we are
+ * going to bail out
+ */
+- err = use_all_metadata(ife, exists);
++ err = use_all_metadata(p);
+ if (err)
+ goto metadata_parse_err;
+ }
+@@ -626,13 +621,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+
+ return ret;
+ metadata_parse_err:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ release_idr:
++ __tcf_ife_cleanup(p);
+ kfree(p);
+ tcf_idr_release(*a, bind);
+ return err;
+@@ -679,7 +675,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
+ goto nla_put_failure;
+
+- if (dump_metalist(skb, ife)) {
++ if (dump_metalist(skb, p)) {
+ /*ignore failure to dump metalist */
+ pr_info("Failed to dump metalist\n");
+ }
+@@ -693,13 +689,13 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ return -1;
+ }
+
+-static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
++static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_params *p,
+ u16 metaid, u16 mlen, void *mdata)
+ {
+ struct tcf_meta_info *e;
+
+ /* XXX: use hash to speed up */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (metaid == e->metaid) {
+ if (e->ops) {
+ /* We check for decode presence already */
+@@ -716,10 +712,13 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ int action = ife->tcf_action;
++ struct tcf_ife_params *p;
+ u8 *ifehdr_end;
+ u8 *tlv_data;
+ u16 metalen;
+
++ p = rcu_dereference_bh(ife->params);
++
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ tcf_lastuse_update(&ife->tcf_tm);
+
+@@ -745,7 +744,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ return TC_ACT_SHOT;
+ }
+
+- if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
++ if (find_decode_metaid(skb, p, mtype, dlen, curr_data)) {
+ /* abuse overlimits to count when we receive metadata
+ * but dont have an ops for it
+ */
+@@ -769,12 +768,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ /*XXX: check if we can do this at install time instead of current
+ * send data path
+ **/
+-static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+- struct tcf_meta_info *e, *n;
++ struct tcf_meta_info *e;
+ int tot_run_sz = 0, run_sz = 0;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->check_presence) {
+ run_sz = e->ops->check_presence(skb, e);
+ tot_run_sz += run_sz;
+@@ -795,7 +794,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+ where ORIGDATA = original ethernet header ...
+ */
+- u16 metalen = ife_get_sz(skb, ife);
++ u16 metalen = ife_get_sz(skb, p);
+ int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
+ unsigned int skboff = 0;
+ int new_len = skb->len + hdrm;
+@@ -833,25 +832,21 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ if (!ife_meta)
+ goto drop;
+
+- spin_lock(&ife->tcf_lock);
+-
+ /* XXX: we dont have a clever way of telling encode to
+ * not repeat some of the computations that are done by
+ * ops->presence_check...
+ */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->encode) {
+ err = e->ops->encode(skb, (void *)(ife_meta + skboff),
+ e);
+ }
+ if (err < 0) {
+ /* too corrupt to keep around if overwritten */
+- spin_unlock(&ife->tcf_lock);
+ goto drop;
+ }
+ skboff += err;
+ }
+- spin_unlock(&ife->tcf_lock);
+ oethh = (struct ethhdr *)skb->data;
+
+ if (!is_zero_ether_addr(p->eth_src))
+--
+2.51.0
+
--- /dev/null
+From 1e325a487deda3aebfb1165fc58052f77b0a291f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Feb 2026 23:53:07 +0900
+Subject: net: sched: avoid qdisc_reset_all_tx_gt() vs dequeue race for
+ lockless qdiscs
+
+From: Koichiro Den <den@valinux.co.jp>
+
+[ Upstream commit 7f083faf59d14c04e01ec05a7507f036c965acf8 ]
+
+When shrinking the number of real tx queues,
+netif_set_real_num_tx_queues() calls qdisc_reset_all_tx_gt() to flush
+qdiscs for queues which will no longer be used.
+
+qdisc_reset_all_tx_gt() currently serializes qdisc_reset() with
+qdisc_lock(). However, for lockless qdiscs, the dequeue path is
+serialized by qdisc_run_begin/end() using qdisc->seqlock instead, so
+qdisc_reset() can run concurrently with __qdisc_run() and free skbs
+while they are still being dequeued, leading to UAF.
+
+This can easily be reproduced on e.g. virtio-net by imposing heavy
+traffic while frequently changing the number of queue pairs:
+
+ iperf3 -ub0 -c $peer -t 0 &
+ while :; do
+ ethtool -L eth0 combined 1
+ ethtool -L eth0 combined 2
+ done
+
+With KASAN enabled, this leads to reports like:
+
+ BUG: KASAN: slab-use-after-free in __qdisc_run+0x133f/0x1760
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ __qdisc_run+0x133f/0x1760
+ __dev_queue_xmit+0x248f/0x3550
+ ip_finish_output2+0xa42/0x2110
+ ip_output+0x1a7/0x410
+ ip_send_skb+0x2e6/0x480
+ udp_send_skb+0xb0a/0x1590
+ udp_sendmsg+0x13c9/0x1fc0
+ ...
+ </TASK>
+
+ Allocated by task 1270 on cpu 5 at 44.558414s:
+ ...
+ alloc_skb_with_frags+0x84/0x7c0
+ sock_alloc_send_pskb+0x69a/0x830
+ __ip_append_data+0x1b86/0x48c0
+ ip_make_skb+0x1e8/0x2b0
+ udp_sendmsg+0x13a6/0x1fc0
+ ...
+
+ Freed by task 1306 on cpu 3 at 44.558445s:
+ ...
+ kmem_cache_free+0x117/0x5e0
+ pfifo_fast_reset+0x14d/0x580
+ qdisc_reset+0x9e/0x5f0
+ netif_set_real_num_tx_queues+0x303/0x840
+ virtnet_set_channels+0x1bf/0x260 [virtio_net]
+ ethnl_set_channels+0x684/0xae0
+ ethnl_default_set_doit+0x31a/0x890
+ ...
+
+Serialize qdisc_reset_all_tx_gt() against the lockless dequeue path by
+taking qdisc->seqlock for TCQ_F_NOLOCK qdiscs, matching the
+serialization model already used by dev_reset_queue().
+
+Additionally clear QDISC_STATE_NON_EMPTY after reset so the qdisc state
+reflects an empty queue, avoiding needless re-scheduling.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Signed-off-by: Koichiro Den <den@valinux.co.jp>
+Link: https://patch.msgid.link/20260228145307.3955532-1-den@valinux.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index c3a7268b567e0..d5d55cb21686d 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -778,13 +778,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+ struct Qdisc *qdisc;
++ bool nolock;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
++ nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++ if (nolock)
++ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
++ if (nolock) {
++ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
++ spin_unlock_bh(&qdisc->seqlock);
++ }
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From 69120b00df996dbf465457246c8423be2d439ae9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:28 +0000
+Subject: net: stmmac: Defer VLAN HW configuration when interface is down
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 2cd70e3968f505996d5fefdf7ca684f0f4575734 ]
+
+VLAN register accesses on the MAC side require the PHY RX clock to be
+active. When the network interface is down, the PHY is suspended and
+the RX clock is unavailable, causing VLAN operations to fail with
+timeouts.
+
+The VLAN core automatically removes VID 0 after the interface goes down
+and re-adds it when it comes back up, so these timeouts happen during
+normal interface down/up:
+
+ # ip link set end1 down
+ renesas-gbeth 15c40000.ethernet end1: Timeout accessing MAC_VLAN_Tag_Filter
+ renesas-gbeth 15c40000.ethernet end1: failed to kill vid 0081/0
+
+Adding VLANs while the interface is down also fails:
+
+ # ip link add link end1 name end1.10 type vlan id 10
+ renesas-gbeth 15c40000.ethernet end1: Timeout accessing MAC_VLAN_Tag_Filter
+ RTNETLINK answers: Device or resource busy
+
+To fix this, check if the interface is up before accessing VLAN registers.
+The software state is always kept up to date regardless of interface state.
+
+When the interface is brought up, stmmac_vlan_restore() is called
+to write the VLAN state to hardware.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-5-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 3 ++
+ .../net/ethernet/stmicro/stmmac/stmmac_vlan.c | 42 ++++++++++---------
+ 2 files changed, 26 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 293efccb5d312..a76b2acffde5a 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6756,6 +6756,9 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ hash = 0;
+ }
+
++ if (!netif_running(priv->dev))
++ return 0;
++
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
+ }
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+index fcc34867405ed..e24efe3bfedbe 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+@@ -76,7 +76,9 @@ static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ }
+
+ hw->vlan_filter[0] = vid;
+- vlan_write_single(dev, vid);
++
++ if (netif_running(dev))
++ vlan_write_single(dev, vid);
+
+ return 0;
+ }
+@@ -97,12 +99,15 @@ static int vlan_add_hw_rx_fltr(struct net_device *dev,
+ return -EPERM;
+ }
+
+- ret = vlan_write_filter(dev, hw, index, val);
++ if (netif_running(dev)) {
++ ret = vlan_write_filter(dev, hw, index, val);
++ if (ret)
++ return ret;
++ }
+
+- if (!ret)
+- hw->vlan_filter[index] = val;
++ hw->vlan_filter[index] = val;
+
+- return ret;
++ return 0;
+ }
+
+ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+@@ -115,7 +120,9 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+- vlan_write_single(dev, 0);
++
++ if (netif_running(dev))
++ vlan_write_single(dev, 0);
+ }
+ return 0;
+ }
+@@ -124,22 +131,23 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) &&
+ ((hw->vlan_filter[i] & VLAN_TAG_DATA_VID) == vid)) {
+- ret = vlan_write_filter(dev, hw, i, 0);
+
+- if (!ret)
+- hw->vlan_filter[i] = 0;
+- else
+- return ret;
++ if (netif_running(dev)) {
++ ret = vlan_write_filter(dev, hw, i, 0);
++ if (ret)
++ return ret;
++ }
++
++ hw->vlan_filter[i] = 0;
+ }
+ }
+
+- return ret;
++ return 0;
+ }
+
+ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+ {
+- u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+@@ -149,12 +157,8 @@ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+- for (i = 0; i < hw->num_vlan; i++) {
+- if (hw->vlan_filter[i] & VLAN_TAG_DATA_VEN) {
+- val = hw->vlan_filter[i];
+- vlan_write_filter(dev, hw, i, val);
+- }
+- }
++ for (i = 0; i < hw->num_vlan; i++)
++ vlan_write_filter(dev, hw, i, hw->vlan_filter[i]);
+ }
+
+ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+--
+2.51.0
+
--- /dev/null
+From 9eea2bbf17895f8a0d3758b5a20e1db0678bae43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:25 +0000
+Subject: net: stmmac: Fix error handling in VLAN add and delete paths
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 35dfedce442c4060cfe5b98368bc9643fb995716 ]
+
+stmmac_vlan_rx_add_vid() updates active_vlans and the VLAN hash
+register before writing the HW filter entry. If the filter write
+fails, it leaves a stale VID in active_vlans and the hash register.
+
+stmmac_vlan_rx_kill_vid() has the reverse problem: it clears
+active_vlans before removing the HW filter. On failure, the VID is
+gone from active_vlans but still present in the HW filter table.
+
+To fix this, reorder the operations to update the hash table first,
+then attempt the HW filter operation. If the HW filter fails, roll
+back both the active_vlans bitmap and the hash table by calling
+stmmac_vlan_update() again.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-2-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index f98fd254315f6..74dc64c5156e9 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6781,9 +6781,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ clear_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto err_pm_put;
++ }
+ }
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6807,15 +6811,21 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
++ ret = stmmac_vlan_update(priv, is_double);
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ goto del_vlan_error;
++ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto del_vlan_error;
++ }
+ }
+
+- ret = stmmac_vlan_update(priv, is_double);
+-
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+--
+2.51.0
+
--- /dev/null
+From 6e3f220e71cc0870ca970e4fe943300ecaa9b3af Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:27 +0000
+Subject: net: stmmac: Fix VLAN HW state restore
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit bd7ad51253a76fb35886d01cfe9a37f0e4ed6709 ]
+
+When the network interface is opened or resumed, a DMA reset is performed,
+which resets all hardware state, including VLAN state. Currently, only
+the resume path is restoring the VLAN state via
+stmmac_restore_hw_vlan_rx_fltr(), but that is incomplete: the VLAN hash
+table and the VLAN_TAG control bits are not restored.
+
+Therefore, add stmmac_vlan_restore(), which restores the full VLAN
+state by updating both the HW filter entries and the hash table, and
+call it from both the open and resume paths.
+
+The VLAN restore is moved outside of phylink_rx_clk_stop_block/unblock
+in the resume path because receive clock stop is already disabled when
+stmmac supports VLAN.
+
+Also, remove the hash readback code in vlan_restore_hw_rx_fltr() that
+attempts to restore VTHM by reading VLAN_HASH_TABLE, as it always reads
+zero after DMA reset, making it dead code.
+
+Fixes: 3cd1cfcba26e ("net: stmmac: Implement VLAN Hash Filtering in XGMAC")
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-4-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 24 +++++++++++++++++--
+ .../net/ethernet/stmicro/stmmac/stmmac_vlan.c | 10 --------
+ 2 files changed, 22 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index a21c34d23681c..293efccb5d312 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -140,6 +140,7 @@ static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+ static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+ static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
++static int stmmac_vlan_restore(struct stmmac_priv *priv);
+
+ #ifdef CONFIG_DEBUG_FS
+ static const struct net_device_ops stmmac_netdev_ops;
+@@ -4099,6 +4100,8 @@ static int __stmmac_open(struct net_device *dev,
+
+ phylink_start(priv->phylink);
+
++ stmmac_vlan_restore(priv);
++
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+@@ -6840,6 +6843,23 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ return ret;
+ }
+
++static int stmmac_vlan_restore(struct stmmac_priv *priv)
++{
++ int ret;
++
++ if (!(priv->dev->features & NETIF_F_VLAN_FEATURES))
++ return 0;
++
++ if (priv->hw->num_vlan)
++ stmmac_restore_hw_vlan_rx_fltr(priv, priv->dev, priv->hw);
++
++ ret = stmmac_vlan_update(priv, priv->num_double_vlans);
++ if (ret)
++ netdev_err(priv->dev, "Failed to restore VLANs\n");
++
++ return ret;
++}
++
+ static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+ {
+ struct stmmac_priv *priv = netdev_priv(dev);
+@@ -8233,10 +8253,10 @@ int stmmac_resume(struct device *dev)
+ stmmac_init_coalesce(priv);
+ phylink_rx_clk_stop_block(priv->phylink);
+ stmmac_set_rx_mode(ndev);
+-
+- stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+ phylink_rx_clk_stop_unblock(priv->phylink);
+
++ stmmac_vlan_restore(priv);
++
+ stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+index de1a70e1c86ef..fcc34867405ed 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+@@ -139,9 +139,6 @@ static int vlan_del_hw_rx_fltr(struct net_device *dev,
+ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+ {
+- void __iomem *ioaddr = hw->pcsr;
+- u32 value;
+- u32 hash;
+ u32 val;
+ int i;
+
+@@ -158,13 +155,6 @@ static void vlan_restore_hw_rx_fltr(struct net_device *dev,
+ vlan_write_filter(dev, hw, i, val);
+ }
+ }
+-
+- hash = readl(ioaddr + VLAN_HASH_TABLE);
+- if (hash & VLAN_VLHT) {
+- value = readl(ioaddr + VLAN_TAG);
+- value |= VLAN_VTHM;
+- writel(value, ioaddr + VLAN_TAG);
+- }
+ }
+
+ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+--
+2.51.0
+
--- /dev/null
+From c7d901f9b04126cc4e94a4793c796b94a8ac4a01 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:26 +0000
+Subject: net: stmmac: Improve double VLAN handling
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit e38200e361cbe331806dc454c76c11c7cd95e1b9 ]
+
+The double VLAN bits (EDVLP, ESVL, DOVLTC) are handled inconsistently
+between the two vlan_update_hash() implementations:
+
+- dwxgmac2_update_vlan_hash() explicitly clears the double VLAN bits when
+is_double is false, meaning that adding a 802.1Q VLAN will disable
+double VLAN mode:
+
+ $ ip link add link eth0 name eth0.200 type vlan id 200 protocol 802.1ad
+ $ ip link add link eth0 name eth0.100 type vlan id 100
+ # Double VLAN bits no longer set
+
+- vlan_update_hash() sets these bits and only clears them when the last
+VLAN has been removed, so double VLAN mode remains enabled even after all
+802.1AD VLANs are removed.
+
+Address both issues by tracking the number of active 802.1AD VLANs in
+priv->num_double_vlans. Pass this count to stmmac_vlan_update() so both
+implementations correctly set the double VLAN bits when any 802.1AD
+VLAN is active, and clear them only when none remain.
+
+Also update vlan_update_hash() to explicitly clear the double VLAN bits
+when is_double is false, matching the dwxgmac2 behavior.
+
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-3-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: bd7ad51253a7 ("net: stmmac: Fix VLAN HW state restore")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/stmicro/stmmac/stmmac.h | 1 +
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 16 ++++++++++++----
+ .../net/ethernet/stmicro/stmmac/stmmac_vlan.c | 8 ++++++++
+ 3 files changed, 21 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+index 012b0a477255d..a2e838688118b 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+@@ -324,6 +324,7 @@ struct stmmac_priv {
+ void __iomem *ptpaddr;
+ void __iomem *estaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
++ unsigned int num_double_vlans;
+ int sfty_irq;
+ int sfty_ce_irq;
+ int sfty_ue_irq;
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 74dc64c5156e9..a21c34d23681c 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6762,6 +6762,7 @@ static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+ {
+ struct stmmac_priv *priv = netdev_priv(ndev);
++ unsigned int num_double_vlans;
+ bool is_double = false;
+ int ret;
+
+@@ -6773,7 +6774,8 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ is_double = true;
+
+ set_bit(vid, priv->active_vlans);
+- ret = stmmac_vlan_update(priv, is_double);
++ num_double_vlans = priv->num_double_vlans + is_double;
++ ret = stmmac_vlan_update(priv, num_double_vlans);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ goto err_pm_put;
+@@ -6783,11 +6785,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+- stmmac_vlan_update(priv, is_double);
++ stmmac_vlan_update(priv, priv->num_double_vlans);
+ goto err_pm_put;
+ }
+ }
+
++ priv->num_double_vlans = num_double_vlans;
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6800,6 +6804,7 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+ {
+ struct stmmac_priv *priv = netdev_priv(ndev);
++ unsigned int num_double_vlans;
+ bool is_double = false;
+ int ret;
+
+@@ -6811,7 +6816,8 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+- ret = stmmac_vlan_update(priv, is_double);
++ num_double_vlans = priv->num_double_vlans - is_double;
++ ret = stmmac_vlan_update(priv, num_double_vlans);
+ if (ret) {
+ set_bit(vid, priv->active_vlans);
+ goto del_vlan_error;
+@@ -6821,11 +6827,13 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret) {
+ set_bit(vid, priv->active_vlans);
+- stmmac_vlan_update(priv, is_double);
++ stmmac_vlan_update(priv, priv->num_double_vlans);
+ goto del_vlan_error;
+ }
+ }
+
++ priv->num_double_vlans = num_double_vlans;
++
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+index b18404dd5a8be..de1a70e1c86ef 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_vlan.c
+@@ -183,6 +183,10 @@ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
++ } else {
++ value &= ~VLAN_EDVLP;
++ value &= ~VLAN_ESVL;
++ value &= ~VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + VLAN_TAG);
+@@ -193,6 +197,10 @@ static void vlan_update_hash(struct mac_device_info *hw, u32 hash,
+ value |= VLAN_EDVLP;
+ value |= VLAN_ESVL;
+ value |= VLAN_DOVLTC;
++ } else {
++ value &= ~VLAN_EDVLP;
++ value &= ~VLAN_ESVL;
++ value &= ~VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + VLAN_TAG);
+--
+2.51.0
+
--- /dev/null
+From d2f06ea50525474b11b80aaa3cabbb6ffdde6e13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 15:53:56 +0530
+Subject: net: ti: icssg-prueth: Fix ping failure after offload mode setup when
+ link speed is not 1G
+
+From: MD Danish Anwar <danishanwar@ti.com>
+
+[ Upstream commit 147792c395db870756a0dc87ce656c75ae7ab7e8 ]
+
+When both eth interfaces with links up are added to a bridge or hsr
+interface, ping fails if the link speed is not 1Gbps (e.g., 100Mbps).
+
+The issue is seen because when switching to offload (bridge/hsr) mode,
+prueth_emac_restart() restarts the firmware and clears DRAM with
+memset_io(), setting all memory to 0. This includes PORT_LINK_SPEED_OFFSET
+which firmware reads for link speed. The value 0 corresponds to
+FW_LINK_SPEED_1G (0x00), so for 1Gbps links the default value is correct
+and ping works. For 100Mbps links, the firmware needs FW_LINK_SPEED_100M
+(0x01) but gets 0 instead, causing ping to fail. The function
+emac_adjust_link() is called to reconfigure, but it detects no state change
+(emac->link is still 1, speed/duplex match PHY) so new_state remains false
+and icssg_config_set_speed() is never called to correct the firmware speed
+value.
+
+The fix resets emac->link to 0 before calling emac_adjust_link() in
+prueth_emac_common_start(). This forces new_state=true, ensuring
+icssg_config_set_speed() is called to write the correct speed value to
+firmware memory.
+
+Fixes: 06feac15406f ("net: ti: icssg-prueth: Fix emac link speed handling")
+Signed-off-by: MD Danish Anwar <danishanwar@ti.com>
+Link: https://patch.msgid.link/20260226102356.2141871-1-danishanwar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/icssg/icssg_prueth.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+index f65041662173c..2c6e161225f6a 100644
+--- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c
++++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c
+@@ -273,6 +273,14 @@ static int prueth_emac_common_start(struct prueth *prueth)
+ if (ret)
+ goto disable_class;
+
++ /* Reset link state to force reconfiguration in
++ * emac_adjust_link(). Without this, if the link was already up
++ * before restart, emac_adjust_link() won't detect any state
++ * change and will skip critical configuration like writing
++ * speed to firmware.
++ */
++ emac->link = 0;
++
+ mutex_lock(&emac->ndev->phydev->lock);
+ emac_adjust_link(emac->ndev);
+ mutex_unlock(&emac->ndev->phydev->lock);
+--
+2.51.0
+
--- /dev/null
+From 6a17b1528460a19718b1321a66138ffb6825b784 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index e957aa12a8a44..2a140be86bafc 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2130,6 +2130,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++ /* check if nd_tbl is not initiliazed due to
++ * ipv6.disable=1 set during boot
++ */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From 98159c5fc3c9e34b06255613d17271382efb48e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 01:56:40 +0000
+Subject: net_sched: sch_fq: clear q->band_pkt_count[] in fq_reset()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit a4c2b8be2e5329e7fac6e8f64ddcb8958155cfcb ]
+
+When/if a NIC resets, queues are deactivated by dev_deactivate_many(),
+then reactivated when the reset operation completes.
+
+fq_reset() removes all the skbs from various queues.
+
+If we do not clear q->band_pkt_count[], these counters keep growing
+and can eventually reach sch->limit, preventing new packets to be queued.
+
+Many thanks to Praveen for discovering the root cause.
+
+Fixes: 29f834aa326e ("net_sched: sch_fq: add 3 bands and WRR scheduling")
+Diagnosed-by: Praveen Kaligineedi <pkaligineedi@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Neal Cardwell <ncardwell@google.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20260304015640.961780-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_fq.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
+index 6e5f2f4f24154..b570128ae10a6 100644
+--- a/net/sched/sch_fq.c
++++ b/net/sched/sch_fq.c
+@@ -829,6 +829,7 @@ static void fq_reset(struct Qdisc *sch)
+ for (idx = 0; idx < FQ_BANDS; idx++) {
+ q->band_flows[idx].new_flows.first = NULL;
+ q->band_flows[idx].old_flows.first = NULL;
++ q->band_pkt_count[idx] = 0;
+ }
+ q->delayed = RB_ROOT;
+ q->flows = 0;
+--
+2.51.0
+
--- /dev/null
+From 9e364ddafe249a8ef39312025b0b08da3601a182 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 23:28:15 +0100
+Subject: netfilter: nf_tables: clone set on flush only
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit fb7fb4016300ac622c964069e286dc83166a5d52 ]
+
+Syzbot with fault injection triggered a failing memory allocation with
+GFP_KERNEL which results in a WARN splat:
+
+iter.err
+WARNING: net/netfilter/nf_tables_api.c:845 at nft_map_deactivate+0x34e/0x3c0 net/netfilter/nf_tables_api.c:845, CPU#0: syz.0.17/5992
+Modules linked in:
+CPU: 0 UID: 0 PID: 5992 Comm: syz.0.17 Not tainted syzkaller #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 02/12/2026
+RIP: 0010:nft_map_deactivate+0x34e/0x3c0 net/netfilter/nf_tables_api.c:845
+Code: 8b 05 86 5a 4e 09 48 3b 84 24 a0 00 00 00 75 62 48 8d 65 d8 5b 41 5c 41 5d 41 5e 41 5f 5d c3 cc cc cc cc cc e8 63 6d fa f7 90 <0f> 0b 90 43
++80 7c 35 00 00 0f 85 23 fe ff ff e9 26 fe ff ff 89 d9
+RSP: 0018:ffffc900045af780 EFLAGS: 00010293
+RAX: ffffffff89ca45bd RBX: 00000000fffffff4 RCX: ffff888028111e40
+RDX: 0000000000000000 RSI: 00000000fffffff4 RDI: 0000000000000000
+RBP: ffffc900045af870 R08: 0000000000400dc0 R09: 00000000ffffffff
+R10: dffffc0000000000 R11: fffffbfff1d141db R12: ffffc900045af7e0
+R13: 1ffff920008b5f24 R14: dffffc0000000000 R15: ffffc900045af920
+FS: 000055557a6a5500(0000) GS:ffff888125496000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fb5ea271fc0 CR3: 000000003269e000 CR4: 00000000003526f0
+Call Trace:
+ <TASK>
+ __nft_release_table+0xceb/0x11f0 net/netfilter/nf_tables_api.c:12115
+ nft_rcv_nl_event+0xc25/0xdb0 net/netfilter/nf_tables_api.c:12187
+ notifier_call_chain+0x19d/0x3a0 kernel/notifier.c:85
+ blocking_notifier_call_chain+0x6a/0x90 kernel/notifier.c:380
+ netlink_release+0x123b/0x1ad0 net/netlink/af_netlink.c:761
+ __sock_release net/socket.c:662 [inline]
+ sock_close+0xc3/0x240 net/socket.c:1455
+
+Restrict set clone to the flush set command in the preparation phase.
+Add NFT_ITER_UPDATE_CLONE and use it for this purpose, update the rbtree
+and pipapo backends to only clone the set when this iteration type is
+used.
+
+As for the existing NFT_ITER_UPDATE type, update the pipapo backend to
+use the existing set clone if available, otherwise use the existing set
+representation. After this update, there is no need to clone a set that
+is being deleted, this includes bound anonymous set.
+
+An alternative approach to NFT_ITER_UPDATE_CLONE is to add a .clone
+interface and call it from the flush set path.
+
+Reported-by: syzbot+4924a0edc148e8b4b342@syzkaller.appspotmail.com
+Fixes: 3f1d886cc7c3 ("netfilter: nft_set_pipapo: move cloning of match info to insert/removal path")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 2 ++
+ net/netfilter/nf_tables_api.c | 10 +++++++++-
+ net/netfilter/nft_set_hash.c | 1 +
+ net/netfilter/nft_set_pipapo.c | 11 +++++++++--
+ net/netfilter/nft_set_rbtree.c | 8 +++++---
+ 5 files changed, 26 insertions(+), 6 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index f1b67b40dd4de..077d3121cc9f1 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -317,11 +317,13 @@ static inline void *nft_elem_priv_cast(const struct nft_elem_priv *priv)
+ * @NFT_ITER_UNSPEC: unspecified, to catch errors
+ * @NFT_ITER_READ: read-only iteration over set elements
+ * @NFT_ITER_UPDATE: iteration under mutex to update set element state
++ * @NFT_ITER_UPDATE_CLONE: clone set before iteration under mutex to update element
+ */
+ enum nft_iter_type {
+ NFT_ITER_UNSPEC,
+ NFT_ITER_READ,
+ NFT_ITER_UPDATE,
++ NFT_ITER_UPDATE_CLONE,
+ };
+
+ struct nft_set;
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 92fed8723b8f9..7b357a2a871ed 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -832,6 +832,11 @@ static void nft_map_catchall_deactivate(const struct nft_ctx *ctx,
+ }
+ }
+
++/* Use NFT_ITER_UPDATE iterator even if this may be called from the preparation
++ * phase, the set clone might already exist from a previous command, or it might
++ * be a set that is going away and does not require a clone. The netns and
++ * netlink release paths also need to work on the live set.
++ */
+ static void nft_map_deactivate(const struct nft_ctx *ctx, struct nft_set *set)
+ {
+ struct nft_set_iter iter = {
+@@ -7891,9 +7896,12 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx,
+
+ static int nft_set_flush(struct nft_ctx *ctx, struct nft_set *set, u8 genmask)
+ {
++ /* The set backend might need to clone the set, do it now from the
++ * preparation phase, use NFT_ITER_UPDATE_CLONE iterator type.
++ */
+ struct nft_set_iter iter = {
+ .genmask = genmask,
+- .type = NFT_ITER_UPDATE,
++ .type = NFT_ITER_UPDATE_CLONE,
+ .fn = nft_setelem_flush,
+ };
+
+diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
+index 739b992bde591..b0e571c8e3f38 100644
+--- a/net/netfilter/nft_set_hash.c
++++ b/net/netfilter/nft_set_hash.c
+@@ -374,6 +374,7 @@ static void nft_rhash_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ {
+ switch (iter->type) {
+ case NFT_ITER_UPDATE:
++ case NFT_ITER_UPDATE_CLONE:
+ /* only relevant for netlink dumps which use READ type */
+ WARN_ON_ONCE(iter->skip != 0);
+
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index 18e1903b1d3d0..cd0d2d4ae36bf 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -2145,13 +2145,20 @@ static void nft_pipapo_walk(const struct nft_ctx *ctx, struct nft_set *set,
+ const struct nft_pipapo_match *m;
+
+ switch (iter->type) {
+- case NFT_ITER_UPDATE:
++ case NFT_ITER_UPDATE_CLONE:
+ m = pipapo_maybe_clone(set);
+ if (!m) {
+ iter->err = -ENOMEM;
+ return;
+ }
+-
++ nft_pipapo_do_walk(ctx, set, m, iter);
++ break;
++ case NFT_ITER_UPDATE:
++ if (priv->clone)
++ m = priv->clone;
++ else
++ m = rcu_dereference_protected(priv->match,
++ nft_pipapo_transaction_mutex_held(set));
+ nft_pipapo_do_walk(ctx, set, m, iter);
+ break;
+ case NFT_ITER_READ:
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index a4fb5b517d9de..5d91b7d08d33a 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -810,13 +810,15 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
+ struct nft_rbtree *priv = nft_set_priv(set);
+
+ switch (iter->type) {
+- case NFT_ITER_UPDATE:
+- lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
+-
++ case NFT_ITER_UPDATE_CLONE:
+ if (nft_array_may_resize(set) < 0) {
+ iter->err = -ENOMEM;
+ break;
+ }
++ fallthrough;
++ case NFT_ITER_UPDATE:
++ lockdep_assert_held(&nft_pernet(ctx->net)->commit_mutex);
++
+ nft_rbtree_do_walk(ctx, set, iter);
+ break;
+ case NFT_ITER_READ:
+--
+2.51.0
+
--- /dev/null
+From e88d05d303afb99a7b768a9138c1e692056756da Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 23:12:37 +0100
+Subject: netfilter: nf_tables: unconditionally bump set->nelems before
+ insertion
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit def602e498a4f951da95c95b1b8ce8ae68aa733a ]
+
+In case that the set is full, a new element gets published then removed
+without waiting for the RCU grace period, while RCU reader can be
+walking over it already.
+
+To address this issue, add the element transaction even if set is full,
+but toggle the set_full flag to report -ENFILE so the abort path safely
+unwinds the set to its previous state.
+
+As for element updates, decrement set->nelems to restore it.
+
+A simpler fix is to call synchronize_rcu() in the error path.
+However, with a large batch adding elements to already maxed-out set,
+this could cause noticeable slowdown of such batches.
+
+Fixes: 35d0ac9070ef ("netfilter: nf_tables: fix set->nelems counting with no NLM_F_EXCL")
+Reported-by: Inseo An <y0un9sa@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 30 ++++++++++++++++--------------
+ 1 file changed, 16 insertions(+), 14 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 6d1b34a97ec7f..92fed8723b8f9 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -7169,6 +7169,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ struct nft_data_desc desc;
+ enum nft_registers dreg;
+ struct nft_trans *trans;
++ bool set_full = false;
+ u64 expiration;
+ u64 timeout;
+ int err, i;
+@@ -7455,10 +7456,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ if (err < 0)
+ goto err_elem_free;
+
++ if (!(flags & NFT_SET_ELEM_CATCHALL)) {
++ unsigned int max = nft_set_maxsize(set), nelems;
++
++ nelems = atomic_inc_return(&set->nelems);
++ if (nelems > max)
++ set_full = true;
++ }
++
+ trans = nft_trans_elem_alloc(ctx, NFT_MSG_NEWSETELEM, set);
+ if (trans == NULL) {
+ err = -ENOMEM;
+- goto err_elem_free;
++ goto err_set_size;
+ }
+
+ ext->genmask = nft_genmask_cur(ctx->net);
+@@ -7510,7 +7519,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+
+ ue->priv = elem_priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans);
+- goto err_elem_free;
++ goto err_set_size;
+ }
+ }
+ }
+@@ -7528,23 +7537,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
+ goto err_element_clash;
+ }
+
+- if (!(flags & NFT_SET_ELEM_CATCHALL)) {
+- unsigned int max = nft_set_maxsize(set);
+-
+- if (!atomic_add_unless(&set->nelems, 1, max)) {
+- err = -ENFILE;
+- goto err_set_full;
+- }
+- }
+-
+ nft_trans_container_elem(trans)->elems[0].priv = elem.priv;
+ nft_trans_commit_list_add_elem(ctx->net, trans);
+- return 0;
+
+-err_set_full:
+- nft_setelem_remove(ctx->net, set, elem.priv);
++ return set_full ? -ENFILE : 0;
++
+ err_element_clash:
+ kfree(trans);
++err_set_size:
++ if (!(flags & NFT_SET_ELEM_CATCHALL))
++ atomic_dec(&set->nelems);
+ err_elem_free:
+ nf_tables_set_elem_destroy(ctx, set, elem.priv);
+ err_parse_data:
+--
+2.51.0
+
--- /dev/null
+From 37fe75188aeffffc2f44733275f95530e468dfef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 16:31:32 +0100
+Subject: netfilter: nft_set_pipapo: split gc into unlink and reclaim phase
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 9df95785d3d8302f7c066050117b04cd3c2048c2 ]
+
+Yiming Qian reports Use-after-free in the pipapo set type:
+ Under a large number of expired elements, commit-time GC can run for a very
+ long time in a non-preemptible context, triggering soft lockup warnings and
+ RCU stall reports (local denial of service).
+
+We must split GC in an unlink and a reclaim phase.
+
+We cannot queue elements for freeing until pointers have been swapped.
+Expired elements are still exposed to both the packet path and userspace
+dumpers via the live copy of the data structure.
+
+call_rcu() does not protect us: dump operations or element lookups starting
+after call_rcu has fired can still observe the free'd element, unless the
+commit phase has made enough progress to swap the clone and live pointers
+before any new reader has picked up the old version.
+
+This is a similar approach to the one done recently for the rbtree backend in
+commit 35f83a75529a ("netfilter: nft_set_rbtree: don't gc elements on insert").
+
+Fixes: 3c4287f62044 ("nf_tables: Add set type for arbitrary concatenation of ranges")
+Reported-by: Yiming Qian <yimingqian591@gmail.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables.h | 5 +++
+ net/netfilter/nf_tables_api.c | 5 ---
+ net/netfilter/nft_set_pipapo.c | 51 ++++++++++++++++++++++++++-----
+ net/netfilter/nft_set_pipapo.h | 2 ++
+ 4 files changed, 50 insertions(+), 13 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
+index 077d3121cc9f1..c18cffafc9696 100644
+--- a/include/net/netfilter/nf_tables.h
++++ b/include/net/netfilter/nf_tables.h
+@@ -1860,6 +1860,11 @@ struct nft_trans_gc {
+ struct rcu_head rcu;
+ };
+
++static inline int nft_trans_gc_space(const struct nft_trans_gc *trans)
++{
++ return NFT_TRANS_GC_BATCHCOUNT - trans->count;
++}
++
+ static inline void nft_ctx_update(struct nft_ctx *ctx,
+ const struct nft_trans *trans)
+ {
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 7b357a2a871ed..a3865924a505d 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -10480,11 +10480,6 @@ static void nft_trans_gc_queue_work(struct nft_trans_gc *trans)
+ schedule_work(&trans_gc_work);
+ }
+
+-static int nft_trans_gc_space(struct nft_trans_gc *trans)
+-{
+- return NFT_TRANS_GC_BATCHCOUNT - trans->count;
+-}
+-
+ struct nft_trans_gc *nft_trans_gc_queue_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq, gfp_t gfp)
+ {
+diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
+index cd0d2d4ae36bf..d9b74d588c768 100644
+--- a/net/netfilter/nft_set_pipapo.c
++++ b/net/netfilter/nft_set_pipapo.c
+@@ -1681,11 +1681,11 @@ static void nft_pipapo_gc_deactivate(struct net *net, struct nft_set *set,
+ }
+
+ /**
+- * pipapo_gc() - Drop expired entries from set, destroy start and end elements
++ * pipapo_gc_scan() - Drop expired entries from set and link them to gc list
+ * @set: nftables API set representation
+ * @m: Matching data
+ */
+-static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
++static void pipapo_gc_scan(struct nft_set *set, struct nft_pipapo_match *m)
+ {
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct net *net = read_pnet(&set->net);
+@@ -1698,6 +1698,8 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ if (!gc)
+ return;
+
++ list_add(&gc->list, &priv->gc_head);
++
+ while ((rules_f0 = pipapo_rules_same_key(m->f, first_rule))) {
+ union nft_pipapo_map_bucket rulemap[NFT_PIPAPO_MAX_FIELDS];
+ const struct nft_pipapo_field *f;
+@@ -1725,9 +1727,13 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ * NFT_SET_ELEM_DEAD_BIT.
+ */
+ if (__nft_set_elem_expired(&e->ext, tstamp)) {
+- gc = nft_trans_gc_queue_sync(gc, GFP_KERNEL);
+- if (!gc)
+- return;
++ if (!nft_trans_gc_space(gc)) {
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
++ if (!gc)
++ return;
++
++ list_add(&gc->list, &priv->gc_head);
++ }
+
+ nft_pipapo_gc_deactivate(net, set, e);
+ pipapo_drop(m, rulemap);
+@@ -1741,10 +1747,30 @@ static void pipapo_gc(struct nft_set *set, struct nft_pipapo_match *m)
+ }
+ }
+
+- gc = nft_trans_gc_catchall_sync(gc);
++ priv->last_gc = jiffies;
++}
++
++/**
++ * pipapo_gc_queue() - Free expired elements
++ * @set: nftables API set representation
++ */
++static void pipapo_gc_queue(struct nft_set *set)
++{
++ struct nft_pipapo *priv = nft_set_priv(set);
++ struct nft_trans_gc *gc, *next;
++
++ /* always do a catchall cycle: */
++ gc = nft_trans_gc_alloc(set, 0, GFP_KERNEL);
+ if (gc) {
++ gc = nft_trans_gc_catchall_sync(gc);
++ if (gc)
++ nft_trans_gc_queue_sync_done(gc);
++ }
++
++ /* always purge queued gc elements. */
++ list_for_each_entry_safe(gc, next, &priv->gc_head, list) {
++ list_del(&gc->list);
+ nft_trans_gc_queue_sync_done(gc);
+- priv->last_gc = jiffies;
+ }
+ }
+
+@@ -1798,6 +1824,10 @@ static void pipapo_reclaim_match(struct rcu_head *rcu)
+ *
+ * We also need to create a new working copy for subsequent insertions and
+ * deletions.
++ *
++ * After the live copy has been replaced by the clone, we can safely queue
++ * expired elements that have been collected by pipapo_gc_scan() for
++ * memory reclaim.
+ */
+ static void nft_pipapo_commit(struct nft_set *set)
+ {
+@@ -1808,7 +1838,7 @@ static void nft_pipapo_commit(struct nft_set *set)
+ return;
+
+ if (time_after_eq(jiffies, priv->last_gc + nft_set_gc_interval(set)))
+- pipapo_gc(set, priv->clone);
++ pipapo_gc_scan(set, priv->clone);
+
+ old = rcu_replace_pointer(priv->match, priv->clone,
+ nft_pipapo_transaction_mutex_held(set));
+@@ -1816,6 +1846,8 @@ static void nft_pipapo_commit(struct nft_set *set)
+
+ if (old)
+ call_rcu(&old->rcu, pipapo_reclaim_match);
++
++ pipapo_gc_queue(set);
+ }
+
+ static void nft_pipapo_abort(const struct nft_set *set)
+@@ -2280,6 +2312,7 @@ static int nft_pipapo_init(const struct nft_set *set,
+ f->mt = NULL;
+ }
+
++ INIT_LIST_HEAD(&priv->gc_head);
+ rcu_assign_pointer(priv->match, m);
+
+ return 0;
+@@ -2329,6 +2362,8 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx,
+ struct nft_pipapo *priv = nft_set_priv(set);
+ struct nft_pipapo_match *m;
+
++ WARN_ON_ONCE(!list_empty(&priv->gc_head));
++
+ m = rcu_dereference_protected(priv->match, true);
+
+ if (priv->clone) {
+diff --git a/net/netfilter/nft_set_pipapo.h b/net/netfilter/nft_set_pipapo.h
+index eaab422aa56ab..9aee9a9eaeb75 100644
+--- a/net/netfilter/nft_set_pipapo.h
++++ b/net/netfilter/nft_set_pipapo.h
+@@ -156,12 +156,14 @@ struct nft_pipapo_match {
+ * @clone: Copy where pending insertions and deletions are kept
+ * @width: Total bytes to be matched for one packet, including padding
+ * @last_gc: Timestamp of last garbage collection run, jiffies
++ * @gc_head: list of nft_trans_gc to queue up for mem reclaim
+ */
+ struct nft_pipapo {
+ struct nft_pipapo_match __rcu *match;
+ struct nft_pipapo_match *clone;
+ int width;
+ unsigned long last_gc;
++ struct list_head gc_head;
+ };
+
+ struct nft_pipapo_elem;
+--
+2.51.0
+
--- /dev/null
+From 90f6eb2199840ec9b370f602c87305a7288d7ad6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 13:32:33 +0000
+Subject: netfs: Fix unbuffered/DIO writes to dispatch subrequests in strict
+ sequence
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit a0b4c7a49137ed21279f354eb59f49ddae8dffc2 ]
+
+Fix netfslib such that when it's making an unbuffered or DIO write, to make
+sure that it sends each subrequest strictly sequentially, waiting till the
+previous one is 'committed' before sending the next so that we don't have
+pieces landing out of order and potentially leaving a hole if an error
+occurs (ENOSPC for example).
+
+This is done by copying in just those bits of issuing, collecting and
+retrying subrequests that are necessary to do one subrequest at a time.
+Retrying, in particular, is simpler because if the current subrequest needs
+retrying, the source iterator can just be copied again and the subrequest
+prepped and issued again without needing to be concerned about whether it
+needs merging with the previous or next in the sequence.
+
+Note that the issuing loop waits for a subrequest to complete right after
+issuing it, but this wait could be moved elsewhere allowing preparatory
+steps to be performed whilst the subrequest is in progress. In particular,
+once content encryption is available in netfslib, that could be done whilst
+waiting, as could cleanup of buffers that have been completed.
+
+Fixes: 153a9961b551 ("netfs: Implement unbuffered/DIO write support")
+Signed-off-by: David Howells <dhowells@redhat.com>
+Link: https://patch.msgid.link/58526.1772112753@warthog.procyon.org.uk
+Tested-by: Steve French <sfrench@samba.org>
+Reviewed-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/netfs/direct_write.c | 228 ++++++++++++++++++++++++++++++++---
+ fs/netfs/internal.h | 4 +-
+ fs/netfs/write_collect.c | 21 ----
+ fs/netfs/write_issue.c | 41 +------
+ include/trace/events/netfs.h | 4 +-
+ 5 files changed, 221 insertions(+), 77 deletions(-)
+
+diff --git a/fs/netfs/direct_write.c b/fs/netfs/direct_write.c
+index a9d1c3b2c0842..dd1451bf7543d 100644
+--- a/fs/netfs/direct_write.c
++++ b/fs/netfs/direct_write.c
+@@ -9,6 +9,202 @@
+ #include <linux/uio.h>
+ #include "internal.h"
+
++/*
++ * Perform the cleanup rituals after an unbuffered write is complete.
++ */
++static void netfs_unbuffered_write_done(struct netfs_io_request *wreq)
++{
++ struct netfs_inode *ictx = netfs_inode(wreq->inode);
++
++ _enter("R=%x", wreq->debug_id);
++
++ /* Okay, declare that all I/O is complete. */
++ trace_netfs_rreq(wreq, netfs_rreq_trace_write_done);
++
++ if (!wreq->error)
++ netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
++
++ if (wreq->origin == NETFS_DIO_WRITE &&
++ wreq->mapping->nrpages) {
++ /* mmap may have got underfoot and we may now have folios
++ * locally covering the region we just wrote. Attempt to
++ * discard the folios, but leave in place any modified locally.
++ * ->write_iter() is prevented from interfering by the DIO
++ * counter.
++ */
++ pgoff_t first = wreq->start >> PAGE_SHIFT;
++ pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
++
++ invalidate_inode_pages2_range(wreq->mapping, first, last);
++ }
++
++ if (wreq->origin == NETFS_DIO_WRITE)
++ inode_dio_end(wreq->inode);
++
++ _debug("finished");
++ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
++ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
++
++ if (wreq->iocb) {
++ size_t written = umin(wreq->transferred, wreq->len);
++
++ wreq->iocb->ki_pos += written;
++ if (wreq->iocb->ki_complete) {
++ trace_netfs_rreq(wreq, netfs_rreq_trace_ki_complete);
++ wreq->iocb->ki_complete(wreq->iocb, wreq->error ?: written);
++ }
++ wreq->iocb = VFS_PTR_POISON;
++ }
++
++ netfs_clear_subrequests(wreq);
++}
++
++/*
++ * Collect the subrequest results of unbuffered write subrequests.
++ */
++static void netfs_unbuffered_write_collect(struct netfs_io_request *wreq,
++ struct netfs_io_stream *stream,
++ struct netfs_io_subrequest *subreq)
++{
++ trace_netfs_collect_sreq(wreq, subreq);
++
++ spin_lock(&wreq->lock);
++ list_del_init(&subreq->rreq_link);
++ spin_unlock(&wreq->lock);
++
++ wreq->transferred += subreq->transferred;
++ iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
++
++ stream->collected_to = subreq->start + subreq->transferred;
++ wreq->collected_to = stream->collected_to;
++ netfs_put_subrequest(subreq, netfs_sreq_trace_put_done);
++
++ trace_netfs_collect_stream(wreq, stream);
++ trace_netfs_collect_state(wreq, wreq->collected_to, 0);
++}
++
++/*
++ * Write data to the server without going through the pagecache and without
++ * writing it to the local cache. We dispatch the subrequests serially and
++ * wait for each to complete before dispatching the next, lest we leave a gap
++ * in the data written due to a failure such as ENOSPC. We could, however
++ * attempt to do preparation such as content encryption for the next subreq
++ * whilst the current is in progress.
++ */
++static int netfs_unbuffered_write(struct netfs_io_request *wreq)
++{
++ struct netfs_io_subrequest *subreq = NULL;
++ struct netfs_io_stream *stream = &wreq->io_streams[0];
++ int ret;
++
++ _enter("%llx", wreq->len);
++
++ if (wreq->origin == NETFS_DIO_WRITE)
++ inode_dio_begin(wreq->inode);
++
++ stream->collected_to = wreq->start;
++
++ for (;;) {
++ bool retry = false;
++
++ if (!subreq) {
++ netfs_prepare_write(wreq, stream, wreq->start + wreq->transferred);
++ subreq = stream->construct;
++ stream->construct = NULL;
++ stream->front = NULL;
++ }
++
++ /* Check if (re-)preparation failed. */
++ if (unlikely(test_bit(NETFS_SREQ_FAILED, &subreq->flags))) {
++ netfs_write_subrequest_terminated(subreq, subreq->error);
++ wreq->error = subreq->error;
++ break;
++ }
++
++ iov_iter_truncate(&subreq->io_iter, wreq->len - wreq->transferred);
++ if (!iov_iter_count(&subreq->io_iter))
++ break;
++
++ subreq->len = netfs_limit_iter(&subreq->io_iter, 0,
++ stream->sreq_max_len,
++ stream->sreq_max_segs);
++ iov_iter_truncate(&subreq->io_iter, subreq->len);
++ stream->submit_extendable_to = subreq->len;
++
++ trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
++ stream->issue_write(subreq);
++
++ /* Async, need to wait. */
++ netfs_wait_for_in_progress_stream(wreq, stream);
++
++ if (test_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags)) {
++ retry = true;
++ } else if (test_bit(NETFS_SREQ_FAILED, &subreq->flags)) {
++ ret = subreq->error;
++ wreq->error = ret;
++ netfs_see_subrequest(subreq, netfs_sreq_trace_see_failed);
++ subreq = NULL;
++ break;
++ }
++ ret = 0;
++
++ if (!retry) {
++ netfs_unbuffered_write_collect(wreq, stream, subreq);
++ subreq = NULL;
++ if (wreq->transferred >= wreq->len)
++ break;
++ if (!wreq->iocb && signal_pending(current)) {
++ ret = wreq->transferred ? -EINTR : -ERESTARTSYS;
++ trace_netfs_rreq(wreq, netfs_rreq_trace_intr);
++ break;
++ }
++ continue;
++ }
++
++ /* We need to retry the last subrequest, so first reset the
++ * iterator, taking into account what, if anything, we managed
++ * to transfer.
++ */
++ subreq->error = -EAGAIN;
++ trace_netfs_sreq(subreq, netfs_sreq_trace_retry);
++ if (subreq->transferred > 0)
++ iov_iter_advance(&wreq->buffer.iter, subreq->transferred);
++
++ if (stream->source == NETFS_UPLOAD_TO_SERVER &&
++ wreq->netfs_ops->retry_request)
++ wreq->netfs_ops->retry_request(wreq, stream);
++
++ __clear_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
++ __clear_bit(NETFS_SREQ_BOUNDARY, &subreq->flags);
++ __clear_bit(NETFS_SREQ_FAILED, &subreq->flags);
++ subreq->io_iter = wreq->buffer.iter;
++ subreq->start = wreq->start + wreq->transferred;
++ subreq->len = wreq->len - wreq->transferred;
++ subreq->transferred = 0;
++ subreq->retry_count += 1;
++ stream->sreq_max_len = UINT_MAX;
++ stream->sreq_max_segs = INT_MAX;
++
++ netfs_get_subrequest(subreq, netfs_sreq_trace_get_resubmit);
++ stream->prepare_write(subreq);
++
++ __set_bit(NETFS_SREQ_IN_PROGRESS, &subreq->flags);
++ netfs_stat(&netfs_n_wh_retry_write_subreq);
++ }
++
++ netfs_unbuffered_write_done(wreq);
++ _leave(" = %d", ret);
++ return ret;
++}
++
++static void netfs_unbuffered_write_async(struct work_struct *work)
++{
++ struct netfs_io_request *wreq = container_of(work, struct netfs_io_request, work);
++
++ netfs_unbuffered_write(wreq);
++ netfs_put_request(wreq, netfs_rreq_trace_put_complete);
++}
++
+ /*
+ * Perform an unbuffered write where we may have to do an RMW operation on an
+ * encrypted file. This can also be used for direct I/O writes.
+@@ -70,35 +266,35 @@ ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *
+ */
+ wreq->buffer.iter = *iter;
+ }
++
++ wreq->len = iov_iter_count(&wreq->buffer.iter);
+ }
+
+ __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+- if (async)
+- __set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &wreq->flags);
+
+ /* Copy the data into the bounce buffer and encrypt it. */
+ // TODO
+
+ /* Dispatch the write. */
+ __set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
+- if (async)
+- wreq->iocb = iocb;
+- wreq->len = iov_iter_count(&wreq->buffer.iter);
+- ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), wreq->len);
+- if (ret < 0) {
+- _debug("begin = %zd", ret);
+- goto out;
+- }
+
+- if (!async) {
+- ret = netfs_wait_for_write(wreq);
+- if (ret > 0)
+- iocb->ki_pos += ret;
+- } else {
++ if (async) {
++ INIT_WORK(&wreq->work, netfs_unbuffered_write_async);
++ wreq->iocb = iocb;
++ queue_work(system_dfl_wq, &wreq->work);
+ ret = -EIOCBQUEUED;
++ } else {
++ ret = netfs_unbuffered_write(wreq);
++ if (ret < 0) {
++ _debug("begin = %zd", ret);
++ } else {
++ iocb->ki_pos += wreq->transferred;
++ ret = wreq->transferred ?: wreq->error;
++ }
++
++ netfs_put_request(wreq, netfs_rreq_trace_put_complete);
+ }
+
+-out:
+ netfs_put_request(wreq, netfs_rreq_trace_put_return);
+ return ret;
+
+diff --git a/fs/netfs/internal.h b/fs/netfs/internal.h
+index 4319611f53544..d436e20d34185 100644
+--- a/fs/netfs/internal.h
++++ b/fs/netfs/internal.h
+@@ -198,6 +198,9 @@ struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+ struct file *file,
+ loff_t start,
+ enum netfs_io_origin origin);
++void netfs_prepare_write(struct netfs_io_request *wreq,
++ struct netfs_io_stream *stream,
++ loff_t start);
+ void netfs_reissue_write(struct netfs_io_stream *stream,
+ struct netfs_io_subrequest *subreq,
+ struct iov_iter *source);
+@@ -212,7 +215,6 @@ int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_c
+ struct folio **writethrough_cache);
+ ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+ struct folio *writethrough_cache);
+-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
+
+ /*
+ * write_retry.c
+diff --git a/fs/netfs/write_collect.c b/fs/netfs/write_collect.c
+index 61eab34ea67ef..83eb3dc1adf8a 100644
+--- a/fs/netfs/write_collect.c
++++ b/fs/netfs/write_collect.c
+@@ -399,27 +399,6 @@ bool netfs_write_collection(struct netfs_io_request *wreq)
+ ictx->ops->invalidate_cache(wreq);
+ }
+
+- if ((wreq->origin == NETFS_UNBUFFERED_WRITE ||
+- wreq->origin == NETFS_DIO_WRITE) &&
+- !wreq->error)
+- netfs_update_i_size(ictx, &ictx->inode, wreq->start, wreq->transferred);
+-
+- if (wreq->origin == NETFS_DIO_WRITE &&
+- wreq->mapping->nrpages) {
+- /* mmap may have got underfoot and we may now have folios
+- * locally covering the region we just wrote. Attempt to
+- * discard the folios, but leave in place any modified locally.
+- * ->write_iter() is prevented from interfering by the DIO
+- * counter.
+- */
+- pgoff_t first = wreq->start >> PAGE_SHIFT;
+- pgoff_t last = (wreq->start + wreq->transferred - 1) >> PAGE_SHIFT;
+- invalidate_inode_pages2_range(wreq->mapping, first, last);
+- }
+-
+- if (wreq->origin == NETFS_DIO_WRITE)
+- inode_dio_end(wreq->inode);
+-
+ _debug("finished");
+ netfs_wake_rreq_flag(wreq, NETFS_RREQ_IN_PROGRESS, netfs_rreq_trace_wake_ip);
+ /* As we cleared NETFS_RREQ_IN_PROGRESS, we acquired its ref. */
+diff --git a/fs/netfs/write_issue.c b/fs/netfs/write_issue.c
+index 34894da5a23ec..437268f656409 100644
+--- a/fs/netfs/write_issue.c
++++ b/fs/netfs/write_issue.c
+@@ -154,9 +154,9 @@ EXPORT_SYMBOL(netfs_prepare_write_failed);
+ * Prepare a write subrequest. We need to allocate a new subrequest
+ * if we don't have one.
+ */
+-static void netfs_prepare_write(struct netfs_io_request *wreq,
+- struct netfs_io_stream *stream,
+- loff_t start)
++void netfs_prepare_write(struct netfs_io_request *wreq,
++ struct netfs_io_stream *stream,
++ loff_t start)
+ {
+ struct netfs_io_subrequest *subreq;
+ struct iov_iter *wreq_iter = &wreq->buffer.iter;
+@@ -698,41 +698,6 @@ ssize_t netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_c
+ return ret;
+ }
+
+-/*
+- * Write data to the server without going through the pagecache and without
+- * writing it to the local cache.
+- */
+-int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len)
+-{
+- struct netfs_io_stream *upload = &wreq->io_streams[0];
+- ssize_t part;
+- loff_t start = wreq->start;
+- int error = 0;
+-
+- _enter("%zx", len);
+-
+- if (wreq->origin == NETFS_DIO_WRITE)
+- inode_dio_begin(wreq->inode);
+-
+- while (len) {
+- // TODO: Prepare content encryption
+-
+- _debug("unbuffered %zx", len);
+- part = netfs_advance_write(wreq, upload, start, len, false);
+- start += part;
+- len -= part;
+- rolling_buffer_advance(&wreq->buffer, part);
+- if (test_bit(NETFS_RREQ_PAUSE, &wreq->flags))
+- netfs_wait_for_paused_write(wreq);
+- if (test_bit(NETFS_RREQ_FAILED, &wreq->flags))
+- break;
+- }
+-
+- netfs_end_issue_write(wreq);
+- _leave(" = %d", error);
+- return error;
+-}
+-
+ /*
+ * Write some of a pending folio data back to the server and/or the cache.
+ */
+diff --git a/include/trace/events/netfs.h b/include/trace/events/netfs.h
+index 64a382fbc31a8..2d366be46a1c3 100644
+--- a/include/trace/events/netfs.h
++++ b/include/trace/events/netfs.h
+@@ -57,6 +57,7 @@
+ EM(netfs_rreq_trace_done, "DONE ") \
+ EM(netfs_rreq_trace_end_copy_to_cache, "END-C2C") \
+ EM(netfs_rreq_trace_free, "FREE ") \
++ EM(netfs_rreq_trace_intr, "INTR ") \
+ EM(netfs_rreq_trace_ki_complete, "KI-CMPL") \
+ EM(netfs_rreq_trace_recollect, "RECLLCT") \
+ EM(netfs_rreq_trace_redirty, "REDIRTY") \
+@@ -169,7 +170,8 @@
+ EM(netfs_sreq_trace_put_oom, "PUT OOM ") \
+ EM(netfs_sreq_trace_put_wip, "PUT WIP ") \
+ EM(netfs_sreq_trace_put_work, "PUT WORK ") \
+- E_(netfs_sreq_trace_put_terminated, "PUT TERM ")
++ EM(netfs_sreq_trace_put_terminated, "PUT TERM ") \
++ E_(netfs_sreq_trace_see_failed, "SEE FAILED ")
+
+ #define netfs_folio_traces \
+ EM(netfs_folio_is_uptodate, "mod-uptodate") \
+--
+2.51.0
+
--- /dev/null
+From 16f4267f9764c584fb63e04e8a77d1ab32b00b59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 78f4131af3cf3..5f98c73db5afd 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ timer_delete_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From cf83bfc592025d5e4272abc62ab3203b83de027f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:43 -0800
+Subject: nfc: nci: complete pending data exchange on device close
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 66083581945bd5b8e99fe49b5aeb83d03f62d053 ]
+
+In nci_close_device(), complete any pending data exchange before
+closing. The data exchange callback (e.g.
+rawsock_data_exchange_complete) holds a socket reference.
+
+NIPA occasionally hits this leak:
+
+unreferenced object 0xff1100000f435000 (size 2048):
+ comm "nci_dev", pid 3954, jiffies 4295441245
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 27 00 01 40 00 00 00 00 00 00 00 00 00 00 00 00 '..@............
+ backtrace (crc ec2b3c5):
+ __kmalloc_noprof+0x4db/0x730
+ sk_prot_alloc.isra.0+0xe4/0x1d0
+ sk_alloc+0x36/0x760
+ rawsock_create+0xd1/0x540
+ nfc_sock_create+0x11f/0x280
+ __sock_create+0x22d/0x630
+ __sys_socket+0x115/0x1d0
+ __x64_sys_socket+0x72/0xd0
+ do_syscall_64+0x117/0xfc0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-4-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index f6dc0a94b8d54..d334b7aa8c172 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -567,6 +567,10 @@ static int nci_close_device(struct nci_dev *ndev)
+ flush_workqueue(ndev->cmd_wq);
+ timer_delete_sync(&ndev->cmd_timer);
+ timer_delete_sync(&ndev->data_timer);
++ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ nci_data_exchange_complete(ndev, NULL,
++ ndev->cur_conn_id,
++ -ENODEV);
+ mutex_unlock(&ndev->req_lock);
+ return 0;
+ }
+@@ -598,6 +602,11 @@ static int nci_close_device(struct nci_dev *ndev)
+ flush_workqueue(ndev->cmd_wq);
+
+ timer_delete_sync(&ndev->cmd_timer);
++ timer_delete_sync(&ndev->data_timer);
++
++ if (test_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ nci_data_exchange_complete(ndev, NULL, ndev->cur_conn_id,
++ -ENODEV);
+
+ /* Clear flags except NCI_UNREG */
+ ndev->flags &= BIT(NCI_UNREG);
+--
+2.51.0
+
--- /dev/null
+From 85e16d00d5979c454abda6c9911dd2f1a0b0348c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 46681bdaeabff..f6dc0a94b8d54 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1035,18 +1035,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From 0b05c2c8616316fa8055786b545e12262d09c32d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index b049022399aea..f7d7a599fade7 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -67,6 +67,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From 66e89b96cacf2d2cff3e650f0d1404ac1ad157dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 31 Jan 2026 22:48:08 +0800
+Subject: nvme: fix admin queue leak on controller reset
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit b84bb7bd913d8ca2f976ee6faf4a174f91c02b8d ]
+
+When nvme_alloc_admin_tag_set() is called during a controller reset,
+a previous admin queue may still exist. Release it properly before
+allocating a new one to avoid orphaning the old queue.
+
+This fixes a regression introduced by commit 03b3bcd319b3 ("nvme: fix
+admin request_queue lifetime").
+
+Cc: Keith Busch <kbusch@kernel.org>
+Fixes: 03b3bcd319b3 ("nvme: fix admin request_queue lifetime")
+Reported-and-tested-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/linux-block/CAHj4cs9wv3SdPo+N01Fw2SHBYDs9tj2M_e1-GdQOkRy=DsBB1w@mail.gmail.com/
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 7bf228df6001f..3fdcd73b95468 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -4864,6 +4864,13 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
+ if (ret)
+ return ret;
+
++ /*
++ * If a previous admin queue exists (e.g., from before a reset),
++ * put it now before allocating a new one to avoid orphaning it.
++ */
++ if (ctrl->admin_q)
++ blk_put_queue(ctrl->admin_q);
++
+ ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
+ if (IS_ERR(ctrl->admin_q)) {
+ ret = PTR_ERR(ctrl->admin_q);
+--
+2.51.0
+
--- /dev/null
+From 41767b9a6fd59b69ed20b6669075e781955f4f43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 19:19:28 -0500
+Subject: nvme: fix memory allocation in nvme_pr_read_keys()
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit c3320153769f05fd7fe9d840cb555dd3080ae424 ]
+
+nvme_pr_read_keys() takes num_keys from userspace and uses it to
+calculate the allocation size for rse via struct_size(). The upper
+limit is PR_KEYS_MAX (64K).
+
+A malicious or buggy userspace can pass a large num_keys value that
+results in a 4MB allocation attempt at most, causing a warning in
+the page allocator when the order exceeds MAX_PAGE_ORDER.
+
+To fix this, use kvzalloc() instead of kzalloc().
+
+This bug has the same reasoning and fix with the patch below:
+https://lore.kernel.org/linux-block/20251212013510.3576091-1-kartikey406@gmail.com/
+
+Warning log:
+WARNING: mm/page_alloc.c:5216 at __alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216, CPU#1: syz-executor117/272
+Modules linked in:
+CPU: 1 UID: 0 PID: 272 Comm: syz-executor117 Not tainted 6.19.0 #1 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+RIP: 0010:__alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216
+Code: ff 83 bd a8 fe ff ff 0a 0f 86 69 fb ff ff 0f b6 1d f9 f9 c4 04 80 fb 01 0f 87 3b 76 30 ff 83 e3 01 75 09 c6 05 e4 f9 c4 04 01 <0f> 0b 48 c7 85 70 fe ff ff 00 00 00 00 e9 8f fd ff ff 31 c0 e9 0d
+RSP: 0018:ffffc90000fcf450 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 1ffff920001f9ea0
+RDX: 0000000000000000 RSI: 000000000000000b RDI: 0000000000040dc0
+RBP: ffffc90000fcf648 R08: ffff88800b6c3380 R09: 0000000000000001
+R10: ffffc90000fcf840 R11: ffff88807ffad280 R12: 0000000000000000
+R13: 0000000000040dc0 R14: 0000000000000001 R15: ffffc90000fcf620
+FS: 0000555565db33c0(0000) GS:ffff8880be26c000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000002000000c CR3: 0000000003b72000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ alloc_pages_mpol+0x236/0x4d0 mm/mempolicy.c:2486
+ alloc_frozen_pages_noprof+0x149/0x180 mm/mempolicy.c:2557
+ ___kmalloc_large_node+0x10c/0x140 mm/slub.c:5598
+ __kmalloc_large_node_noprof+0x25/0xc0 mm/slub.c:5629
+ __do_kmalloc_node mm/slub.c:5645 [inline]
+ __kmalloc_noprof+0x483/0x6f0 mm/slub.c:5669
+ kmalloc_noprof include/linux/slab.h:961 [inline]
+ kzalloc_noprof include/linux/slab.h:1094 [inline]
+ nvme_pr_read_keys+0x8f/0x4c0 drivers/nvme/host/pr.c:245
+ blkdev_pr_read_keys block/ioctl.c:456 [inline]
+ blkdev_common_ioctl+0x1b71/0x29b0 block/ioctl.c:730
+ blkdev_ioctl+0x299/0x700 block/ioctl.c:786
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:597 [inline]
+ __se_sys_ioctl fs/ioctl.c:583 [inline]
+ __x64_sys_ioctl+0x1bf/0x220 fs/ioctl.c:583
+ x64_sys_call+0x1280/0x21b0 mnt/fuzznvme_1/fuzznvme/linux-build/v6.19/./arch/x86/include/generated/asm/syscalls_64.h:17
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x71/0x330 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7fb893d3108d
+Code: 28 c3 e8 46 1e 00 00 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007ffff61f2f38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007ffff61f3138 RCX: 00007fb893d3108d
+RDX: 0000000020000040 RSI: 00000000c01070ce RDI: 0000000000000003
+RBP: 0000000000000001 R08: 0000000000000000 R09: 00007ffff61f3138
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001
+R13: 00007ffff61f3128 R14: 00007fb893dae530 R15: 0000000000000001
+ </TASK>
+
+Fixes: 5fd96a4e15de ("nvme: Add pr_ops read_keys support")
+Acked-by: Chao Shi <cshi008@fiu.edu>
+Acked-by: Weidong Zhu <weizhu@fiu.edu>
+Acked-by: Dave Tian <daveti@purdue.edu>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index ad2ecc2f49a97..fe7dbe2648158 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -242,7 +242,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ if (rse_len > U32_MAX)
+ return -EINVAL;
+
+- rse = kzalloc(rse_len, GFP_KERNEL);
++ rse = kvzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+@@ -267,7 +267,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ }
+
+ free_rse:
+- kfree(rse);
++ kvfree(rse);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From a5e5e4e05d1306ddc47db2e1c5307255084fe110 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 11:38:05 -0800
+Subject: nvme-multipath: fix leak on try_module_get failure
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit 0f5197ea9a73a4c406c75e6d8af3a13f7f96ae89 ]
+
+We need to fall back to the synchronous removal if we can't get a
+reference on the module needed for the deferred removal.
+
+Fixes: 62188639ec16 ("nvme-multipath: introduce delayed removal of the multipath head node")
+Reviewed-by: Nilay Shroff <nilay@linux.ibm.com>
+Reviewed-by: John Garry <john.g.garry@oracle.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/multipath.c | 12 +++++-------
+ 1 file changed, 5 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 174027d1cc198..5e41fbaf5f46a 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -1310,13 +1310,11 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
+ if (!list_empty(&head->list))
+ goto out;
+
+- if (head->delayed_removal_secs) {
+- /*
+- * Ensure that no one could remove this module while the head
+- * remove work is pending.
+- */
+- if (!try_module_get(THIS_MODULE))
+- goto out;
++ /*
++ * Ensure that no one could remove this module while the head
++ * remove work is pending.
++ */
++ if (head->delayed_removal_secs && try_module_get(THIS_MODULE)) {
+ mod_delayed_work(nvme_wq, &head->remove_work,
+ head->delayed_removal_secs * HZ);
+ } else {
+--
+2.51.0
+
--- /dev/null
+From 89e483035e893edb3963673da1d1be4c6d2a1c2c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Dec 2025 12:26:13 -0800
+Subject: nvmet-fcloop: Check remoteport port_state before calling done
+ callback
+
+From: Justin Tee <justintee8345@gmail.com>
+
+[ Upstream commit dd677d0598387ea623820ab2bd0e029c377445a3 ]
+
+In nvme_fc_handle_ls_rqst_work, the lsrsp->done callback is only set when
+remoteport->port_state is FC_OBJSTATE_ONLINE. Otherwise, the
+nvme_fc_xmt_ls_rsp's LLDD call to lport->ops->xmt_ls_rsp is expected to
+fail and the nvme-fc transport layer itself will directly call
+nvme_fc_xmt_ls_rsp_free instead of relying on LLDD's done callback to free
+the lsrsp resources.
+
+Update the fcloop_t2h_xmt_ls_rsp routine to check remoteport->port_state.
+If online, then lsrsp->done callback will free the lsrsp. Else, return
+-ENODEV to signal the nvme-fc transport to handle freeing lsrsp.
+
+Cc: Ewan D. Milne <emilne@redhat.com>
+Tested-by: Aristeu Rozanski <aris@redhat.com>
+Acked-by: Aristeu Rozanski <aris@redhat.com>
+Reviewed-by: Daniel Wagner <dwagner@suse.de>
+Closes: https://lore.kernel.org/linux-nvme/21255200-a271-4fa0-b099-97755c8acd4c@work/
+Fixes: 10c165af35d2 ("nvmet-fcloop: call done callback even when remote port is gone")
+Signed-off-by: Justin Tee <justintee8345@gmail.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/fcloop.c | 15 +++++++++++----
+ 1 file changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
+index c30e9a3e014fb..38bd2db3d6bbe 100644
+--- a/drivers/nvme/target/fcloop.c
++++ b/drivers/nvme/target/fcloop.c
+@@ -491,6 +491,7 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ struct fcloop_rport *rport = remoteport->private;
+ struct nvmet_fc_target_port *targetport = rport->targetport;
+ struct fcloop_tport *tport;
++ int ret = 0;
+
+ if (!targetport) {
+ /*
+@@ -500,12 +501,18 @@ fcloop_t2h_xmt_ls_rsp(struct nvme_fc_local_port *localport,
+ * We end up here from delete association exchange:
+ * nvmet_fc_xmt_disconnect_assoc sends an async request.
+ *
+- * Return success because this is what LLDDs do; silently
+- * drop the response.
++ * Return success when remoteport is still online because this
++ * is what LLDDs do and silently drop the response. Otherwise,
++ * return with error to signal upper layer to perform the lsrsp
++ * resource cleanup.
+ */
+- lsrsp->done(lsrsp);
++ if (remoteport->port_state == FC_OBJSTATE_ONLINE)
++ lsrsp->done(lsrsp);
++ else
++ ret = -ENODEV;
++
+ kmem_cache_free(lsreq_cache, tls_req);
+- return 0;
++ return ret;
+ }
+
+ memcpy(lsreq->rspaddr, lsrsp->rspbuf,
+--
+2.51.0
+
--- /dev/null
+From 720aab36f8b8bc89bc408f482932f3c55c4e469e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:58 +0000
+Subject: octeon_ep: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 43b3160cb639079a15daeb5f080120afbfbfc918 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provides ordering guarantees but doesn't prevent the compiler
+from performing optimizations like caching in registers, load tearing etc.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-3-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 21 +++++++++------
+ .../net/ethernet/marvell/octeon_ep/octep_rx.c | 27 +++++++++++++------
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 7f8ed8f0ade49..16f52d4b11e91 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -562,17 +562,22 @@ static void octep_clean_irqs(struct octep_device *oct)
+ */
+ static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index f2a7c6a76c742..74de19166488f 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -324,10 +324,16 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
+
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+ * reached half its max value.
+@@ -338,8 +344,9 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -414,7 +421,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ u16 rx_ol_flags;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -499,7 +506,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
+@@ -522,22 +529,26 @@ int octep_oq_process_rx(struct octep_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From f15a310cc6f65fc81fedec25e2528c946ede0085 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:57 +0000
+Subject: octeon_ep: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 18c04a808c436d629d5812ce883e3822a5f5a47f ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion,
+and replace napi_complete with napi_complete_done.
+
+Moving the IQ/OQ counter updates before napi_complete_done ensures
+1. Counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+ synchronized.
+napi_complete_done (vs napi_complete) allows for better
+interrupt coalescing.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-2-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 57db7ea2f5be9..7f8ed8f0ade49 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -555,12 +555,12 @@ static void octep_clean_irqs(struct octep_device *oct)
+ }
+
+ /**
+- * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -576,7 +576,17 @@ static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+- wmb();
++ smp_wmb();
++}
++
++/**
++ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++{
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -602,7 +612,8 @@ static int octep_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+- napi_complete(napi);
++ octep_update_pkt(ioq_vector->iq, ioq_vector->oq);
++ napi_complete_done(napi, rx_done);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+ }
+--
+2.51.0
+
--- /dev/null
+From 34f4a6d8d7a29e09743832bbb17a4730b0f97eae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:14:00 +0000
+Subject: octeon_ep_vf: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 6c73126ecd1080351b468fe43353b2f705487f44 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provides ordering guarantees but doesn't prevent the compiler
+from performing optimizations like caching in registers, load tearing etc.
+
+Fixes: 1cd3b407977c3 ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-5-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeon_ep_vf/octep_vf_main.c | 21 ++++++++------
+ .../marvell/octeon_ep_vf/octep_vf_rx.c | 28 +++++++++++++------
+ 2 files changed, 33 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 17efc8eab4cfb..a3c359124887e 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -294,17 +294,22 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+
+ static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+index 6f865dbbba6c6..b579d5b545c46 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_rx.c
+@@ -325,9 +325,16 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ struct octep_vf_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
++
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+@@ -339,8 +346,9 @@ static int octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -369,7 +377,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ struct sk_buff *skb;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -463,7 +471,7 @@ static int __octep_vf_oq_process_rx(struct octep_vf_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats->packets += pkt;
+ oq->stats->bytes += rx_bytes;
+@@ -486,22 +494,26 @@ int octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_vf_device *oct = oq->octep_vf_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_vf_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_vf_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 79af5349a7544c5b9c58c1a3901083f7acddd1eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:59 +0000
+Subject: octeon_ep_vf: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 2ae7d20fb24f598f60faa8f6ecc856dac782261a ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion.
+Moving the IQ/OQ counter updates before napi_complete_done ensures
+1. Counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+ synchronized.
+
+Fixes: 1cd3b407977c3 ("octeon_ep_vf: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-4-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../marvell/octeon_ep_vf/octep_vf_main.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+index 1d9760b4b8f47..17efc8eab4cfb 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep_vf/octep_vf_main.c
+@@ -286,12 +286,13 @@ static void octep_vf_clean_irqs(struct octep_vf_device *oct)
+ }
+
+ /**
+- * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_vf_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
++
++static void octep_vf_update_pkt(struct octep_vf_iq *iq, struct octep_vf_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -308,6 +309,17 @@ static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq, struct octep_vf_oq *
+
+ /* Flush the previous wrties before writing to RESEND bit */
+ smp_wmb();
++}
++
++/**
++ * octep_vf_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_vf_enable_ioq_irq(struct octep_vf_iq *iq,
++ struct octep_vf_oq *oq)
++{
+ writeq(1UL << OCTEP_VF_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_VF_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -333,6 +345,7 @@ static int octep_vf_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
++ octep_vf_update_pkt(ioq_vector->iq, ioq_vector->oq);
+ if (likely(napi_complete_done(napi, rx_done)))
+ octep_vf_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+
+--
+2.51.0
+
--- /dev/null
+From 70a74d538d27371c3fc7f4790c937398330e6096 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 17:39:07 +0800
+Subject: pinctrl: cirrus: cs42l43: Fix double-put in cs42l43_pin_probe()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fd5bed798f45eb3a178ad527b43ab92705faaf8a ]
+
+devm_add_action_or_reset() already invokes the action on failure,
+so the explicit put causes a double-put.
+
+Fixes: 9b07cdf86a0b ("pinctrl: cirrus: Fix fwnode leak in cs42l43_pin_probe()")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/cirrus/pinctrl-cs42l43.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+index a8f82104a3842..227c37c360e19 100644
+--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
++++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+@@ -574,10 +574,9 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
+ if (child) {
+ ret = devm_add_action_or_reset(&pdev->dev,
+ cs42l43_fwnode_put, child);
+- if (ret) {
+- fwnode_handle_put(child);
++ if (ret)
+ return ret;
+- }
++
+ if (!child->dev)
+ child->dev = priv->dev;
+ fwnode = child;
+--
+2.51.0
+
--- /dev/null
+From 9685a60c94bbabbc62a1c410343bb34512dfd08b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:46 +0100
+Subject: pinctrl: equilibrium: fix warning trace on load
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 3e00b1b332e54ba50cca6691f628b9c06574024f ]
+
+The callback functions 'eqbr_irq_mask()' and 'eqbr_irq_ack()' are also
+called in the callback function 'eqbr_irq_mask_ack()'. This is done to
+avoid source code duplication. The problem, is that in the function
+'eqbr_irq_mask()' also calles the gpiolib function 'gpiochip_disable_irq()'
+
+This generates the following warning trace in the log for every gpio on
+load.
+
+[ 6.088111] ------------[ cut here ]------------
+[ 6.092440] WARNING: CPU: 3 PID: 1 at drivers/gpio/gpiolib.c:3810 gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Modules linked in:
+[ 6.097847] CPU: 3 UID: 0 PID: 1 Comm: swapper/0 Tainted: G W 6.12.59+ #0
+[ 6.097847] Tainted: [W]=WARN
+[ 6.097847] RIP: 0010:gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Code: 39 c6 48 19 c0 21 c6 48 c1 e6 05 48 03 b2 38 03 00 00 48 81 fe 00 f0 ff ff 77 11 48 8b 46 08 f6 c4 02 74 06 f0 80 66 09 fb c3 <0f> 0b 90 0f 1f 40 00 c3 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40
+[ 6.097847] RSP: 0000:ffffc9000000b830 EFLAGS: 00010046
+[ 6.097847] RAX: 0000000000000045 RBX: ffff888001be02a0 RCX: 0000000000000008
+[ 6.097847] RDX: ffff888001be9000 RSI: ffff888001b2dd00 RDI: ffff888001be02a0
+[ 6.097847] RBP: ffffc9000000b860 R08: 0000000000000000 R09: 0000000000000000
+[ 6.097847] R10: 0000000000000001 R11: ffff888001b2a154 R12: ffff888001be0514
+[ 6.097847] R13: ffff888001be02a0 R14: 0000000000000008 R15: 0000000000000000
+[ 6.097847] FS: 0000000000000000(0000) GS:ffff888041d80000(0000) knlGS:0000000000000000
+[ 6.097847] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 6.097847] CR2: 0000000000000000 CR3: 0000000003030000 CR4: 00000000001026b0
+[ 6.097847] Call Trace:
+[ 6.097847] <TASK>
+[ 6.097847] ? eqbr_irq_mask+0x63/0x70
+[ 6.097847] ? no_action+0x10/0x10
+[ 6.097847] eqbr_irq_mask_ack+0x11/0x60
+
+In another driver (drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c) the
+interrupt is not disabled here.
+
+To fix this, do not call the 'eqbr_irq_mask()' and 'eqbr_irq_ack()'
+functions. Instead, implement this directly without disabling the interrupts.
+
+Fixes: 52066a53bd11 ("pinctrl: equilibrium: Convert to immutable irq_chip")
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index 49c8232b525a9..ba1c867b7b891 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -64,8 +64,15 @@ static void eqbr_irq_ack(struct irq_data *d)
+
+ static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_irq_mask(d);
+- eqbr_irq_ack(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
++ unsigned int offset = irqd_to_hwirq(d);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&gctrl->lock, flags);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
++ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+--
+2.51.0
+
--- /dev/null
+From 162a939395180bac0daea89ecbe2097322c8d944 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:45 +0100
+Subject: pinctrl: equilibrium: rename irq_chip function callbacks
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 1f96b84835eafb3e6f366dc3a66c0e69504cec9d ]
+
+Renaming of the irq_chip callback functions to improve clarity.
+
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Stable-dep-of: 3e00b1b332e5 ("pinctrl: equilibrium: fix warning trace on load")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index 48b55c5bf8d4f..49c8232b525a9 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -23,7 +23,7 @@
+ #define PIN_NAME_LEN 10
+ #define PAD_REG_OFF 0x100
+
+-static void eqbr_gpio_disable_irq(struct irq_data *d)
++static void eqbr_irq_mask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -36,7 +36,7 @@ static void eqbr_gpio_disable_irq(struct irq_data *d)
+ gpiochip_disable_irq(gc, offset);
+ }
+
+-static void eqbr_gpio_enable_irq(struct irq_data *d)
++static void eqbr_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -50,7 +50,7 @@ static void eqbr_gpio_enable_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_ack_irq(struct irq_data *d)
++static void eqbr_irq_ack(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -62,10 +62,10 @@ static void eqbr_gpio_ack_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
++static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_gpio_disable_irq(d);
+- eqbr_gpio_ack_irq(d);
++ eqbr_irq_mask(d);
++ eqbr_irq_ack(d);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+@@ -92,7 +92,7 @@ static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ return 0;
+ }
+
+-static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
++static int eqbr_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -166,11 +166,11 @@ static void eqbr_irq_handler(struct irq_desc *desc)
+
+ static const struct irq_chip eqbr_irq_chip = {
+ .name = "gpio_irq",
+- .irq_mask = eqbr_gpio_disable_irq,
+- .irq_unmask = eqbr_gpio_enable_irq,
+- .irq_ack = eqbr_gpio_ack_irq,
+- .irq_mask_ack = eqbr_gpio_mask_ack_irq,
+- .irq_set_type = eqbr_gpio_set_irq_type,
++ .irq_ack = eqbr_irq_ack,
++ .irq_mask = eqbr_irq_mask,
++ .irq_mask_ack = eqbr_irq_mask_ack,
++ .irq_unmask = eqbr_irq_unmask,
++ .irq_set_type = eqbr_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+--
+2.51.0
+
--- /dev/null
+From ac0bbbf8f35b6c41bc99da9833ea24c90f277b13 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Feb 2026 16:17:07 +0000
+Subject: pinctrl: generic: move function to amlogic-am4 driver
+
+From: Conor Dooley <conor.dooley@microchip.com>
+
+[ Upstream commit 9c5a40f2922a5a6d6b42e7b3d4c8e253918c07a1 ]
+
+pinconf_generic_dt_node_to_map_pinmux() is not actually a generic
+function, and really belongs in the amlogic-am4 driver. There are three
+reasons why.
+
+First, and least, of the reasons is that this function behaves
+differently to the other dt_node_to_map functions in a way that is not
+obvious from a first glance. This difference stems for the devicetree
+properties that the function is intended for use with, and how they are
+typically used. The other generic dt_node_to_map functions support
+platforms where the pins, groups and functions are described statically
+in the driver and require a function that will produce a mapping from dt
+nodes to these pre-established descriptions. No other code in the driver
+is required to be executed at runtime.
+pinconf_generic_dt_node_to_map_pinmux() on the other hand is intended for
+use with the pinmux property, where groups and functions are determined
+entirely from the devicetree. As a result, there are no statically
+defined groups and functions in the driver for this function to perform
+a mapping to. Other drivers that use the pinmux property (e.g. the k1)
+their dt_node_to_map function creates the groups and functions as the
+devicetree is parsed. Instead of that,
+pinconf_generic_dt_node_to_map_pinmux() requires that the devicetree is
+parsed twice, once by it and once at probe, so that the driver
+dynamically creates the groups and functions before the dt_node_to_map
+callback is executed. I don't believe this double parsing requirement is
+how developers would expect this to work and is not necessary given
+there are drivers that do not have this behaviour.
+
+Secondly and thirdly, the function bakes in some assumptions that only
+really match the amlogic platform about how the devicetree is constructed.
+These, to me, are problematic for something that claims to be generic.
+
+The other dt_node_to_map implementations accept being called for
+either a node containing pin configuration properties or a node
+containing child nodes that each contain the configuration properties.
+IOW, they support the following two devicetree configurations:
+
+| cfg {
+| label: group {
+| pinmux = <asjhdasjhlajskd>;
+| config-item1;
+| };
+| };
+
+| label: cfg {
+| group1 {
+| pinmux = <dsjhlfka>;
+| config-item2;
+| };
+| group2 {
+| pinmux = <lsdjhaf>;
+| config-item1;
+| };
+| };
+
+pinconf_generic_dt_node_to_map_pinmux() only supports the latter.
+
+The other assumption about devicetree configuration that the function
+makes is that the labeled node's parent is a "function node". The amlogic
+driver uses these "function nodes" to create the functions at probe
+time, and pinconf_generic_dt_node_to_map_pinmux() uses the name of the
+parent of the node it is operating on as part of the mapping. IOW, it
+requires that the devicetree look like:
+
+| pinctrl@bla {
+|
+| func-foo {
+| label: group-default {
+| pinmuxes = <lskdf>;
+| };
+| };
+| };
+
+and couldn't be used if the nodes containing the pinmux and
+configuration properties are children of the pinctrl node itself:
+
+| pinctrl@bla {
+|
+| label: group-default {
+| pinmuxes = <lskdf>;
+| };
+| };
+
+These final two reasons are mainly why I believe this is not suitable as
+a generic function, and should be moved into the driver that is the sole
+user and originator of the "generic" function.
+
+Signed-off-by: Conor Dooley <conor.dooley@microchip.com>
+Acked-by: Andy Shevchenko <andriy.shevchenko@intel.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Stable-dep-of: a2539b92e4b7 ("pinctrl: meson: amlogic-a4: Fix device node reference leak in aml_dt_node_to_map_pinmux()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 71 +++++++++++++++++++++-
+ drivers/pinctrl/pinconf-generic.c | 69 ---------------------
+ include/linux/pinctrl/pinconf-generic.h | 5 --
+ 3 files changed, 70 insertions(+), 75 deletions(-)
+
+diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+index 40542edd557e0..dfa32b11555cd 100644
+--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
++++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+@@ -24,6 +24,7 @@
+ #include <dt-bindings/pinctrl/amlogic,pinctrl.h>
+
+ #include "../core.h"
++#include "../pinctrl-utils.h"
+ #include "../pinconf.h"
+
+ #define gpio_chip_to_bank(chip) \
+@@ -672,11 +673,79 @@ static void aml_pin_dbg_show(struct pinctrl_dev *pcdev, struct seq_file *s,
+ seq_printf(s, " %s", dev_name(pcdev->dev));
+ }
+
++static int aml_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
++ struct device_node *np,
++ struct pinctrl_map **map,
++ unsigned int *num_maps)
++{
++ struct device *dev = pctldev->dev;
++ struct device_node *pnode;
++ unsigned long *configs = NULL;
++ unsigned int num_configs = 0;
++ struct property *prop;
++ unsigned int reserved_maps;
++ int reserve;
++ int ret;
++
++ prop = of_find_property(np, "pinmux", NULL);
++ if (!prop) {
++ dev_info(dev, "Missing pinmux property\n");
++ return -ENOENT;
++ }
++
++ pnode = of_get_parent(np);
++ if (!pnode) {
++ dev_info(dev, "Missing function node\n");
++ return -EINVAL;
++ }
++
++ reserved_maps = 0;
++ *map = NULL;
++ *num_maps = 0;
++
++ ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
++ &num_configs);
++ if (ret < 0) {
++ dev_err(dev, "%pOF: could not parse node property\n", np);
++ return ret;
++ }
++
++ reserve = 1;
++ if (num_configs)
++ reserve++;
++
++ ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
++ num_maps, reserve);
++ if (ret < 0)
++ goto exit;
++
++ ret = pinctrl_utils_add_map_mux(pctldev, map,
++ &reserved_maps, num_maps, np->name,
++ pnode->name);
++ if (ret < 0)
++ goto exit;
++
++ if (num_configs) {
++ ret = pinctrl_utils_add_map_configs(pctldev, map, &reserved_maps,
++ num_maps, np->name, configs,
++ num_configs, PIN_MAP_TYPE_CONFIGS_GROUP);
++ if (ret < 0)
++ goto exit;
++ }
++
++exit:
++ kfree(configs);
++ if (ret)
++ pinctrl_utils_free_map(pctldev, *map, *num_maps);
++
++ return ret;
++}
++
+ static const struct pinctrl_ops aml_pctrl_ops = {
+ .get_groups_count = aml_get_groups_count,
+ .get_group_name = aml_get_group_name,
+ .get_group_pins = aml_get_group_pins,
+- .dt_node_to_map = pinconf_generic_dt_node_to_map_pinmux,
++ .dt_node_to_map = aml_dt_node_to_map_pinmux,
+ .dt_free_map = pinconf_generic_dt_free_map,
+ .pin_dbg_show = aml_pin_dbg_show,
+ };
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 38a8daf4a5848..2b030bd0e6adc 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -385,75 +385,6 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
+ }
+ EXPORT_SYMBOL_GPL(pinconf_generic_parse_dt_config);
+
+-int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+- struct device_node *np,
+- struct pinctrl_map **map,
+- unsigned int *num_maps)
+-{
+- struct device *dev = pctldev->dev;
+- struct device_node *pnode;
+- unsigned long *configs = NULL;
+- unsigned int num_configs = 0;
+- struct property *prop;
+- unsigned int reserved_maps;
+- int reserve;
+- int ret;
+-
+- prop = of_find_property(np, "pinmux", NULL);
+- if (!prop) {
+- dev_info(dev, "Missing pinmux property\n");
+- return -ENOENT;
+- }
+-
+- pnode = of_get_parent(np);
+- if (!pnode) {
+- dev_info(dev, "Missing function node\n");
+- return -EINVAL;
+- }
+-
+- reserved_maps = 0;
+- *map = NULL;
+- *num_maps = 0;
+-
+- ret = pinconf_generic_parse_dt_config(np, pctldev, &configs,
+- &num_configs);
+- if (ret < 0) {
+- dev_err(dev, "%pOF: could not parse node property\n", np);
+- return ret;
+- }
+-
+- reserve = 1;
+- if (num_configs)
+- reserve++;
+-
+- ret = pinctrl_utils_reserve_map(pctldev, map, &reserved_maps,
+- num_maps, reserve);
+- if (ret < 0)
+- goto exit;
+-
+- ret = pinctrl_utils_add_map_mux(pctldev, map,
+- &reserved_maps, num_maps, np->name,
+- pnode->name);
+- if (ret < 0)
+- goto exit;
+-
+- if (num_configs) {
+- ret = pinctrl_utils_add_map_configs(pctldev, map, &reserved_maps,
+- num_maps, np->name, configs,
+- num_configs, PIN_MAP_TYPE_CONFIGS_GROUP);
+- if (ret < 0)
+- goto exit;
+- }
+-
+-exit:
+- kfree(configs);
+- if (ret)
+- pinctrl_utils_free_map(pctldev, *map, *num_maps);
+-
+- return ret;
+-}
+-EXPORT_SYMBOL_GPL(pinconf_generic_dt_node_to_map_pinmux);
+-
+ int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
+ struct device_node *np, struct pinctrl_map **map,
+ unsigned int *reserved_maps, unsigned int *num_maps,
+diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
+index 1be4032071c23..89277808ea614 100644
+--- a/include/linux/pinctrl/pinconf-generic.h
++++ b/include/linux/pinctrl/pinconf-generic.h
+@@ -250,9 +250,4 @@ static inline int pinconf_generic_dt_node_to_map_all(struct pinctrl_dev *pctldev
+ return pinconf_generic_dt_node_to_map(pctldev, np_config, map, num_maps,
+ PIN_MAP_TYPE_INVALID);
+ }
+-
+-int pinconf_generic_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+- struct device_node *np,
+- struct pinctrl_map **map,
+- unsigned int *num_maps);
+ #endif /* __LINUX_PINCTRL_PINCONF_GENERIC_H */
+--
+2.51.0
+
--- /dev/null
+From 4afcafb64fd2beb8ca42fc7494f6c4c69cd09565 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Feb 2026 00:51:22 +0800
+Subject: pinctrl: meson: amlogic-a4: Fix device node reference leak in
+ aml_dt_node_to_map_pinmux()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit a2539b92e4b791c1ba482930b5e51b1591975461 ]
+
+The of_get_parent() function returns a device_node with an incremented
+reference count.
+
+Use the __free(device_node) cleanup attribute to ensure of_node_put()
+is automatically called when pnode goes out of scope, fixing a
+reference leak.
+
+Fixes: 6e9be3abb78c ("pinctrl: Add driver support for Amlogic SoCs")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/meson/pinctrl-amlogic-a4.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+index dfa32b11555cd..e2293a872dcb7 100644
+--- a/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
++++ b/drivers/pinctrl/meson/pinctrl-amlogic-a4.c
+@@ -679,7 +679,6 @@ static int aml_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ unsigned int *num_maps)
+ {
+ struct device *dev = pctldev->dev;
+- struct device_node *pnode;
+ unsigned long *configs = NULL;
+ unsigned int num_configs = 0;
+ struct property *prop;
+@@ -693,7 +692,7 @@ static int aml_dt_node_to_map_pinmux(struct pinctrl_dev *pctldev,
+ return -ENOENT;
+ }
+
+- pnode = of_get_parent(np);
++ struct device_node *pnode __free(device_node) = of_get_parent(np);
+ if (!pnode) {
+ dev_info(dev, "Missing function node\n");
+ return -EINVAL;
+--
+2.51.0
+
--- /dev/null
+From fd8d640fcb5f854db1f4622bd3e122eb12abfc90 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Feb 2026 23:14:51 +0800
+Subject: pinctrl: pinconf-generic: Fix memory leak in
+ pinconf_generic_parse_dt_config()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 7a648d598cb8e8c62af3f0e020a25820a3f3a9a7 ]
+
+In pinconf_generic_parse_dt_config(), if parse_dt_cfg() fails, it returns
+directly. This bypasses the cleanup logic and results in a memory leak of
+the cfg buffer.
+
+Fix this by jumping to the out label on failure, ensuring kfree(cfg) is
+called before returning.
+
+Fixes: 90a18c512884 ("pinctrl: pinconf-generic: Handle string values for generic properties")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Antonio Borneo <antonio.borneo@foss.st.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinconf-generic.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/pinconf-generic.c b/drivers/pinctrl/pinconf-generic.c
+index 366775841c639..38a8daf4a5848 100644
+--- a/drivers/pinctrl/pinconf-generic.c
++++ b/drivers/pinctrl/pinconf-generic.c
+@@ -351,13 +351,13 @@ int pinconf_generic_parse_dt_config(struct device_node *np,
+
+ ret = parse_dt_cfg(np, dt_params, ARRAY_SIZE(dt_params), cfg, &ncfg);
+ if (ret)
+- return ret;
++ goto out;
+ if (pctldev && pctldev->desc->num_custom_params &&
+ pctldev->desc->custom_params) {
+ ret = parse_dt_cfg(np, pctldev->desc->custom_params,
+ pctldev->desc->num_custom_params, cfg, &ncfg);
+ if (ret)
+- return ret;
++ goto out;
+ }
+
+ /* no configs found at all */
+--
+2.51.0
+
--- /dev/null
+From d951ba8062fe726c6991865a074fe32f06c9f5e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 09:33:44 +0530
+Subject: pinctrl: qcom: qcs615: Add missing dual edge GPIO IRQ errata flag
+
+From: Maulik Shah <maulik.shah@oss.qualcomm.com>
+
+[ Upstream commit 09a30b7a035f9f4ac918c8a9af89d70e43462152 ]
+
+Wakeup capable GPIOs uses PDC as parent IRQ chip and PDC on qcs615 do not
+support dual edge IRQs. Add missing wakeirq_dual_edge_errata configuration
+to enable workaround for dual edge GPIO IRQs.
+
+Fixes: b698f36a9d40 ("pinctrl: qcom: add the tlmm driver for QCS615 platform")
+Signed-off-by: Maulik Shah <maulik.shah@oss.qualcomm.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-qcs615.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c
+index 4dfa820d4e77c..f1c827ddbfbfa 100644
+--- a/drivers/pinctrl/qcom/pinctrl-qcs615.c
++++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c
+@@ -1067,6 +1067,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = {
+ .ntiles = ARRAY_SIZE(qcs615_tiles),
+ .wakeirq_map = qcs615_pdc_map,
+ .nwakeirq_map = ARRAY_SIZE(qcs615_pdc_map),
++ .wakeirq_dual_edge_errata = true,
+ };
+
+ static const struct of_device_id qcs615_tlmm_of_match[] = {
+--
+2.51.0
+
--- /dev/null
+From 72751edb0ec69384fe8f05a6fe78c39c5a75e80b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/lenovo/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/lenovo/thinkpad_acpi.c b/drivers/platform/x86/lenovo/thinkpad_acpi.c
+index cc19fe520ea96..075543cd0e77e 100644
+--- a/drivers/platform/x86/lenovo/thinkpad_acpi.c
++++ b/drivers/platform/x86/lenovo/thinkpad_acpi.c
+@@ -9525,14 +9525,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
--- /dev/null
+From ceabfe051233660d48089e93a1a42b98e8f78862 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 02:24:34 +0800
+Subject: regulator: mt6363: Fix incorrect and redundant IRQ disposal in probe
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit 23942b71f07cc99e39d9216a5b370df494759d8c ]
+
+In mt6363_regulator_probe(), devm_add_action_or_reset() is used to
+automatically dispose of the IRQ mapping if the probe fails or the
+device is removed.
+
+The manual call to irq_dispose_mapping() in the error path was redundant
+as the reset action already triggers mt6363_irq_remove(). Furthermore,
+the manual call incorrectly passed the hardware IRQ number (info->hwirq)
+instead of the virtual IRQ mapping (info->virq).
+
+Remove the redundant and incorrect manual disposal.
+
+Fixes: 3c36965df808 ("regulator: Add support for MediaTek MT6363 SPMI PMIC Regulators")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Link: https://patch.msgid.link/20260223-mt6363-v1-1-c99a2e8ac621@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/regulator/mt6363-regulator.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/drivers/regulator/mt6363-regulator.c b/drivers/regulator/mt6363-regulator.c
+index 03af5fa536007..0aebcbda0a196 100644
+--- a/drivers/regulator/mt6363-regulator.c
++++ b/drivers/regulator/mt6363-regulator.c
+@@ -899,10 +899,8 @@ static int mt6363_regulator_probe(struct platform_device *pdev)
+ "Failed to map IRQ%d\n", info->hwirq);
+
+ ret = devm_add_action_or_reset(dev, mt6363_irq_remove, &info->virq);
+- if (ret) {
+- irq_dispose_mapping(info->hwirq);
++ if (ret)
+ return ret;
+- }
+
+ config.driver_data = info;
+ INIT_DELAYED_WORK(&info->oc_work, mt6363_oc_irq_enable_work);
+--
+2.51.0
+
--- /dev/null
+From 66497b1a685fd17691cc41f5562c19a6e620ca19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 19:37:56 +0900
+Subject: rust: kunit: fix warning when !CONFIG_PRINTK
+
+From: Alexandre Courbot <acourbot@nvidia.com>
+
+[ Upstream commit 7dd34dfc8dfa92a7244242098110388367996ac3 ]
+
+If `CONFIG_PRINTK` is not set, then the following warnings are issued
+during build:
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:16:12
+ |
+ 16 | pub fn err(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+ |
+ = note: `#[warn(unused_variables)]` (part of `#[warn(unused)]`) on by default
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:32:13
+ |
+ 32 | pub fn info(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+
+Fix this by adding a no-op assignment using `args` when `CONFIG_PRINTK`
+is not set.
+
+Fixes: a66d733da801 ("rust: support running Rust documentation tests as KUnit ones")
+Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/kunit.rs | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
+index 79436509dd73d..8907b6f89ece5 100644
+--- a/rust/kernel/kunit.rs
++++ b/rust/kernel/kunit.rs
+@@ -17,6 +17,10 @@
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn err(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+@@ -33,6 +37,10 @@ pub fn err(args: fmt::Arguments<'_>) {
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn info(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+--
+2.51.0
+
--- /dev/null
+From 0e1c37a7d5b44b9431fa634a41da6860b1e517f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 16:45:40 +0100
+Subject: sched/deadline: Fix missing ENQUEUE_REPLENISH during PI de-boosting
+
+From: Juri Lelli <juri.lelli@redhat.com>
+
+[ Upstream commit d658686a1331db3bb108ca079d76deb3208ed949 ]
+
+Running stress-ng --schedpolicy 0 on an RT kernel on a big machine
+might lead to the following WARNINGs (edited).
+
+ sched: DL de-boosted task PID 22725: REPLENISH flag missing
+
+ WARNING: CPU: 93 PID: 0 at kernel/sched/deadline.c:239 dequeue_task_dl+0x15c/0x1f8
+ ... (running_bw underflow)
+ Call trace:
+ dequeue_task_dl+0x15c/0x1f8 (P)
+ dequeue_task+0x80/0x168
+ deactivate_task+0x24/0x50
+ push_dl_task+0x264/0x2e0
+ dl_task_timer+0x1b0/0x228
+ __hrtimer_run_queues+0x188/0x378
+ hrtimer_interrupt+0xfc/0x260
+ ...
+
+The problem is that when a SCHED_DEADLINE task (lock holder) is
+changed to a lower priority class via sched_setscheduler(), it may
+fail to properly inherit the parameters of potential DEADLINE donors
+if it didn't already inherit them in the past (shorter deadline than
+donor's at that time). This might lead to bandwidth accounting
+corruption, as enqueue_task_dl() won't recognize the lock holder as
+boosted.
+
+The scenario occurs when:
+1. A DEADLINE task (donor) blocks on a PI mutex held by another
+ DEADLINE task (holder), but the holder doesn't inherit parameters
+ (e.g., it already has a shorter deadline)
+2. sched_setscheduler() changes the holder from DEADLINE to a lower
+ class while still holding the mutex
+3. The holder should now inherit DEADLINE parameters from the donor
+ and be enqueued with ENQUEUE_REPLENISH, but this doesn't happen
+
+Fix the issue by introducing __setscheduler_dl_pi(), which detects when
+a DEADLINE (proper or boosted) task gets setscheduled to a lower
+priority class. In case, the function makes the task inherit DEADLINE
+parameters of the donor (pi_se) and sets ENQUEUE_REPLENISH flag to
+ensure proper bandwidth accounting during the next enqueue operation.
+
+Fixes: 2279f540ea7d ("sched/deadline: Fix priority inheritance with multiple scheduling classes")
+Reported-by: Bruno Goncalves <bgoncalv@redhat.com>
+Signed-off-by: Juri Lelli <juri.lelli@redhat.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://patch.msgid.link/20260302-upstream-fix-deadline-piboost-b4-v3-1-6ba32184a9e0@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/syscalls.c | 30 ++++++++++++++++++++++++++++++
+ 1 file changed, 30 insertions(+)
+
+diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c
+index 6f10db3646e7f..cadb0e9fe19b9 100644
+--- a/kernel/sched/syscalls.c
++++ b/kernel/sched/syscalls.c
+@@ -284,6 +284,35 @@ static bool check_same_owner(struct task_struct *p)
+ uid_eq(cred->euid, pcred->uid));
+ }
+
++#ifdef CONFIG_RT_MUTEXES
++static inline void __setscheduler_dl_pi(int newprio, int policy,
++ struct task_struct *p,
++ struct sched_change_ctx *scope)
++{
++ /*
++ * In case a DEADLINE task (either proper or boosted) gets
++ * setscheduled to a lower priority class, check if it neeeds to
++ * inherit parameters from a potential pi_task. In that case make
++ * sure replenishment happens with the next enqueue.
++ */
++
++ if (dl_prio(newprio) && !dl_policy(policy)) {
++ struct task_struct *pi_task = rt_mutex_get_top_task(p);
++
++ if (pi_task) {
++ p->dl.pi_se = pi_task->dl.pi_se;
++ scope->flags |= ENQUEUE_REPLENISH;
++ }
++ }
++}
++#else /* !CONFIG_RT_MUTEXES */
++static inline void __setscheduler_dl_pi(int newprio, int policy,
++ struct task_struct *p,
++ struct sched_change_ctx *scope)
++{
++}
++#endif /* !CONFIG_RT_MUTEXES */
++
+ #ifdef CONFIG_UCLAMP_TASK
+
+ static int uclamp_validate(struct task_struct *p,
+@@ -655,6 +684,7 @@ int __sched_setscheduler(struct task_struct *p,
+ __setscheduler_params(p, attr);
+ p->sched_class = next_class;
+ p->prio = newprio;
++ __setscheduler_dl_pi(newprio, policy, p, scope);
+ }
+ __setscheduler_uclamp(p, attr);
+
+--
+2.51.0
+
--- /dev/null
+From 5489481b3905127f910088c5fc71a7f3a8e8a157 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 09:36:37 +0800
+Subject: selftest/arm64: Fix sve2p1_sigill() to hwcap test
+
+From: Yifan Wu <wuyifan50@huawei.com>
+
+[ Upstream commit d87c828daa7ead9763416f75cc416496969cf1dc ]
+
+The FEAT_SVE2p1 is indicated by ID_AA64ZFR0_EL1.SVEver. However,
+the BFADD requires the FEAT_SVE_B16B16, which is indicated by
+ID_AA64ZFR0_EL1.B16B16. This could cause the test to incorrectly
+fail on a CPU that supports FEAT_SVE2.1 but not FEAT_SVE_B16B16.
+
+LD1Q Gather load quadwords which is decoded from SVE encodings and
+implied by FEAT_SVE2p1.
+
+Fixes: c5195b027d29 ("kselftest/arm64: Add SVE 2.1 to hwcap test")
+Signed-off-by: Yifan Wu <wuyifan50@huawei.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/arm64/abi/hwcap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
+index c41640f18e4ec..62ea450f2ccc0 100644
+--- a/tools/testing/selftests/arm64/abi/hwcap.c
++++ b/tools/testing/selftests/arm64/abi/hwcap.c
+@@ -473,8 +473,8 @@ static void sve2_sigill(void)
+
+ static void sve2p1_sigill(void)
+ {
+- /* BFADD Z0.H, Z0.H, Z0.H */
+- asm volatile(".inst 0x65000000" : : : "z0");
++ /* LD1Q {Z0.Q}, P0/Z, [Z0.D, X0] */
++ asm volatile(".inst 0xC400A000" : : : "z0");
+ }
+
+ static void sve2p2_sigill(void)
+--
+2.51.0
+
--- /dev/null
+From d76ea82df87c54f59a64789b5ad236003aa6b350 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 19:14:50 +0800
+Subject: selftests/harness: order TEST_F and XFAIL_ADD constructors
+
+From: Sun Jian <sun.jian.kdev@gmail.com>
+
+[ Upstream commit 6be2681514261324c8ee8a1c6f76cefdf700220f ]
+
+TEST_F() allocates and registers its struct __test_metadata via mmap()
+inside its constructor, and only then assigns the
+_##fixture_##test##_object pointer.
+
+XFAIL_ADD() runs in a constructor too and reads
+_##fixture_##test##_object to initialize xfail->test. If XFAIL_ADD runs
+first, xfail->test can be NULL and the expected failure will be reported
+as FAIL.
+
+Use constructor priorities to ensure TEST_F registration runs before
+XFAIL_ADD, without adding extra state or runtime lookups.
+
+Fixes: 2709473c9386 ("selftests: kselftest_harness: support using xfail")
+Signed-off-by: Sun Jian <sun.jian.kdev@gmail.com>
+Link: https://patch.msgid.link/20260225111451.347923-1-sun.jian.kdev@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/kselftest_harness.h | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h
+index 16a119a4656c7..4afaef01c22e9 100644
+--- a/tools/testing/selftests/kselftest_harness.h
++++ b/tools/testing/selftests/kselftest_harness.h
+@@ -76,6 +76,9 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
+ memset(s, c, n);
+ }
+
++#define KSELFTEST_PRIO_TEST_F 20000
++#define KSELFTEST_PRIO_XFAIL 20001
++
+ #define TEST_TIMEOUT_DEFAULT 30
+
+ /* Utilities exposed to the test definitions */
+@@ -465,7 +468,7 @@ static inline void __kselftest_memset_safe(void *s, int c, size_t n)
+ fixture_name##_teardown(_metadata, self, variant); \
+ } \
+ static struct __test_metadata *_##fixture_name##_##test_name##_object; \
+- static void __attribute__((constructor)) \
++ static void __attribute__((constructor(KSELFTEST_PRIO_TEST_F))) \
+ _register_##fixture_name##_##test_name(void) \
+ { \
+ struct __test_metadata *object = mmap(NULL, sizeof(*object), \
+@@ -880,7 +883,7 @@ struct __test_xfail {
+ .fixture = &_##fixture_name##_fixture_object, \
+ .variant = &_##fixture_name##_##variant_name##_object, \
+ }; \
+- static void __attribute__((constructor)) \
++ static void __attribute__((constructor(KSELFTEST_PRIO_XFAIL))) \
+ _register_##fixture_name##_##variant_name##_##test_name##_xfail(void) \
+ { \
+ _##fixture_name##_##variant_name##_##test_name##_xfail.test = \
+--
+2.51.0
+
ipmi-fix-use-after-free-and-list-corruption-on-sender-error.patch
net-stmmac-remove-support-for-lpi_intr_o.patch
drm-amd-display-use-gfp_atomic-in-dc_create_stream_for_sink.patch
+nvme-fix-admin-queue-leak-on-controller-reset.patch
+hwmon-macsmc-fix-regressions-in-apple-silicon-smc-hw.patch
+hwmon-macsmc-fix-overflows-underflows-and-sign-exten.patch
+hwmon-aht10-fix-initialization-commands-for-aht20.patch
+pinctrl-equilibrium-rename-irq_chip-function-callbac.patch
+pinctrl-equilibrium-fix-warning-trace-on-load.patch
+pinctrl-qcom-qcs615-add-missing-dual-edge-gpio-irq-e.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+module-remove-duplicate-freeing-of-lockdep-classes.patch
+hid-multitouch-new-class-mt_cls_egalax_p80h84.patch
+pinctrl-pinconf-generic-fix-memory-leak-in-pinconf_g.patch
+pinctrl-generic-move-function-to-amlogic-am4-driver.patch
+pinctrl-meson-amlogic-a4-fix-device-node-reference-l.patch
+pinctrl-cirrus-cs42l43-fix-double-put-in-cs42l43_pin.patch
+hwmon-it87-check-the-it87_lock-return-value.patch
+idpf-increment-completion-queue-next_to_clean-in-sw-.patch
+idpf-change-irq-naming-to-match-netdev-and-ethtool-q.patch
+idpf-fix-flow-rule-delete-failure-due-to-invalid-val.patch
+ice-recap-the-vsi-and-qos-info-after-rebuild.patch
+ice-fix-crash-in-ethtool-offline-loopback-test.patch
+i40e-fix-preempt-count-leak-in-napi-poll-tracepoint.patch
+e1000e-clear-dpg_en-after-reset-to-avoid-autonomous-.patch
+drm-solomon-fix-page-start-when-updating-rectangle-i.patch
+netfs-fix-unbuffered-dio-writes-to-dispatch-subreque.patch
+nvmet-fcloop-check-remoteport-port_state-before-call.patch
+net-annotate-data-races-around-sk-sk_-data_ready-wri.patch
+bridge-check-relevant-per-vlan-options-in-vlan-range.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+nvme-multipath-fix-leak-on-try_module_get-failure.patch
+inet-annotate-data-races-around-isk-inet_num.patch
+crypto-ccp-fix-use-after-free-on-error-path.patch
+accel-amdxdna-fill-invalid-payload-for-failed-comman.patch
+udp-unhash-auto-bound-connected-sk-from-4-tuple-hash.patch
+tcp-give-up-on-stronger-sk_rcvbuf-checks-for-now.patch
+xsk-fix-fragment-node-deletion-to-prevent-buffer-lea.patch
+xsk-fix-zero-copy-af_xdp-fragment-drop.patch
+dpaa2-switch-fix-interrupt-storm-after-receiving-bad.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+net-ti-icssg-prueth-fix-ping-failure-after-offload-m.patch
+amd-xgbe-fix-mac_tcr_ss-register-width-for-2.5g-and-.patch
+regulator-mt6363-fix-incorrect-and-redundant-irq-dis.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-dummy_can-dummy_can_init-fix-packet-statistics.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+wifi-rsi-don-t-default-to-eopnotsupp-in-rsi_mac80211.patch
+drm-syncobj-fix-handle-fd-ioctls-with-dirty-stack.patch
+drm-xe-do-not-preempt-fence-signaling-cs-instruction.patch
+drm-xe-configfs-free-ctx_restore_mid_bb-in-release.patch
+drm-xe-queue-call-fini-on-exec-queue-creation-fail.patch
+blktrace-fix-__this_cpu_read-write-in-preemptible-co.patch
+rust-kunit-fix-warning-when-config_printk.patch
+kunit-tool-copy-caller-args-in-run_kernel-to-prevent.patch
+accel-amdxdna-fix-null-pointer-dereference-of-mgmt_c.patch
+drm-amd-display-use-mpc.preblend-flag-to-indicate-3d.patch
+drm-amd-display-enable-degamma-and-reject-color_pipe.patch
+net-dsa-realtek-rtl8365mb-fix-rtl8365mb_phy_ocp_writ.patch
+bpf-bonding-reject-vlan-srcmac-xmit_hash_policy-chan.patch
+octeon_ep-relocate-counter-updates-before-napi.patch
+octeon_ep-avoid-compiler-and-iq-oq-reordering.patch
+octeon_ep_vf-relocate-counter-updates-before-napi.patch
+octeon_ep_vf-avoid-compiler-and-iq-oq-reordering.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+wifi-mt76-mt7996-fix-possible-oob-access-in-mt7996_m.patch
+wifi-mt76-mt7925-fix-possible-oob-access-in-mt7925_m.patch
+wifi-mt76-fix-possible-oob-access-in-mt76_connac2_ma.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+net-rds-fix-circular-locking-dependency-in-rds_tcp_t.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+asoc-sdca-add-allocation-failure-check-for-entity-na.patch
+ice-fix-adding-aq-lldp-filter-for-vf.patch
+ice-fix-memory-leak-in-ice_set_ringparam.patch
+libie-don-t-unroll-if-fwlog-isn-t-supported.patch
+iavf-fix-netdev-max_mtu-to-respect-actual-hardware-l.patch
+igb-fix-trigger-of-incorrect-irq-in-igb_xsk_wakeup.patch
+igc-fix-trigger-of-incorrect-irq-in-igc_xsk_wakeup-f.patch
+bpf-fix-a-uaf-issue-in-bpf_trampoline_link_cgroup_sh.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch-6573
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+net-ipv4-fix-arm64-alignment-fault-in-multipath-hash.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+drm-sched-fix-kernel-doc-warning-for-drm_sched_job_d.patch
+ata-libata-cancel-pending-work-after-clearing-deferr.patch
+i2c-i801-revert-i2c-i801-replace-acpi_lock-with-i2c-.patch
+time-jiffies-fix-sysctl-file-error-on-configurations.patch
+drm-xe-gsc-fix-gsc-proxy-cleanup-on-early-initializa.patch
+drm-xe-reg_sr-fix-leak-on-xa_store-failure.patch
+nvme-fix-memory-allocation-in-nvme_pr_read_keys.patch
+x86-numa-store-extra-copy-of-numa_nodes_parsed.patch
+x86-topo-add-topology_num_nodes_per_package.patch
+x86-topo-replace-x86_has_numa_in_package.patch
+x86-topo-fix-snc-topology-mess.patch
+sched-deadline-fix-missing-enqueue_replenish-during-.patch
+timekeeping-fix-timex-status-validation-for-auxiliar.patch
+hwmon-max6639-fix-inverted-polarity.patch
+net-sched-avoid-qdisc_reset_all_tx_gt-vs-dequeue-rac.patch
+tcp-secure_seq-add-back-ports-to-ts-offset.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+net_sched-sch_fq-clear-q-band_pkt_count-in-fq_reset.patch
+net-devmem-use-read_once-write_once-on-binding-dev.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-complete-pending-data-exchange-on-device-clo.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-stmmac-fix-error-handling-in-vlan-add-and-delete.patch
+net-stmmac-improve-double-vlan-handling.patch
+net-stmmac-fix-vlan-hw-state-restore.patch
+net-stmmac-defer-vlan-hw-configuration-when-interfac.patch
+block-use-trylock-to-avoid-lockdep-circular-dependen.patch
+net-provide-a-preempt_rt-specific-check-for-netdev_q.patch
+netfilter-nf_tables-unconditionally-bump-set-nelems-.patch
+netfilter-nf_tables-clone-set-on-flush-only.patch
+netfilter-nft_set_pipapo-split-gc-into-unlink-and-re.patch
+net-ethernet-mtk_eth_soc-reset-prog-ptr-to-old_prog-.patch
+selftests-harness-order-test_f-and-xfail_add-constru.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
+net-sched-act_ife-fix-metalist-update-behavior.patch
+xdp-use-modulo-operation-to-calculate-xdp-frag-tailr.patch
+xsk-introduce-helper-to-determine-rxq-frag_size.patch
+ice-fix-rxq-info-registering-in-mbuf-packets.patch
+ice-change-xdp-rxq-frag_size-from-dma-write-length-t.patch
+i40e-fix-registering-xdp-rxq-info.patch
+i40e-use-xdp.frame_sz-as-xdp-rxq-info-frag_size.patch
+net-enetc-use-truesize-as-xdp-rxq-info-frag_size.patch
+xdp-produce-a-warning-when-calculated-tailroom-is-ne.patch
+accel-ethosu-fix-job-submit-error-clean-up-refcount-.patch
+accel-ethosu-fix-npu_op_elementwise-validation-with-.patch
+ata-libata-eh-fix-detection-of-deferred-qc-timeouts.patch
+selftest-arm64-fix-sve2p1_sigill-to-hwcap-test.patch
+tracing-add-null-pointer-check-to-trigger_data_free.patch
+bpf-collect-only-live-registers-in-linked-regs.patch
--- /dev/null
+From 6982b5212beaa8505b026480e2f79a330ba317b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:11 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ smb2_compound_op()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 12c43a062acb0ac137fc2a4a106d4d084b8c5416 ]
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: 6a5f6592a0b6 ("SMB311: Add support for query info using posix extensions (level 100)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2inode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index 6b0420a5b52a7..5ebcc68560a06 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -325,7 +325,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ cfile->fid.volatile_fid,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ } else {
+@@ -335,7 +335,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ }
+--
+2.51.0
+
--- /dev/null
+From 0c9afb72b0a7b37e2dfb4738f609c0dd5f35a6a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:12 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ SMB311_posix_query_info()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 9621b996e4db1dbc2b3dc5d5910b7d6179397320 ]
+
+SMB311_posix_query_info() is currently unused, but it may still be used in
+some stable versions, so these changes are submitted as a separate patch.
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: b1bc1874b885 ("smb311: Add support for SMB311 query info (non-compounded)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2pdu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index b16d7b42a73c4..bf4a13acc8b86 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3981,7 +3981,7 @@ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid,
+ struct smb311_posix_qinfo *data, u32 *plen)
+ {
+- size_t output_len = sizeof(struct smb311_posix_qinfo *) +
++ size_t output_len = sizeof(struct smb311_posix_qinfo) +
+ (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
+ *plen = 0;
+
+--
+2.51.0
+
--- /dev/null
+From d1c32e7ab2119f07f0010d882502c426b97df86a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:33:59 -0800
+Subject: tcp: give up on stronger sk_rcvbuf checks (for now)
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 026dfef287c07f37d4d4eef7a0b5a4bfdb29b32d ]
+
+We hit another corner case which leads to TcpExtTCPRcvQDrop
+
+Connections which send RPCs in the 20-80kB range over loopback
+experience spurious drops. The exact conditions for most of
+the drops I investigated are that:
+ - socket exchanged >1MB of data so its not completely fresh
+ - rcvbuf is around 128kB (default, hasn't grown)
+ - there is ~60kB of data in rcvq
+ - skb > 64kB arrives
+
+The sum of skb->len (!) of both of the skbs (the one already
+in rcvq and the arriving one) is larger than rwnd.
+My suspicion is that this happens because __tcp_select_window()
+rounds the rwnd up to (1 << wscale) if less than half of
+the rwnd has been consumed.
+
+Eric suggests that given the number of Fixes we already have
+pointing to 1d2fbaad7cd8 it's probably time to give up on it,
+until a bigger revamp of rmem management.
+
+Also while we could risk tweaking the rwnd math, there are other
+drops on workloads I investigated, after the commit in question,
+not explained by this phenomenon.
+
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/20260225122355.585fd57b@kernel.org
+Fixes: 1d2fbaad7cd8 ("tcp: stronger sk_rcvbuf checks")
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260227003359.2391017-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_input.c | 16 +---------------
+ 1 file changed, 1 insertion(+), 15 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index adec44313772b..1c9db9a246f71 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5116,25 +5116,11 @@ static void tcp_ofo_queue(struct sock *sk)
+ static bool tcp_prune_ofo_queue(struct sock *sk, const struct sk_buff *in_skb);
+ static int tcp_prune_queue(struct sock *sk, const struct sk_buff *in_skb);
+
+-/* Check if this incoming skb can be added to socket receive queues
+- * while satisfying sk->sk_rcvbuf limit.
+- *
+- * In theory we should use skb->truesize, but this can cause problems
+- * when applications use too small SO_RCVBUF values.
+- * When LRO / hw gro is used, the socket might have a high tp->scaling_ratio,
+- * allowing RWIN to be close to available space.
+- * Whenever the receive queue gets full, we can receive a small packet
+- * filling RWIN, but with a high skb->truesize, because most NIC use 4K page
+- * plus sk_buff metadata even when receiving less than 1500 bytes of payload.
+- *
+- * Note that we use skb->len to decide to accept or drop this packet,
+- * but sk->sk_rmem_alloc is the sum of all skb->truesize.
+- */
+ static bool tcp_can_ingest(const struct sock *sk, const struct sk_buff *skb)
+ {
+ unsigned int rmem = atomic_read(&sk->sk_rmem_alloc);
+
+- return rmem + skb->len <= sk->sk_rcvbuf;
++ return rmem <= sk->sk_rcvbuf;
+ }
+
+ static int tcp_try_rmem_schedule(struct sock *sk, const struct sk_buff *skb,
+--
+2.51.0
+
--- /dev/null
+From 336d8ed1c742f9352047931c0a7f0d8d6e6c74eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 20:55:27 +0000
+Subject: tcp: secure_seq: add back ports to TS offset
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 165573e41f2f66ef98940cf65f838b2cb575d9d1 ]
+
+This reverts 28ee1b746f49 ("secure_seq: downgrade to per-host timestamp offsets")
+
+tcp_tw_recycle went away in 2017.
+
+Zhouyan Deng reported off-path TCP source port leakage via
+SYN cookie side-channel that can be fixed in multiple ways.
+
+One of them is to bring back TCP ports in TS offset randomization.
+
+As a bonus, we perform a single siphash() computation
+to provide both an ISN and a TS offset.
+
+Fixes: 28ee1b746f49 ("secure_seq: downgrade to per-host timestamp offsets")
+Reported-by: Zhouyan Deng <dengzhouyan_nwpu@163.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Link: https://patch.msgid.link/20260302205527.1982836-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/secure_seq.h | 45 ++++++++++++++++++----
+ include/net/tcp.h | 6 ++-
+ net/core/secure_seq.c | 80 +++++++++++++++-------------------------
+ net/ipv4/syncookies.c | 11 ++++--
+ net/ipv4/tcp_input.c | 8 +++-
+ net/ipv4/tcp_ipv4.c | 37 +++++++++----------
+ net/ipv6/syncookies.c | 11 ++++--
+ net/ipv6/tcp_ipv6.c | 37 +++++++++----------
+ 8 files changed, 127 insertions(+), 108 deletions(-)
+
+diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
+index cddebafb9f779..6f996229167b3 100644
+--- a/include/net/secure_seq.h
++++ b/include/net/secure_seq.h
+@@ -5,16 +5,47 @@
+ #include <linux/types.h>
+
+ struct net;
++extern struct net init_net;
++
++union tcp_seq_and_ts_off {
++ struct {
++ u32 seq;
++ u32 ts_off;
++ };
++ u64 hash64;
++};
+
+ u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
+ u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport);
+-u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+- __be16 sport, __be16 dport);
+-u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr);
+-u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+- __be16 sport, __be16 dport);
+-u32 secure_tcpv6_ts_off(const struct net *net,
+- const __be32 *saddr, const __be32 *daddr);
++union tcp_seq_and_ts_off
++secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
++ __be16 sport, __be16 dport);
++
++static inline u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
++ __be16 sport, __be16 dport)
++{
++ union tcp_seq_and_ts_off ts;
++
++ ts = secure_tcp_seq_and_ts_off(&init_net, saddr, daddr,
++ sport, dport);
++
++ return ts.seq;
++}
++
++union tcp_seq_and_ts_off
++secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
++ const __be32 *daddr,
++ __be16 sport, __be16 dport);
++
++static inline u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
++ __be16 sport, __be16 dport)
++{
++ union tcp_seq_and_ts_off ts;
++
++ ts = secure_tcpv6_seq_and_ts_off(&init_net, saddr, daddr,
++ sport, dport);
+
++ return ts.seq;
++}
+ #endif /* _NET_SECURE_SEQ */
+diff --git a/include/net/tcp.h b/include/net/tcp.h
+index 279ddb923e656..e15e1d0e6f4e2 100644
+--- a/include/net/tcp.h
++++ b/include/net/tcp.h
+@@ -43,6 +43,7 @@
+ #include <net/dst.h>
+ #include <net/mptcp.h>
+ #include <net/xfrm.h>
++#include <net/secure_seq.h>
+
+ #include <linux/seq_file.h>
+ #include <linux/memcontrol.h>
+@@ -2437,8 +2438,9 @@ struct tcp_request_sock_ops {
+ struct flowi *fl,
+ struct request_sock *req,
+ u32 tw_isn);
+- u32 (*init_seq)(const struct sk_buff *skb);
+- u32 (*init_ts_off)(const struct net *net, const struct sk_buff *skb);
++ union tcp_seq_and_ts_off (*init_seq_and_ts_off)(
++ const struct net *net,
++ const struct sk_buff *skb);
+ int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
+ struct flowi *fl, struct request_sock *req,
+ struct tcp_fastopen_cookie *foc,
+diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
+index 9a39656804513..6a6f2cda5aaef 100644
+--- a/net/core/secure_seq.c
++++ b/net/core/secure_seq.c
+@@ -20,7 +20,6 @@
+ #include <net/tcp.h>
+
+ static siphash_aligned_key_t net_secret;
+-static siphash_aligned_key_t ts_secret;
+
+ #define EPHEMERAL_PORT_SHUFFLE_PERIOD (10 * HZ)
+
+@@ -28,11 +27,6 @@ static __always_inline void net_secret_init(void)
+ {
+ net_get_random_once(&net_secret, sizeof(net_secret));
+ }
+-
+-static __always_inline void ts_secret_init(void)
+-{
+- net_get_random_once(&ts_secret, sizeof(ts_secret));
+-}
+ #endif
+
+ #ifdef CONFIG_INET
+@@ -53,28 +47,9 @@ static u32 seq_scale(u32 seq)
+ #endif
+
+ #if IS_ENABLED(CONFIG_IPV6)
+-u32 secure_tcpv6_ts_off(const struct net *net,
+- const __be32 *saddr, const __be32 *daddr)
+-{
+- const struct {
+- struct in6_addr saddr;
+- struct in6_addr daddr;
+- } __aligned(SIPHASH_ALIGNMENT) combined = {
+- .saddr = *(struct in6_addr *)saddr,
+- .daddr = *(struct in6_addr *)daddr,
+- };
+-
+- if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
+- return 0;
+-
+- ts_secret_init();
+- return siphash(&combined, offsetofend(typeof(combined), daddr),
+- &ts_secret);
+-}
+-EXPORT_IPV6_MOD(secure_tcpv6_ts_off);
+-
+-u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+- __be16 sport, __be16 dport)
++union tcp_seq_and_ts_off
++secure_tcpv6_seq_and_ts_off(const struct net *net, const __be32 *saddr,
++ const __be32 *daddr, __be16 sport, __be16 dport)
+ {
+ const struct {
+ struct in6_addr saddr;
+@@ -87,14 +62,20 @@ u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ .sport = sport,
+ .dport = dport
+ };
+- u32 hash;
++ union tcp_seq_and_ts_off st;
+
+ net_secret_init();
+- hash = siphash(&combined, offsetofend(typeof(combined), dport),
+- &net_secret);
+- return seq_scale(hash);
++
++ st.hash64 = siphash(&combined, offsetofend(typeof(combined), dport),
++ &net_secret);
++
++ if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
++ st.ts_off = 0;
++
++ st.seq = seq_scale(st.seq);
++ return st;
+ }
+-EXPORT_SYMBOL(secure_tcpv6_seq);
++EXPORT_SYMBOL(secure_tcpv6_seq_and_ts_off);
+
+ u64 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+@@ -118,33 +99,30 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral);
+ #endif
+
+ #ifdef CONFIG_INET
+-u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr)
+-{
+- if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
+- return 0;
+-
+- ts_secret_init();
+- return siphash_2u32((__force u32)saddr, (__force u32)daddr,
+- &ts_secret);
+-}
+-
+ /* secure_tcp_seq_and_tsoff(a, b, 0, d) == secure_ipv4_port_ephemeral(a, b, d),
+ * but fortunately, `sport' cannot be 0 in any circumstances. If this changes,
+ * it would be easy enough to have the former function use siphash_4u32, passing
+ * the arguments as separate u32.
+ */
+-u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+- __be16 sport, __be16 dport)
++union tcp_seq_and_ts_off
++secure_tcp_seq_and_ts_off(const struct net *net, __be32 saddr, __be32 daddr,
++ __be16 sport, __be16 dport)
+ {
+- u32 hash;
++ u32 ports = (__force u32)sport << 16 | (__force u32)dport;
++ union tcp_seq_and_ts_off st;
+
+ net_secret_init();
+- hash = siphash_3u32((__force u32)saddr, (__force u32)daddr,
+- (__force u32)sport << 16 | (__force u32)dport,
+- &net_secret);
+- return seq_scale(hash);
++
++ st.hash64 = siphash_3u32((__force u32)saddr, (__force u32)daddr,
++ ports, &net_secret);
++
++ if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1)
++ st.ts_off = 0;
++
++ st.seq = seq_scale(st.seq);
++ return st;
+ }
+-EXPORT_SYMBOL_GPL(secure_tcp_seq);
++EXPORT_SYMBOL_GPL(secure_tcp_seq_and_ts_off);
+
+ u64 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+ {
+diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
+index 061751aabc8e1..fc3affd9c8014 100644
+--- a/net/ipv4/syncookies.c
++++ b/net/ipv4/syncookies.c
+@@ -378,9 +378,14 @@ static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk,
+ tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
+
+ if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+- tsoff = secure_tcp_ts_off(net,
+- ip_hdr(skb)->daddr,
+- ip_hdr(skb)->saddr);
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcp_seq_and_ts_off(net,
++ ip_hdr(skb)->daddr,
++ ip_hdr(skb)->saddr,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
++ tsoff = st.ts_off;
+ tcp_opt.rcv_tsecr -= tsoff;
+ }
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index 1c9db9a246f71..3e95b36fa2736 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -7411,6 +7411,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ const struct tcp_sock *tp = tcp_sk(sk);
+ struct net *net = sock_net(sk);
+ struct sock *fastopen_sk = NULL;
++ union tcp_seq_and_ts_off st;
+ struct request_sock *req;
+ bool want_cookie = false;
+ struct dst_entry *dst;
+@@ -7480,9 +7481,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ if (!dst)
+ goto drop_and_free;
+
++ if (tmp_opt.tstamp_ok || (!want_cookie && !isn))
++ st = af_ops->init_seq_and_ts_off(net, skb);
++
+ if (tmp_opt.tstamp_ok) {
+ tcp_rsk(req)->req_usec_ts = dst_tcp_usec_ts(dst);
+- tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);
++ tcp_rsk(req)->ts_off = st.ts_off;
+ }
+ if (!want_cookie && !isn) {
+ int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
+@@ -7504,7 +7508,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
+ goto drop_and_release;
+ }
+
+- isn = af_ops->init_seq(skb);
++ isn = st.seq;
+ }
+
+ tcp_ecn_create_request(req, skb, sk, dst);
+diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
+index e4e7bc8782ab6..d27965294aef3 100644
+--- a/net/ipv4/tcp_ipv4.c
++++ b/net/ipv4/tcp_ipv4.c
+@@ -104,17 +104,14 @@ static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = {
+
+ static DEFINE_MUTEX(tcp_exit_batch_mutex);
+
+-static u32 tcp_v4_init_seq(const struct sk_buff *skb)
++static union tcp_seq_and_ts_off
++tcp_v4_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb)
+ {
+- return secure_tcp_seq(ip_hdr(skb)->daddr,
+- ip_hdr(skb)->saddr,
+- tcp_hdr(skb)->dest,
+- tcp_hdr(skb)->source);
+-}
+-
+-static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
+-{
+- return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
++ return secure_tcp_seq_and_ts_off(net,
++ ip_hdr(skb)->daddr,
++ ip_hdr(skb)->saddr,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
+ }
+
+ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
+@@ -326,15 +323,16 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr_unsized *uaddr, int addr_len
+ rt = NULL;
+
+ if (likely(!tp->repair)) {
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcp_seq_and_ts_off(net,
++ inet->inet_saddr,
++ inet->inet_daddr,
++ inet->inet_sport,
++ usin->sin_port);
+ if (!tp->write_seq)
+- WRITE_ONCE(tp->write_seq,
+- secure_tcp_seq(inet->inet_saddr,
+- inet->inet_daddr,
+- inet->inet_sport,
+- usin->sin_port));
+- WRITE_ONCE(tp->tsoffset,
+- secure_tcp_ts_off(net, inet->inet_saddr,
+- inet->inet_daddr));
++ WRITE_ONCE(tp->write_seq, st.seq);
++ WRITE_ONCE(tp->tsoffset, st.ts_off);
+ }
+
+ atomic_set(&inet->inet_id, get_random_u16());
+@@ -1677,8 +1675,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+ .cookie_init_seq = cookie_v4_init_sequence,
+ #endif
+ .route_req = tcp_v4_route_req,
+- .init_seq = tcp_v4_init_seq,
+- .init_ts_off = tcp_v4_init_ts_off,
++ .init_seq_and_ts_off = tcp_v4_init_seq_and_ts_off,
+ .send_synack = tcp_v4_send_synack,
+ };
+
+diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
+index 7e007f013ec82..4f6f0d751d6c5 100644
+--- a/net/ipv6/syncookies.c
++++ b/net/ipv6/syncookies.c
+@@ -151,9 +151,14 @@ static struct request_sock *cookie_tcp_check(struct net *net, struct sock *sk,
+ tcp_parse_options(net, skb, &tcp_opt, 0, NULL);
+
+ if (tcp_opt.saw_tstamp && tcp_opt.rcv_tsecr) {
+- tsoff = secure_tcpv6_ts_off(net,
+- ipv6_hdr(skb)->daddr.s6_addr32,
+- ipv6_hdr(skb)->saddr.s6_addr32);
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcpv6_seq_and_ts_off(net,
++ ipv6_hdr(skb)->daddr.s6_addr32,
++ ipv6_hdr(skb)->saddr.s6_addr32,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
++ tsoff = st.ts_off;
+ tcp_opt.rcv_tsecr -= tsoff;
+ }
+
+diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
+index 9df81f85ec982..ca68ce16bcbe8 100644
+--- a/net/ipv6/tcp_ipv6.c
++++ b/net/ipv6/tcp_ipv6.c
+@@ -104,18 +104,14 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+ }
+ }
+
+-static u32 tcp_v6_init_seq(const struct sk_buff *skb)
++static union tcp_seq_and_ts_off
++tcp_v6_init_seq_and_ts_off(const struct net *net, const struct sk_buff *skb)
+ {
+- return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
+- ipv6_hdr(skb)->saddr.s6_addr32,
+- tcp_hdr(skb)->dest,
+- tcp_hdr(skb)->source);
+-}
+-
+-static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
+-{
+- return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
+- ipv6_hdr(skb)->saddr.s6_addr32);
++ return secure_tcpv6_seq_and_ts_off(net,
++ ipv6_hdr(skb)->daddr.s6_addr32,
++ ipv6_hdr(skb)->saddr.s6_addr32,
++ tcp_hdr(skb)->dest,
++ tcp_hdr(skb)->source);
+ }
+
+ static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
+@@ -318,14 +314,16 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr_unsized *uaddr,
+ sk_set_txhash(sk);
+
+ if (likely(!tp->repair)) {
++ union tcp_seq_and_ts_off st;
++
++ st = secure_tcpv6_seq_and_ts_off(net,
++ np->saddr.s6_addr32,
++ sk->sk_v6_daddr.s6_addr32,
++ inet->inet_sport,
++ inet->inet_dport);
+ if (!tp->write_seq)
+- WRITE_ONCE(tp->write_seq,
+- secure_tcpv6_seq(np->saddr.s6_addr32,
+- sk->sk_v6_daddr.s6_addr32,
+- inet->inet_sport,
+- inet->inet_dport));
+- tp->tsoffset = secure_tcpv6_ts_off(net, np->saddr.s6_addr32,
+- sk->sk_v6_daddr.s6_addr32);
++ WRITE_ONCE(tp->write_seq, st.seq);
++ tp->tsoffset = st.ts_off;
+ }
+
+ if (tcp_fastopen_defer_connect(sk, &err))
+@@ -814,8 +812,7 @@ const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+ .cookie_init_seq = cookie_v6_init_sequence,
+ #endif
+ .route_req = tcp_v6_route_req,
+- .init_seq = tcp_v6_init_seq,
+- .init_ts_off = tcp_v6_init_ts_off,
++ .init_seq_and_ts_off = tcp_v6_init_seq_and_ts_off,
+ .send_synack = tcp_v6_send_synack,
+ };
+
+--
+2.51.0
+
--- /dev/null
+From 55c32b28cd99e82ba21b0aac5608003dfa1214f2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 15:37:49 -0800
+Subject: time/jiffies: Fix sysctl file error on configurations where USER_HZ <
+ HZ
+
+From: Gerd Rausch <gerd.rausch@oracle.com>
+
+[ Upstream commit 6932256d3a3764f3a5e06e2cb8603be45b6a9fef ]
+
+Commit 2dc164a48e6fd ("sysctl: Create converter functions with two new
+macros") incorrectly returns error to user space when jiffies sysctl
+converter is used. The old overflow check got replaced with an
+unconditional one:
+ + if (USER_HZ < HZ)
+ + return -EINVAL;
+which will always be true on configurations with "USER_HZ < HZ".
+
+Remove the check; it is no longer needed as clock_t_to_jiffies() returns
+ULONG_MAX for the overflow case and proc_int_u2k_conv_uop() checks for
+"> INT_MAX" after conversion.
+
+Fixes: 2dc164a48e6fd ("sysctl: Create converter functions with two new macros")
+Reported-by: Colm Harrington <colm.harrington@oracle.com>
+Signed-off-by: Gerd Rausch <gerd.rausch@oracle.com>
+Signed-off-by: Joel Granados <joel.granados@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/jiffies.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
+index d31a6d40d38dc..11d09cd8037c5 100644
+--- a/kernel/time/jiffies.c
++++ b/kernel/time/jiffies.c
+@@ -162,8 +162,6 @@ EXPORT_SYMBOL(proc_dointvec_jiffies);
+ int proc_dointvec_userhz_jiffies(const struct ctl_table *table, int dir,
+ void *buffer, size_t *lenp, loff_t *ppos)
+ {
+- if (SYSCTL_USER_TO_KERN(dir) && USER_HZ < HZ)
+- return -EINVAL;
+ return proc_dointvec_conv(table, dir, buffer, lenp, ppos,
+ do_proc_int_conv_userhz_jiffies);
+ }
+--
+2.51.0
+
--- /dev/null
+From 47085d85a17c002dad8e712b98fd01bee2c0b759 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 09:51:35 +0100
+Subject: timekeeping: Fix timex status validation for auxiliary clocks
+
+From: Miroslav Lichvar <mlichvar@redhat.com>
+
+[ Upstream commit e48a869957a70cc39b4090cd27c36a86f8db9b92 ]
+
+The timekeeping_validate_timex() function validates the timex status
+of an auxiliary system clock even when the status is not to be changed,
+which causes unexpected errors for applications that make read-only
+clock_adjtime() calls, or set some other timex fields, but without
+clearing the status field.
+
+Do the AUX-specific status validation only when the modes field contains
+ADJ_STATUS, i.e. the application is actually trying to change the
+status. This makes the AUX-specific clock_adjtime() behavior consistent
+with CLOCK_REALTIME.
+
+Fixes: 4eca49d0b621 ("timekeeping: Prepare do_adtimex() for auxiliary clocks")
+Signed-off-by: Miroslav Lichvar <mlichvar@redhat.com>
+Signed-off-by: Thomas Gleixner <tglx@kernel.org>
+Link: https://patch.msgid.link/20260225085231.276751-1-mlichvar@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/timekeeping.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 91fa2003351c9..c07e562ee4c1a 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -2653,7 +2653,8 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc, bool aux
+
+ if (aux_clock) {
+ /* Auxiliary clocks are similar to TAI and do not have leap seconds */
+- if (txc->status & (STA_INS | STA_DEL))
++ if (txc->modes & ADJ_STATUS &&
++ txc->status & (STA_INS | STA_DEL))
+ return -EINVAL;
+
+ /* No TAI offset setting */
+@@ -2661,7 +2662,8 @@ static int timekeeping_validate_timex(const struct __kernel_timex *txc, bool aux
+ return -EINVAL;
+
+ /* No PPS support either */
+- if (txc->status & (STA_PPSFREQ | STA_PPSTIME))
++ if (txc->modes & ADJ_STATUS &&
++ txc->status & (STA_PPSFREQ | STA_PPSTIME))
+ return -EINVAL;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 2de55cc78cb4eb7b2d2093c40ae9110ba9462697 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:33:39 -0800
+Subject: tracing: Add NULL pointer check to trigger_data_free()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 457965c13f0837a289c9164b842d0860133f6274 ]
+
+If trigger_data_alloc() fails and returns NULL, event_hist_trigger_parse()
+jumps to the out_free error path. While kfree() safely handles a NULL
+pointer, trigger_data_free() does not. This causes a NULL pointer
+dereference in trigger_data_free() when evaluating
+data->cmd_ops->set_filter.
+
+Fix the problem by adding a NULL pointer check to trigger_data_free().
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://patch.msgid.link/20260305193339.2810953-1-linux@roeck-us.net
+Fixes: 0550069cc25f ("tracing: Properly process error handling in event_hist_trigger_parse()")
+Assisted-by: Gemini:gemini-3.1-pro
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events_trigger.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index 06b75bcfc7b8b..871e7a99d03cb 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -50,6 +50,9 @@ static int trigger_kthread_fn(void *ignore)
+
+ void trigger_data_free(struct event_trigger_data *data)
+ {
++ if (!data)
++ return;
++
+ if (data->cmd_ops->set_filter)
+ data->cmd_ops->set_filter(NULL, data, NULL);
+
+--
+2.51.0
+
--- /dev/null
+From 5ad89da81ed0f2880f4c1cd1254050f3f83e9e6f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 03:55:35 +0000
+Subject: udp: Unhash auto-bound connected sk from 4-tuple hash table when
+ disconnected.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 6996a2d2d0a64808c19c98002aeb5d9d1b2df6a4 ]
+
+Let's say we bind() an UDP socket to the wildcard address with a
+non-zero port, connect() it to an address, and disconnect it from
+the address.
+
+bind() sets SOCK_BINDPORT_LOCK on sk->sk_userlocks (but not
+SOCK_BINDADDR_LOCK), and connect() calls udp_lib_hash4() to put
+the socket into the 4-tuple hash table.
+
+Then, __udp_disconnect() calls sk->sk_prot->rehash(sk).
+
+It computes a new hash based on the wildcard address and moves
+the socket to a new slot in the 4-tuple hash table, leaving a
+garbage in the chain that no packet hits.
+
+Let's remove such a socket from 4-tuple hash table when disconnected.
+
+Note that udp_sk(sk)->udp_portaddr_hash needs to be updated after
+udp_hash4_dec(hslot2) in udp_unhash4().
+
+Fixes: 78c91ae2c6de ("ipv4/udp: Add 4-tuple hash for connected socket")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20260227035547.3321327-1-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/udp.c | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 37258b54a357e..fbdbb65676e0d 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -2268,7 +2268,6 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
+ udp_sk(sk)->udp_port_hash);
+ hslot2 = udp_hashslot2(udptable, udp_sk(sk)->udp_portaddr_hash);
+ nhslot2 = udp_hashslot2(udptable, newhash);
+- udp_sk(sk)->udp_portaddr_hash = newhash;
+
+ if (hslot2 != nhslot2 ||
+ rcu_access_pointer(sk->sk_reuseport_cb)) {
+@@ -2302,19 +2301,25 @@ void udp_lib_rehash(struct sock *sk, u16 newhash, u16 newhash4)
+ if (udp_hashed4(sk)) {
+ spin_lock_bh(&hslot->lock);
+
+- udp_rehash4(udptable, sk, newhash4);
+- if (hslot2 != nhslot2) {
+- spin_lock(&hslot2->lock);
+- udp_hash4_dec(hslot2);
+- spin_unlock(&hslot2->lock);
+-
+- spin_lock(&nhslot2->lock);
+- udp_hash4_inc(nhslot2);
+- spin_unlock(&nhslot2->lock);
++ if (inet_rcv_saddr_any(sk)) {
++ udp_unhash4(udptable, sk);
++ } else {
++ udp_rehash4(udptable, sk, newhash4);
++ if (hslot2 != nhslot2) {
++ spin_lock(&hslot2->lock);
++ udp_hash4_dec(hslot2);
++ spin_unlock(&hslot2->lock);
++
++ spin_lock(&nhslot2->lock);
++ udp_hash4_inc(nhslot2);
++ spin_unlock(&nhslot2->lock);
++ }
+ }
+
+ spin_unlock_bh(&hslot->lock);
+ }
++
++ udp_sk(sk)->udp_portaddr_hash = newhash;
+ }
+ }
+ EXPORT_IPV6_MOD(udp_lib_rehash);
+--
+2.51.0
+
--- /dev/null
+From 7f49b7682c54c6c9eec9c071631e95350178f18b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index 2002e3f9fe45b..b656afe65db07 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3deefb6ecf47c0b9cc758e1a9fde237da361531e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:16 +0100
+Subject: wifi: mt76: Fix possible oob access in
+ mt76_connac2_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 4e10a730d1b511ff49723371ed6d694dd1b2c785 ]
+
+Check frame length before accessing the mgmt fields in
+mt76_connac2_mac_write_txwi_80211 in order to avoid a possible oob
+access.
+
+Fixes: 577dbc6c656d ("mt76: mt7915: enable offloading of sequence number assignment")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-3-b0f6d1ad4850@kernel.org
+[fix check to also cover mgmt->u.action.u.addba_req.capab,
+correct Fixes tag]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 3304b5971be09..b41ca1410da92 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -413,6 +413,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + 1 + 2 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+--
+2.51.0
+
--- /dev/null
+From ba13e02c4736794f316f2ef8c3188fbdb65e45c7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:15 +0100
+Subject: wifi: mt76: mt7925: Fix possible oob access in
+ mt7925_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit c41a9abd6ae31d130e8f332e7c8800c4c866234b ]
+
+Check frame length before accessing the mgmt fields in
+mt7925_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: c948b5da6bbec ("wifi: mt76: mt7925: add Mediatek Wi-Fi7 driver for mt7925 chips")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-2-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7925/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+index 871b67101976a..0d94359004233 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c
+@@ -668,6 +668,7 @@ mt7925_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ tid = MT_TX_ADDBA;
+--
+2.51.0
+
--- /dev/null
+From b8411c52c12a758e53ba34ed8881c892d8fd04a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:14 +0100
+Subject: wifi: mt76: mt7996: Fix possible oob access in
+ mt7996_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 60862846308627e9e15546bb647a00de44deb27b ]
+
+Check frame length before accessing the mgmt fields in
+mt7996_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: 98686cd21624c ("wifi: mt76: mt7996: add driver for MediaTek Wi-Fi 7 (802.11be) devices")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-1-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 2560e2f46e89a..d4f3ee943b472 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -800,6 +800,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ if (is_mt7990(&dev->mt76))
+--
+2.51.0
+
--- /dev/null
+From 0c3473be00f5e4e3f9f8b4b11fae071eb2c7f522 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Feb 2026 17:28:04 +0100
+Subject: wifi: rsi: Don't default to -EOPNOTSUPP in rsi_mac80211_config
+
+From: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
+
+[ Upstream commit d973b1039ccde6b241b438d53297edce4de45b5c ]
+
+This triggers a WARN_ON in ieee80211_hw_conf_init and isn't the expected
+behavior from the driver - other drivers default to 0 too.
+
+Fixes: 0a44dfc07074 ("wifi: mac80211: simplify non-chanctx drivers")
+Signed-off-by: Sebastian Krzyszkowiak <sebastian.krzyszkowiak@puri.sm>
+Link: https://patch.msgid.link/20260221-rsi-config-ret-v1-1-9a8f805e2f31@puri.sm
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/rsi/rsi_91x_mac80211.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+index 8c8e074a3a705..c7ae8031436ae 100644
+--- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c
++++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c
+@@ -668,7 +668,7 @@ static int rsi_mac80211_config(struct ieee80211_hw *hw,
+ struct rsi_hw *adapter = hw->priv;
+ struct rsi_common *common = adapter->priv;
+ struct ieee80211_conf *conf = &hw->conf;
+- int status = -EOPNOTSUPP;
++ int status = 0;
+
+ mutex_lock(&common->mutex);
+
+--
+2.51.0
+
--- /dev/null
+From 29e57b6e7ef6e0a2ad8927d0fd7b9187657e488e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 12f0167d7380e..1f6b906594930 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1875,6 +1875,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1891,8 +1893,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From 2f44dfd0c0f36d2819af7466a0a59ea48c3c457e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 11:55:40 +0100
+Subject: x86/numa: Store extra copy of numa_nodes_parsed
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 48084cc153a5b0fbf0aa98d47670d3be0b9f64d5 ]
+
+The topology setup code needs to know the total number of physical
+nodes enumerated in SRAT; however NUMA_EMU can cause the existing
+numa_nodes_parsed bitmap to be fictitious. Therefore, keep a copy of
+the bitmap specifically to retain the physical node count.
+
+Suggested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Zhang Rui <rui.zhang@intel.com>
+Tested-by: Chen Yu <yu.c.chen@intel.com>
+Tested-by: Kyle Meyer <kyle.meyer@hpe.com>
+Link: https://patch.msgid.link/20260303110059.889884023@infradead.org
+Stable-dep-of: 528d89a4707e ("x86/topo: Fix SNC topology mess")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/numa.h | 6 ++++++
+ arch/x86/mm/numa.c | 8 ++++++++
+ arch/x86/mm/srat.c | 2 ++
+ 3 files changed, 16 insertions(+)
+
+diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
+index 53ba39ce010cd..a9063f332fa6e 100644
+--- a/arch/x86/include/asm/numa.h
++++ b/arch/x86/include/asm/numa.h
+@@ -22,6 +22,7 @@ extern int numa_off;
+ */
+ extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+ extern nodemask_t numa_nodes_parsed __initdata;
++extern nodemask_t numa_phys_nodes_parsed __initdata;
+
+ static inline void set_apicid_to_node(int apicid, s16 node)
+ {
+@@ -48,6 +49,7 @@ extern void __init init_cpu_to_node(void);
+ extern void numa_add_cpu(unsigned int cpu);
+ extern void numa_remove_cpu(unsigned int cpu);
+ extern void init_gi_nodes(void);
++extern int num_phys_nodes(void);
+ #else /* CONFIG_NUMA */
+ static inline void numa_set_node(int cpu, int node) { }
+ static inline void numa_clear_node(int cpu) { }
+@@ -55,6 +57,10 @@ static inline void init_cpu_to_node(void) { }
+ static inline void numa_add_cpu(unsigned int cpu) { }
+ static inline void numa_remove_cpu(unsigned int cpu) { }
+ static inline void init_gi_nodes(void) { }
++static inline int num_phys_nodes(void)
++{
++ return 1;
++}
+ #endif /* CONFIG_NUMA */
+
+ #ifdef CONFIG_DEBUG_PER_CPU_MAPS
+diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
+index 7a97327140df8..99d0a9332c145 100644
+--- a/arch/x86/mm/numa.c
++++ b/arch/x86/mm/numa.c
+@@ -48,6 +48,8 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = {
+ [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+ };
+
++nodemask_t numa_phys_nodes_parsed __initdata;
++
+ int numa_cpu_node(int cpu)
+ {
+ u32 apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+@@ -57,6 +59,11 @@ int numa_cpu_node(int cpu)
+ return NUMA_NO_NODE;
+ }
+
++int __init num_phys_nodes(void)
++{
++ return bitmap_weight(numa_phys_nodes_parsed.bits, MAX_NUMNODES);
++}
++
+ cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
+ EXPORT_SYMBOL(node_to_cpumask_map);
+
+@@ -210,6 +217,7 @@ static int __init dummy_numa_init(void)
+ 0LLU, PFN_PHYS(max_pfn) - 1);
+
+ node_set(0, numa_nodes_parsed);
++ node_set(0, numa_phys_nodes_parsed);
+ numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
+
+ return 0;
+diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
+index 6f8e0f21c7103..44ca666517561 100644
+--- a/arch/x86/mm/srat.c
++++ b/arch/x86/mm/srat.c
+@@ -57,6 +57,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
+ }
+ set_apicid_to_node(apic_id, node);
+ node_set(node, numa_nodes_parsed);
++ node_set(node, numa_phys_nodes_parsed);
+ pr_debug("SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", pxm, apic_id, node);
+ }
+
+@@ -97,6 +98,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+
+ set_apicid_to_node(apic_id, node);
+ node_set(node, numa_nodes_parsed);
++ node_set(node, numa_phys_nodes_parsed);
+ pr_debug("SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", pxm, apic_id, node);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From e495cb9de51e3f08b75046076e27e1c49f7e18ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 11:55:41 +0100
+Subject: x86/topo: Add topology_num_nodes_per_package()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit ae6730ff42b3a13d94b405edeb5e40108b6d21b6 ]
+
+Use the MADT and SRAT table data to compute __num_nodes_per_package.
+
+Specifically, SRAT has already been parsed in x86_numa_init(), which is called
+before acpi_boot_init() which parses MADT. So both are available in
+topology_init_possible_cpus().
+
+This number is useful to divinate the various Intel CoD/SNC and AMD NPS modes,
+since the platforms are failing to provide this otherwise.
+
+Doing it this way is independent of the number of online CPUs and
+other such shenanigans.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Tony Luck <tony.luck@intel.com>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Zhang Rui <rui.zhang@intel.com>
+Tested-by: Chen Yu <yu.c.chen@intel.com>
+Tested-by: Kyle Meyer <kyle.meyer@hpe.com>
+Link: https://patch.msgid.link/20260303110100.004091624@infradead.org
+Stable-dep-of: 528d89a4707e ("x86/topo: Fix SNC topology mess")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/topology.h | 6 ++++++
+ arch/x86/kernel/cpu/common.c | 3 +++
+ arch/x86/kernel/cpu/topology.c | 13 +++++++++++--
+ 3 files changed, 20 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
+index 1fadf0cf520c5..0ba9bdb998717 100644
+--- a/arch/x86/include/asm/topology.h
++++ b/arch/x86/include/asm/topology.h
+@@ -155,6 +155,7 @@ extern unsigned int __max_logical_packages;
+ extern unsigned int __max_threads_per_core;
+ extern unsigned int __num_threads_per_package;
+ extern unsigned int __num_cores_per_package;
++extern unsigned int __num_nodes_per_package;
+
+ const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c);
+ enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c);
+@@ -179,6 +180,11 @@ static inline unsigned int topology_num_threads_per_package(void)
+ return __num_threads_per_package;
+ }
+
++static inline unsigned int topology_num_nodes_per_package(void)
++{
++ return __num_nodes_per_package;
++}
++
+ #ifdef CONFIG_X86_LOCAL_APIC
+ int topology_get_logical_id(u32 apicid, enum x86_topology_domains at_level);
+ #else
+diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
+index e7ab22fce3b57..5edafdc9680f1 100644
+--- a/arch/x86/kernel/cpu/common.c
++++ b/arch/x86/kernel/cpu/common.c
+@@ -95,6 +95,9 @@ EXPORT_SYMBOL(__max_dies_per_package);
+ unsigned int __max_logical_packages __ro_after_init = 1;
+ EXPORT_SYMBOL(__max_logical_packages);
+
++unsigned int __num_nodes_per_package __ro_after_init = 1;
++EXPORT_SYMBOL(__num_nodes_per_package);
++
+ unsigned int __num_cores_per_package __ro_after_init = 1;
+ EXPORT_SYMBOL(__num_cores_per_package);
+
+diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
+index 23190a786d310..eafcb1fc185ad 100644
+--- a/arch/x86/kernel/cpu/topology.c
++++ b/arch/x86/kernel/cpu/topology.c
+@@ -31,6 +31,7 @@
+ #include <asm/mpspec.h>
+ #include <asm/msr.h>
+ #include <asm/smp.h>
++#include <asm/numa.h>
+
+ #include "cpu.h"
+
+@@ -492,11 +493,19 @@ void __init topology_init_possible_cpus(void)
+ set_nr_cpu_ids(allowed);
+
+ cnta = domain_weight(TOPO_PKG_DOMAIN);
+- cntb = domain_weight(TOPO_DIE_DOMAIN);
+ __max_logical_packages = cnta;
++
++ pr_info("Max. logical packages: %3u\n", __max_logical_packages);
++
++ cntb = num_phys_nodes();
++ __num_nodes_per_package = DIV_ROUND_UP(cntb, cnta);
++
++ pr_info("Max. logical nodes: %3u\n", cntb);
++ pr_info("Num. nodes per package:%3u\n", __num_nodes_per_package);
++
++ cntb = domain_weight(TOPO_DIE_DOMAIN);
+ __max_dies_per_package = 1U << (get_count_order(cntb) - get_count_order(cnta));
+
+- pr_info("Max. logical packages: %3u\n", cnta);
+ pr_info("Max. logical dies: %3u\n", cntb);
+ pr_info("Max. dies per package: %3u\n", __max_dies_per_package);
+
+--
+2.51.0
+
--- /dev/null
+From f941c9529a96202c822e67fe316498a9ddb6c6e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 11:55:43 +0100
+Subject: x86/topo: Fix SNC topology mess
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 528d89a4707e5bfd86e30823c45dbb66877df900 ]
+
+Per 4d6dd05d07d0 ("sched/topology: Fix sched domain build error for GNR, CWF in
+SNC-3 mode"), the original crazy SNC-3 SLIT table was:
+
+node distances:
+node 0 1 2 3 4 5
+ 0: 10 15 17 21 28 26
+ 1: 15 10 15 23 26 23
+ 2: 17 15 10 26 23 21
+ 3: 21 28 26 10 15 17
+ 4: 23 26 23 15 10 15
+ 5: 26 23 21 17 15 10
+
+And per:
+
+ https://lore.kernel.org/lkml/20250825075642.GQ3245006@noisy.programming.kicks-ass.net/
+
+The suggestion was to average the off-trace clusters to restore sanity.
+
+However, 4d6dd05d07d0 implements this under various assumptions:
+
+ - anything GNR/CWF with numa_in_package;
+ - there will never be more than 2 packages;
+ - the off-trace cluster will have distance >20
+
+And then HPE shows up with a machine that matches the
+Vendor-Family-Model checks but looks like this:
+
+Here's an 8 socket (2 chassis) HPE system with SNC enabled:
+
+node 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
+ 0: 10 12 16 16 16 16 18 18 40 40 40 40 40 40 40 40
+ 1: 12 10 16 16 16 16 18 18 40 40 40 40 40 40 40 40
+ 2: 16 16 10 12 18 18 16 16 40 40 40 40 40 40 40 40
+ 3: 16 16 12 10 18 18 16 16 40 40 40 40 40 40 40 40
+ 4: 16 16 18 18 10 12 16 16 40 40 40 40 40 40 40 40
+ 5: 16 16 18 18 12 10 16 16 40 40 40 40 40 40 40 40
+ 6: 18 18 16 16 16 16 10 12 40 40 40 40 40 40 40 40
+ 7: 18 18 16 16 16 16 12 10 40 40 40 40 40 40 40 40
+ 8: 40 40 40 40 40 40 40 40 10 12 16 16 16 16 18 18
+ 9: 40 40 40 40 40 40 40 40 12 10 16 16 16 16 18 18
+ 10: 40 40 40 40 40 40 40 40 16 16 10 12 18 18 16 16
+ 11: 40 40 40 40 40 40 40 40 16 16 12 10 18 18 16 16
+ 12: 40 40 40 40 40 40 40 40 16 16 18 18 10 12 16 16
+ 13: 40 40 40 40 40 40 40 40 16 16 18 18 12 10 16 16
+ 14: 40 40 40 40 40 40 40 40 18 18 16 16 16 16 10 12
+ 15: 40 40 40 40 40 40 40 40 18 18 16 16 16 16 12 10
+
+ 10 = Same chassis and socket
+ 12 = Same chassis and socket (SNC)
+ 16 = Same chassis and adjacent socket
+ 18 = Same chassis and non-adjacent socket
+ 40 = Different chassis
+
+Turns out, the 'max 2 packages' thing is only relevant to the SNC-3 parts, the
+smaller parts do 8 sockets (like usual). The above SLIT table is sane, but
+violates the previous assumptions and trips a WARN.
+
+Now that the topology code has a sensible measure of nodes-per-package, we can
+use that to divinate the SNC mode at hand, and only fix up SNC-3 topologies.
+
+There is a 'healthy' amount of paranoia code validating the assumptions on the
+SLIT table, a simple pr_err(FW_BUG) print on failure and a fallback to using
+the regular table. Lets see how long this lasts :-)
+
+Fixes: 4d6dd05d07d0 ("sched/topology: Fix sched domain build error for GNR, CWF in SNC-3 mode")
+Reported-by: Kyle Meyer <kyle.meyer@hpe.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Zhang Rui <rui.zhang@intel.com>
+Tested-by: Chen Yu <yu.c.chen@intel.com>
+Tested-by: Kyle Meyer <kyle.meyer@hpe.com>
+Link: https://patch.msgid.link/20260303110100.238361290@infradead.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/smpboot.c | 190 ++++++++++++++++++++++++++++----------
+ 1 file changed, 143 insertions(+), 47 deletions(-)
+
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index db3e481cdbb2e..294a8ea602986 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -506,33 +506,149 @@ static void __init build_sched_topology(void)
+ }
+
+ #ifdef CONFIG_NUMA
+-static int sched_avg_remote_distance;
+-static int avg_remote_numa_distance(void)
++/*
++ * Test if the on-trace cluster at (N,N) is symmetric.
++ * Uses upper triangle iteration to avoid obvious duplicates.
++ */
++static bool slit_cluster_symmetric(int N)
+ {
+- int i, j;
+- int distance, nr_remote, total_distance;
+-
+- if (sched_avg_remote_distance > 0)
+- return sched_avg_remote_distance;
+-
+- nr_remote = 0;
+- total_distance = 0;
+- for_each_node_state(i, N_CPU) {
+- for_each_node_state(j, N_CPU) {
+- distance = node_distance(i, j);
+-
+- if (distance >= REMOTE_DISTANCE) {
+- nr_remote++;
+- total_distance += distance;
+- }
++ int u = topology_num_nodes_per_package();
++
++ for (int k = 0; k < u; k++) {
++ for (int l = k; l < u; l++) {
++ if (node_distance(N + k, N + l) !=
++ node_distance(N + l, N + k))
++ return false;
+ }
+ }
+- if (nr_remote)
+- sched_avg_remote_distance = total_distance / nr_remote;
+- else
+- sched_avg_remote_distance = REMOTE_DISTANCE;
+
+- return sched_avg_remote_distance;
++ return true;
++}
++
++/*
++ * Return the package-id of the cluster, or ~0 if indeterminate.
++ * Each node in the on-trace cluster should have the same package-id.
++ */
++static u32 slit_cluster_package(int N)
++{
++ int u = topology_num_nodes_per_package();
++ u32 pkg_id = ~0;
++
++ for (int n = 0; n < u; n++) {
++ const struct cpumask *cpus = cpumask_of_node(N + n);
++ int cpu;
++
++ for_each_cpu(cpu, cpus) {
++ u32 id = topology_logical_package_id(cpu);
++
++ if (pkg_id == ~0)
++ pkg_id = id;
++ if (pkg_id != id)
++ return ~0;
++ }
++ }
++
++ return pkg_id;
++}
++
++/*
++ * Validate the SLIT table is of the form expected for SNC, specifically:
++ *
++ * - each on-trace cluster should be symmetric,
++ * - each on-trace cluster should have a unique package-id.
++ *
++ * If you NUMA_EMU on top of SNC, you get to keep the pieces.
++ */
++static bool slit_validate(void)
++{
++ int u = topology_num_nodes_per_package();
++ u32 pkg_id, prev_pkg_id = ~0;
++
++ for (int pkg = 0; pkg < topology_max_packages(); pkg++) {
++ int n = pkg * u;
++
++ /*
++ * Ensure the on-trace cluster is symmetric and each cluster
++ * has a different package id.
++ */
++ if (!slit_cluster_symmetric(n))
++ return false;
++ pkg_id = slit_cluster_package(n);
++ if (pkg_id == ~0)
++ return false;
++ if (pkg && pkg_id == prev_pkg_id)
++ return false;
++
++ prev_pkg_id = pkg_id;
++ }
++
++ return true;
++}
++
++/*
++ * Compute a sanitized SLIT table for SNC; notably SNC-3 can end up with
++ * asymmetric off-trace clusters, reflecting physical assymmetries. However
++ * this leads to 'unfortunate' sched_domain configurations.
++ *
++ * For example dual socket GNR with SNC-3:
++ *
++ * node distances:
++ * node 0 1 2 3 4 5
++ * 0: 10 15 17 21 28 26
++ * 1: 15 10 15 23 26 23
++ * 2: 17 15 10 26 23 21
++ * 3: 21 28 26 10 15 17
++ * 4: 23 26 23 15 10 15
++ * 5: 26 23 21 17 15 10
++ *
++ * Fix things up by averaging out the off-trace clusters; resulting in:
++ *
++ * node 0 1 2 3 4 5
++ * 0: 10 15 17 24 24 24
++ * 1: 15 10 15 24 24 24
++ * 2: 17 15 10 24 24 24
++ * 3: 24 24 24 10 15 17
++ * 4: 24 24 24 15 10 15
++ * 5: 24 24 24 17 15 10
++ */
++static int slit_cluster_distance(int i, int j)
++{
++ static int slit_valid = -1;
++ int u = topology_num_nodes_per_package();
++ long d = 0;
++ int x, y;
++
++ if (slit_valid < 0) {
++ slit_valid = slit_validate();
++ if (!slit_valid)
++ pr_err(FW_BUG "SLIT table doesn't have the expected form for SNC -- fixup disabled!\n");
++ else
++ pr_info("Fixing up SNC SLIT table.\n");
++ }
++
++ /*
++ * Is this a unit cluster on the trace?
++ */
++ if ((i / u) == (j / u) || !slit_valid)
++ return node_distance(i, j);
++
++ /*
++ * Off-trace cluster.
++ *
++ * Notably average out the symmetric pair of off-trace clusters to
++ * ensure the resulting SLIT table is symmetric.
++ */
++ x = i - (i % u);
++ y = j - (j % u);
++
++ for (i = x; i < x + u; i++) {
++ for (j = y; j < y + u; j++) {
++ d += node_distance(i, j);
++ d += node_distance(j, i);
++ }
++ }
++
++ return d / (2*u*u);
+ }
+
+ int arch_sched_node_distance(int from, int to)
+@@ -542,34 +658,14 @@ int arch_sched_node_distance(int from, int to)
+ switch (boot_cpu_data.x86_vfm) {
+ case INTEL_GRANITERAPIDS_X:
+ case INTEL_ATOM_DARKMONT_X:
+-
+- if (topology_max_packages() == 1 || topology_num_nodes_per_package() == 1 ||
+- d < REMOTE_DISTANCE)
++ if (topology_max_packages() == 1 ||
++ topology_num_nodes_per_package() < 3)
+ return d;
+
+ /*
+- * With SNC enabled, there could be too many levels of remote
+- * NUMA node distances, creating NUMA domain levels
+- * including local nodes and partial remote nodes.
+- *
+- * Trim finer distance tuning for NUMA nodes in remote package
+- * for the purpose of building sched domains. Group NUMA nodes
+- * in the remote package in the same sched group.
+- * Simplify NUMA domains and avoid extra NUMA levels including
+- * different remote NUMA nodes and local nodes.
+- *
+- * GNR and CWF don't expect systems with more than 2 packages
+- * and more than 2 hops between packages. Single average remote
+- * distance won't be appropriate if there are more than 2
+- * packages as average distance to different remote packages
+- * could be different.
++ * Handle SNC-3 asymmetries.
+ */
+- WARN_ONCE(topology_max_packages() > 2,
+- "sched: Expect only up to 2 packages for GNR or CWF, "
+- "but saw %d packages when building sched domains.",
+- topology_max_packages());
+-
+- d = avg_remote_numa_distance();
++ return slit_cluster_distance(from, to);
+ }
+ return d;
+ }
+--
+2.51.0
+
--- /dev/null
+From 411d6670d45f05bc28e8167118d21484d399e9bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 11:55:42 +0100
+Subject: x86/topo: Replace x86_has_numa_in_package
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+[ Upstream commit 717b64d58cff6fb97f97be07e382ed7641167a56 ]
+
+.. with the brand spanking new topology_num_nodes_per_package().
+
+Having the topology setup determine this value during MADT/SRAT parsing before
+SMP bringup avoids having to detect this situation when building the SMP
+topology masks.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Ingo Molnar <mingo@kernel.org>
+Tested-by: Tony Luck <tony.luck@intel.com>
+Tested-by: K Prateek Nayak <kprateek.nayak@amd.com>
+Tested-by: Zhang Rui <rui.zhang@intel.com>
+Tested-by: Chen Yu <yu.c.chen@intel.com>
+Tested-by: Kyle Meyer <kyle.meyer@hpe.com>
+Link: https://patch.msgid.link/20260303110100.123701837@infradead.org
+Stable-dep-of: 528d89a4707e ("x86/topo: Fix SNC topology mess")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kernel/smpboot.c | 13 +++----------
+ 1 file changed, 3 insertions(+), 10 deletions(-)
+
+diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
+index 5cd6950ab672a..db3e481cdbb2e 100644
+--- a/arch/x86/kernel/smpboot.c
++++ b/arch/x86/kernel/smpboot.c
+@@ -468,13 +468,6 @@ static int x86_cluster_flags(void)
+ }
+ #endif
+
+-/*
+- * Set if a package/die has multiple NUMA nodes inside.
+- * AMD Magny-Cours, Intel Cluster-on-Die, and Intel
+- * Sub-NUMA Clustering have this.
+- */
+-static bool x86_has_numa_in_package;
+-
+ static struct sched_domain_topology_level x86_topology[] = {
+ SDTL_INIT(tl_smt_mask, cpu_smt_flags, SMT),
+ #ifdef CONFIG_SCHED_CLUSTER
+@@ -496,7 +489,7 @@ static void __init build_sched_topology(void)
+ * PKG domain since the NUMA domains will auto-magically create the
+ * right spanning domains based on the SLIT.
+ */
+- if (x86_has_numa_in_package) {
++ if (topology_num_nodes_per_package() > 1) {
+ unsigned int pkgdom = ARRAY_SIZE(x86_topology) - 2;
+
+ memset(&x86_topology[pkgdom], 0, sizeof(x86_topology[pkgdom]));
+@@ -550,7 +543,7 @@ int arch_sched_node_distance(int from, int to)
+ case INTEL_GRANITERAPIDS_X:
+ case INTEL_ATOM_DARKMONT_X:
+
+- if (!x86_has_numa_in_package || topology_max_packages() == 1 ||
++ if (topology_max_packages() == 1 || topology_num_nodes_per_package() == 1 ||
+ d < REMOTE_DISTANCE)
+ return d;
+
+@@ -606,7 +599,7 @@ void set_cpu_sibling_map(int cpu)
+ o = &cpu_data(i);
+
+ if (match_pkg(c, o) && !topology_same_node(c, o))
+- x86_has_numa_in_package = true;
++ WARN_ON_ONCE(topology_num_nodes_per_package() == 1);
+
+ if ((i == cpu) || (has_smt && match_smt(c, o)))
+ link_mask(topology_sibling_cpumask, cpu, i);
+--
+2.51.0
+
--- /dev/null
+From 8fb5d2d2d779ba28764dd9923753ace09d5c0640 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:50 +0100
+Subject: xdp: produce a warning when calculated tailroom is negative
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8821e857759be9db3cde337ad328b71fe5c8a55f ]
+
+Many ethernet drivers report xdp Rx queue frag size as being the same as
+DMA write size. However, the only user of this field, namely
+bpf_xdp_frags_increase_tail(), clearly expects a truesize.
+
+Such difference leads to unspecific memory corruption issues under certain
+circumstances, e.g. in ixgbevf maximum DMA write size is 3 KB, so when
+running xskxceiver's XDP_ADJUST_TAIL_GROW_MULTI_BUFF, 6K packet fully uses
+all DMA-writable space in 2 buffers. This would be fine, if only
+rxq->frag_size was properly set to 4K, but value of 3K results in a
+negative tailroom, because there is a non-zero page offset.
+
+We are supposed to return -EINVAL and be done with it in such case, but due
+to tailroom being stored as an unsigned int, it is reported to be somewhere
+near UINT_MAX, resulting in a tail being grown, even if the requested
+offset is too much (it is around 2K in the abovementioned test). This later
+leads to all kinds of unspecific calltraces.
+
+[ 7340.337579] xskxceiver[1440]: segfault at 1da718 ip 00007f4161aeac9d sp 00007f41615a6a00 error 6
+[ 7340.338040] xskxceiver[1441]: segfault at 7f410000000b ip 00000000004042b5 sp 00007f415bffecf0 error 4
+[ 7340.338179] in libc.so.6[61c9d,7f4161aaf000+160000]
+[ 7340.339230] in xskxceiver[42b5,400000+69000]
+[ 7340.340300] likely on CPU 6 (core 0, socket 6)
+[ 7340.340302] Code: ff ff 01 e9 f4 fe ff ff 0f 1f 44 00 00 4c 39 f0 74 73 31 c0 ba 01 00 00 00 f0 0f b1 17 0f 85 ba 00 00 00 49 8b 87 88 00 00 00 <4c> 89 70 08 eb cc 0f 1f 44 00 00 48 8d bd f0 fe ff ff 89 85 ec fe
+[ 7340.340888] likely on CPU 3 (core 0, socket 3)
+[ 7340.345088] Code: 00 00 00 ba 00 00 00 00 be 00 00 00 00 89 c7 e8 31 ca ff ff 89 45 ec 8b 45 ec 85 c0 78 07 b8 00 00 00 00 eb 46 e8 0b c8 ff ff <8b> 00 83 f8 69 74 24 e8 ff c7 ff ff 8b 00 83 f8 0b 74 18 e8 f3 c7
+[ 7340.404334] Oops: general protection fault, probably for non-canonical address 0x6d255010bdffc: 0000 [#1] SMP NOPTI
+[ 7340.405972] CPU: 7 UID: 0 PID: 1439 Comm: xskxceiver Not tainted 6.19.0-rc1+ #21 PREEMPT(lazy)
+[ 7340.408006] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-5.fc42 04/01/2014
+[ 7340.409716] RIP: 0010:lookup_swap_cgroup_id+0x44/0x80
+[ 7340.410455] Code: 83 f8 1c 73 39 48 ba ff ff ff ff ff ff ff 03 48 8b 04 c5 20 55 fa bd 48 21 d1 48 89 ca 83 e1 01 48 d1 ea c1 e1 04 48 8d 04 90 <8b> 00 48 83 c4 10 d3 e8 c3 cc cc cc cc 31 c0 e9 98 b7 dd 00 48 89
+[ 7340.412787] RSP: 0018:ffffcc5c04f7f6d0 EFLAGS: 00010202
+[ 7340.413494] RAX: 0006d255010bdffc RBX: ffff891f477895a8 RCX: 0000000000000010
+[ 7340.414431] RDX: 0001c17e3fffffff RSI: 00fa070000000000 RDI: 000382fc7fffffff
+[ 7340.415354] RBP: 00fa070000000000 R08: ffffcc5c04f7f8f8 R09: ffffcc5c04f7f7d0
+[ 7340.416283] R10: ffff891f4c1a7000 R11: ffffcc5c04f7f9c8 R12: ffffcc5c04f7f7d0
+[ 7340.417218] R13: 03ffffffffffffff R14: 00fa06fffffffe00 R15: ffff891f47789500
+[ 7340.418229] FS: 0000000000000000(0000) GS:ffff891ffdfaa000(0000) knlGS:0000000000000000
+[ 7340.419489] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 7340.420286] CR2: 00007f415bfffd58 CR3: 0000000103f03002 CR4: 0000000000772ef0
+[ 7340.421237] PKRU: 55555554
+[ 7340.421623] Call Trace:
+[ 7340.421987] <TASK>
+[ 7340.422309] ? softleaf_from_pte+0x77/0xa0
+[ 7340.422855] swap_pte_batch+0xa7/0x290
+[ 7340.423363] zap_nonpresent_ptes.constprop.0.isra.0+0xd1/0x270
+[ 7340.424102] zap_pte_range+0x281/0x580
+[ 7340.424607] zap_pmd_range.isra.0+0xc9/0x240
+[ 7340.425177] unmap_page_range+0x24d/0x420
+[ 7340.425714] unmap_vmas+0xa1/0x180
+[ 7340.426185] exit_mmap+0xe1/0x3b0
+[ 7340.426644] __mmput+0x41/0x150
+[ 7340.427098] exit_mm+0xb1/0x110
+[ 7340.427539] do_exit+0x1b2/0x460
+[ 7340.427992] do_group_exit+0x2d/0xc0
+[ 7340.428477] get_signal+0x79d/0x7e0
+[ 7340.428957] arch_do_signal_or_restart+0x34/0x100
+[ 7340.429571] exit_to_user_mode_loop+0x8e/0x4c0
+[ 7340.430159] do_syscall_64+0x188/0x6b0
+[ 7340.430672] ? __do_sys_clone3+0xd9/0x120
+[ 7340.431212] ? switch_fpu_return+0x4e/0xd0
+[ 7340.431761] ? arch_exit_to_user_mode_prepare.isra.0+0xa1/0xc0
+[ 7340.432498] ? do_syscall_64+0xbb/0x6b0
+[ 7340.433015] ? __handle_mm_fault+0x445/0x690
+[ 7340.433582] ? count_memcg_events+0xd6/0x210
+[ 7340.434151] ? handle_mm_fault+0x212/0x340
+[ 7340.434697] ? do_user_addr_fault+0x2b4/0x7b0
+[ 7340.435271] ? clear_bhb_loop+0x30/0x80
+[ 7340.435788] ? clear_bhb_loop+0x30/0x80
+[ 7340.436299] ? clear_bhb_loop+0x30/0x80
+[ 7340.436812] ? clear_bhb_loop+0x30/0x80
+[ 7340.437323] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 7340.437973] RIP: 0033:0x7f4161b14169
+[ 7340.438468] Code: Unable to access opcode bytes at 0x7f4161b1413f.
+[ 7340.439242] RSP: 002b:00007ffc6ebfa770 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
+[ 7340.440173] RAX: fffffffffffffe00 RBX: 00000000000005a1 RCX: 00007f4161b14169
+[ 7340.441061] RDX: 00000000000005a1 RSI: 0000000000000109 RDI: 00007f415bfff990
+[ 7340.441943] RBP: 00007ffc6ebfa7a0 R08: 0000000000000000 R09: 00000000ffffffff
+[ 7340.442824] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+[ 7340.443707] R13: 0000000000000000 R14: 00007f415bfff990 R15: 00007f415bfff6c0
+[ 7340.444586] </TASK>
+[ 7340.444922] Modules linked in: rfkill intel_rapl_msr intel_rapl_common intel_uncore_frequency_common skx_edac_common nfit libnvdimm kvm_intel vfat fat kvm snd_pcm irqbypass rapl iTCO_wdt snd_timer intel_pmc_bxt iTCO_vendor_support snd ixgbevf virtio_net soundcore i2c_i801 pcspkr libeth_xdp net_failover i2c_smbus lpc_ich failover libeth virtio_balloon joydev 9p fuse loop zram lz4hc_compress lz4_compress 9pnet_virtio 9pnet netfs ghash_clmulni_intel serio_raw qemu_fw_cfg
+[ 7340.449650] ---[ end trace 0000000000000000 ]---
+
+The issue can be fixed in all in-tree drivers, but we cannot just trust OOT
+drivers to not do this. Therefore, make tailroom a signed int and produce a
+warning when it is negative to prevent such mistakes in the future.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-10-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index f82996e63dd72..8bbf24c15413e 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4151,13 +4151,14 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+ struct xdp_rxq_info *rxq = xdp->rxq;
+- unsigned int tailroom;
++ int tailroom;
+
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+ tailroom = rxq->frag_size - skb_frag_size(frag) -
+ skb_frag_off(frag) % rxq->frag_size;
++ WARN_ON_ONCE(tailroom < 0);
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From fbe5a019ab4cf26c84dc16dd775be75c2cb383bc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:42 +0100
+Subject: xdp: use modulo operation to calculate XDP frag tailroom
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 88b6b7f7b216108a09887b074395fa7b751880b1 ]
+
+The current formula for calculating XDP tailroom in mbuf packets works only
+if each frag has its own page (if rxq->frag_size is PAGE_SIZE), this
+defeats the purpose of the parameter overall and without any indication
+leads to negative calculated tailroom on at least half of frags, if shared
+pages are used.
+
+There are not many drivers that set rxq->frag_size. Among them:
+* i40e and enetc always split page uniformly between frags, use shared
+ pages
+* ice uses page_pool frags via libeth, those are power-of-2 and uniformly
+ distributed across page
+* idpf has variable frag_size with XDP on, so current API is not applicable
+* mlx5, mtk and mvneta use PAGE_SIZE or 0 as frag_size for page_pool
+
+As for AF_XDP ZC, only ice, i40e and idpf declare frag_size for it. Modulo
+operation yields good results for aligned chunks, they are all power-of-2,
+between 2K and PAGE_SIZE. Formula without modulo fails when chunk_size is
+2K. Buffers in unaligned mode are not distributed uniformly, so modulo
+operation would not work.
+
+To accommodate unaligned buffers, we could define frag_size as
+data + tailroom, and hence do not subtract offset when calculating
+tailroom, but this would necessitate more changes in the drivers.
+
+Define rxq->frag_size as an even portion of a page that fully belongs to a
+single frag. When calculating tailroom, locate the data start within such
+portion by performing a modulo operation on page offset.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-2-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 51318cb40f778..f82996e63dd72 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4156,7 +4156,8 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+- tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
++ tailroom = rxq->frag_size - skb_frag_size(frag) -
++ skb_frag_off(frag) % rxq->frag_size;
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From 02840765b9a96de428cfb15ae6db8311f882efee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index f2e8eaf684ba6..8d1860bd5d578 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -379,11 +379,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From 05241e959ffdbe6d7b52d4d074e47f2b65a2bb77 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:26 +0000
+Subject: xsk: Fix fragment node deletion to prevent buffer leak
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit 60abb0ac11dccd6b98fd9182bc5f85b621688861 ]
+
+After commit b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node"),
+the list_node field is reused for both the xskb pool list and the buffer
+free list. This causes a buffer leak as described below.
+
+xp_free() checks if a buffer is already on the free list using
+list_empty(&xskb->list_node). When list_del() is used to remove a node
+from the xskb pool list, it doesn't reinitialize the node pointers.
+This means list_empty() will return false even after the node has been
+removed, causing xp_free() to incorrectly skip adding the buffer to the
+free list.
+
+Fix this by using list_del_init() instead of list_del() in all fragment
+handling paths. This ensures the list node is reinitialized after removal,
+allowing the list_empty() to work correctly.
+
+Fixes: b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node")
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-2-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 6 +++---
+ net/xdp/xsk.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 242e34f771cca..aefc368449d59 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -122,7 +122,7 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ goto out;
+
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ xp_free(pos);
+ }
+
+@@ -157,7 +157,7 @@ static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+ struct xdp_buff_xsk, list_node);
+ if (frag) {
+- list_del(&frag->list_node);
++ list_del_init(&frag->list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -168,7 +168,7 @@ static inline void xsk_buff_del_frag(struct xdp_buff *xdp)
+ {
+ struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->list_node);
++ list_del_init(&xskb->list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_head(struct xdp_buff *first)
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index f093c3453f64c..f2ec4f78bbb6a 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -186,7 +186,7 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ }
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 1b545e20574c0656a856e9359239788a7e169b84 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:27 +0000
+Subject: xsk: Fix zero-copy AF_XDP fragment drop
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit f7387d6579d65efd490a864254101cb665f2e7a7 ]
+
+AF_XDP should ensure that only a complete packet is sent to application.
+In the zero-copy case, if the Rx queue gets full as fragments are being
+enqueued, the remaining fragments are dropped.
+
+For the multi-buffer case, add a check to ensure that the Rx queue has
+enough space for all fragments of a packet before starting to enqueue
+them.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-3-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xdp/xsk.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index f2ec4f78bbb6a..a6d3938154f21 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -167,25 +167,31 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ struct xdp_buff_xsk *pos, *tmp;
+ struct list_head *xskb_list;
+ u32 contd = 0;
++ u32 num_desc;
+ int err;
+
+- if (frags)
+- contd = XDP_PKT_CONTD;
++ if (likely(!frags)) {
++ err = __xsk_rcv_zc(xs, xskb, len, contd);
++ if (err)
++ goto err;
++ return 0;
++ }
+
+- err = __xsk_rcv_zc(xs, xskb, len, contd);
+- if (err)
++ contd = XDP_PKT_CONTD;
++ num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
++ if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
++ xs->rx_queue_full++;
++ err = -ENOBUFS;
+ goto err;
+- if (likely(!frags))
+- return 0;
++ }
+
++ __xsk_rcv_zc(xs, xskb, len, contd);
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+- err = __xsk_rcv_zc(xs, pos, len, contd);
+- if (err)
+- goto err;
++ __xsk_rcv_zc(xs, pos, len, contd);
+ list_del_init(&pos->list_node);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 2d97b46c7da3922c3f3d72efc3464397cb060614 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:43 +0100
+Subject: xsk: introduce helper to determine rxq->frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 16394d80539937d348dd3b9ea32415c54e67a81b ]
+
+rxq->frag_size is basically a step between consecutive strictly aligned
+frames. In ZC mode, chunk size fits exactly, but if chunks are unaligned,
+there is no safe way to determine accessible space to grow tailroom.
+
+Report frag_size to be zero, if chunks are unaligned, chunk_size otherwise.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-3-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index aefc368449d59..6b9ebae2dc952 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -51,6 +51,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+@@ -337,6 +342,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return 0;
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return 0;
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+--
+2.51.0
+
--- /dev/null
+From 3b5371d39dd32411e1093c74c9ecc1320b8611df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 22:37:53 +0530
+Subject: amd-xgbe: fix MAC_TCR_SS register width for 2.5G and 10M speeds
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit 9439a661c2e80485406ce2c90b107ca17858382d ]
+
+Extend the MAC_TCR_SS (Speed Select) register field width from 2 bits
+to 3 bits to properly support all speed settings.
+
+The MAC_TCR register's SS field encoding requires 3 bits to represent
+all supported speeds:
+ - 0x00: 10Gbps (XGMII)
+ - 0x02: 2.5Gbps (GMII) / 100Mbps
+ - 0x03: 1Gbps / 10Mbps
+ - 0x06: 2.5Gbps (XGMII) - P100a only
+
+With only 2 bits, values 0x04-0x07 cannot be represented, which breaks
+2.5G XGMII mode on newer platforms and causes incorrect speed select
+values to be programmed.
+
+Fixes: 07445f3c7ca1 ("amd-xgbe: Add support for 10 Mbps speed")
+Co-developed-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Guruvendra Punugupati <Guruvendra.Punugupati@amd.com>
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260226170753.250312-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-common.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+index aa25a8a0a106f..d99d2295eab0f 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+@@ -514,7 +514,7 @@
+ #define MAC_SSIR_SSINC_INDEX 16
+ #define MAC_SSIR_SSINC_WIDTH 8
+ #define MAC_TCR_SS_INDEX 29
+-#define MAC_TCR_SS_WIDTH 2
++#define MAC_TCR_SS_WIDTH 3
+ #define MAC_TCR_TE_INDEX 0
+ #define MAC_TCR_TE_WIDTH 1
+ #define MAC_TCR_VNE_INDEX 24
+--
+2.51.0
+
--- /dev/null
+From 7f2b708defddae9353d9aebfdc6eaa29583331cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 09:51:24 +0530
+Subject: amd-xgbe: fix sleep while atomic on suspend/resume
+
+From: Raju Rangoju <Raju.Rangoju@amd.com>
+
+[ Upstream commit e2f27363aa6d983504c6836dd0975535e2e9dba0 ]
+
+The xgbe_powerdown() and xgbe_powerup() functions use spinlocks
+(spin_lock_irqsave) while calling functions that may sleep:
+- napi_disable() can sleep waiting for NAPI polling to complete
+- flush_workqueue() can sleep waiting for pending work items
+
+This causes a "BUG: scheduling while atomic" error during suspend/resume
+cycles on systems using the AMD XGBE Ethernet controller.
+
+The spinlock protection in these functions is unnecessary as these
+functions are called from suspend/resume paths which are already serialized
+by the PM core.
+
+Fix this by removing the spinlock. Since the only code that takes this
+lock is xgbe_powerdown() and xgbe_powerup(), remove it completely.
+
+Fixes: c5aa9e3b8156 ("amd-xgbe: Initial AMD 10GbE platform driver")
+Signed-off-by: Raju Rangoju <Raju.Rangoju@amd.com>
+Link: https://patch.msgid.link/20260302042124.1386445-1-Raju.Rangoju@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 10 ----------
+ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 -
+ drivers/net/ethernet/amd/xgbe/xgbe.h | 3 ---
+ 3 files changed, 14 deletions(-)
+
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+index 3d6f8f3a83366..256969ac2cb9e 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+@@ -1181,7 +1181,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+@@ -1192,8 +1191,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+@@ -1209,8 +1206,6 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+
+ pdata->power_down = 1;
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+@@ -1220,7 +1215,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ {
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+- unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+@@ -1231,8 +1225,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+ return -EINVAL;
+ }
+
+- spin_lock_irqsave(&pdata->lock, flags);
+-
+ pdata->power_down = 0;
+
+ xgbe_napi_enable(pdata, 0);
+@@ -1247,8 +1239,6 @@ int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+
+ xgbe_start_timers(pdata);
+
+- spin_unlock_irqrestore(&pdata->lock, flags);
+-
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+index 0e8698928e4d7..6e8fafb2acbaa 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c
++++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
+@@ -185,7 +185,6 @@ struct xgbe_prv_data *xgbe_alloc_pdata(struct device *dev)
+ pdata->netdev = netdev;
+ pdata->dev = dev;
+
+- spin_lock_init(&pdata->lock);
+ spin_lock_init(&pdata->xpcs_lock);
+ mutex_init(&pdata->rss_mutex);
+ spin_lock_init(&pdata->tstamp_lock);
+diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
+index a596cd08124fa..82a88d0c15e31 100644
+--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
++++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
+@@ -1083,9 +1083,6 @@ struct xgbe_prv_data {
+ unsigned int pp3;
+ unsigned int pp4;
+
+- /* Overall device lock */
+- spinlock_t lock;
+-
+ /* XPCS indirect addressing lock */
+ spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+--
+2.51.0
+
--- /dev/null
+From eb80da91a8d4573fba2d9ea97e4cf14a0d6938ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 20:32:40 +0800
+Subject: atm: lec: fix null-ptr-deref in lec_arp_clear_vccs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 101bacb303e89dc2e0640ae6a5e0fb97c4eb45bb ]
+
+syzkaller reported a null-ptr-deref in lec_arp_clear_vccs().
+This issue can be easily reproduced using the syzkaller reproducer.
+
+In the ATM LANE (LAN Emulation) module, the same atm_vcc can be shared by
+multiple lec_arp_table entries (e.g., via entry->vcc or entry->recv_vcc).
+When the underlying VCC is closed, lec_vcc_close() iterates over all
+ARP entries and calls lec_arp_clear_vccs() for each matched entry.
+
+For example, when lec_vcc_close() iterates through the hlists in
+priv->lec_arp_empty_ones or other ARP tables:
+
+1. In the first iteration, for the first matched ARP entry sharing the VCC,
+lec_arp_clear_vccs() frees the associated vpriv (which is vcc->user_back)
+and sets vcc->user_back to NULL.
+2. In the second iteration, for the next matched ARP entry sharing the same
+VCC, lec_arp_clear_vccs() is called again. It obtains a NULL vpriv from
+vcc->user_back (via LEC_VCC_PRIV(vcc)) and then attempts to dereference it
+via `vcc->pop = vpriv->old_pop`, leading to a null-ptr-deref crash.
+
+Fix this by adding a null check for vpriv before dereferencing
+it. If vpriv is already NULL, it means the VCC has been cleared
+by a previous call, so we can safely skip the cleanup and just
+clear the entry's vcc/recv_vcc pointers.
+
+The entire cleanup block (including vcc_release_async()) is placed inside
+the vpriv guard because a NULL vpriv indicates the VCC has already been
+fully released by a prior iteration — repeating the teardown would
+redundantly set flags and trigger callbacks on an already-closing socket.
+
+The Fixes tag points to the initial commit because the entry->vcc path has
+been vulnerable since the original code. The entry->recv_vcc path was later
+added by commit 8d9f73c0ad2f ("atm: fix a memory leak of vcc->user_back")
+with the same pattern, and both paths are fixed here.
+
+Reported-by: syzbot+72e3ea390c305de0e259@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/68c95a83.050a0220.3c6139.0e5c.GAE@google.com/T/
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260225123250.189289-1-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 26 +++++++++++++++-----------
+ 1 file changed, 15 insertions(+), 11 deletions(-)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index b7fa48a9b7205..0d4b8e5936dcf 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1260,24 +1260,28 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+ struct net_device *dev = (struct net_device *)vcc->proto_data;
+
+- vcc->pop = vpriv->old_pop;
+- if (vpriv->xoff)
+- netif_wake_queue(dev);
+- kfree(vpriv);
+- vcc->user_back = NULL;
+- vcc->push = entry->old_push;
+- vcc_release_async(vcc, -EPIPE);
++ if (vpriv) {
++ vcc->pop = vpriv->old_pop;
++ if (vpriv->xoff)
++ netif_wake_queue(dev);
++ kfree(vpriv);
++ vcc->user_back = NULL;
++ vcc->push = entry->old_push;
++ vcc_release_async(vcc, -EPIPE);
++ }
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
+ struct atm_vcc *vcc = entry->recv_vcc;
+ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
+
+- kfree(vpriv);
+- vcc->user_back = NULL;
++ if (vpriv) {
++ kfree(vpriv);
++ vcc->user_back = NULL;
+
+- entry->recv_vcc->push = entry->old_recv_push;
+- vcc_release_async(entry->recv_vcc, -EPIPE);
++ entry->recv_vcc->push = entry->old_recv_push;
++ vcc_release_async(entry->recv_vcc, -EPIPE);
++ }
+ entry->recv_vcc = NULL;
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From c05dc615f315162b51e4ae5c408b871fdd76d236 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 16:03:01 +0800
+Subject: bpf/bonding: reject vlan+srcmac xmit_hash_policy change when XDP is
+ loaded
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 479d589b40b836442bbdadc3fdb37f001bb67f26 ]
+
+bond_option_mode_set() already rejects mode changes that would make a
+loaded XDP program incompatible via bond_xdp_check(). However,
+bond_option_xmit_hash_policy_set() has no such guard.
+
+For 802.3ad and balance-xor modes, bond_xdp_check() returns false when
+xmit_hash_policy is vlan+srcmac, because the 802.1q payload is usually
+absent due to hardware offload. This means a user can:
+
+1. Attach a native XDP program to a bond in 802.3ad/balance-xor mode
+ with a compatible xmit_hash_policy (e.g. layer2+3).
+2. Change xmit_hash_policy to vlan+srcmac while XDP remains loaded.
+
+This leaves bond->xdp_prog set but bond_xdp_check() now returning false
+for the same device. When the bond is later destroyed, dev_xdp_uninstall()
+calls bond_xdp_set(dev, NULL, NULL) to remove the program, which hits
+the bond_xdp_check() guard and returns -EOPNOTSUPP, triggering:
+
+WARN_ON(dev_xdp_install(dev, mode, bpf_op, NULL, 0, NULL))
+
+Fix this by rejecting xmit_hash_policy changes to vlan+srcmac when an
+XDP program is loaded on a bond in 802.3ad or balance-xor mode.
+
+commit 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+introduced bond_xdp_check() which returns false for 802.3ad/balance-xor
+modes when xmit_hash_policy is vlan+srcmac. The check was wired into
+bond_xdp_set() to reject XDP attachment with an incompatible policy, but
+the symmetric path -- preventing xmit_hash_policy from being changed to an
+incompatible value after XDP is already loaded -- was left unguarded in
+bond_option_xmit_hash_policy_set().
+
+Note:
+commit 094ee6017ea0 ("bonding: check xdp prog when set bond mode")
+later added a similar guard to bond_option_mode_set(), but
+bond_option_xmit_hash_policy_set() remained unprotected.
+
+Reported-by: syzbot+5a287bcdc08104bc3132@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/6995aff6.050a0220.2eeac1.014e.GAE@google.com/T/
+Fixes: 39a0876d595b ("net, bonding: Disallow vlan+srcmac with XDP")
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Link: https://patch.msgid.link/20260226080306.98766-2-jiayuan.chen@linux.dev
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 9 +++++++--
+ drivers/net/bonding/bond_options.c | 2 ++
+ include/net/bonding.h | 1 +
+ 3 files changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 836d7fcac71a1..a2bf7bb12ff7c 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -322,7 +322,7 @@ static bool bond_sk_check(struct bonding *bond)
+ }
+ }
+
+-bool bond_xdp_check(struct bonding *bond, int mode)
++bool __bond_xdp_check(int mode, int xmit_policy)
+ {
+ switch (mode) {
+ case BOND_MODE_ROUNDROBIN:
+@@ -333,7 +333,7 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ /* vlan+srcmac is not supported with XDP as in most cases the 802.1q
+ * payload is not in the packet due to hardware offload.
+ */
+- if (bond->params.xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
++ if (xmit_policy != BOND_XMIT_POLICY_VLAN_SRCMAC)
+ return true;
+ fallthrough;
+ default:
+@@ -341,6 +341,11 @@ bool bond_xdp_check(struct bonding *bond, int mode)
+ }
+ }
+
++bool bond_xdp_check(struct bonding *bond, int mode)
++{
++ return __bond_xdp_check(mode, bond->params.xmit_policy);
++}
++
+ /*---------------------------------- VLAN -----------------------------------*/
+
+ /* In the following 2 functions, bond_vlan_rx_add_vid and bond_vlan_rx_kill_vid,
+diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
+index 5a2a935945c4c..b823425ad7f69 100644
+--- a/drivers/net/bonding/bond_options.c
++++ b/drivers/net/bonding/bond_options.c
+@@ -1546,6 +1546,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
+ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
+ const struct bond_opt_value *newval)
+ {
++ if (bond->xdp_prog && !__bond_xdp_check(BOND_MODE(bond), newval->value))
++ return -EOPNOTSUPP;
+ netdev_dbg(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+ newval->string, newval->value);
+ bond->params.xmit_policy = newval->value;
+diff --git a/include/net/bonding.h b/include/net/bonding.h
+index 9fb40a5920209..66940d41d4854 100644
+--- a/include/net/bonding.h
++++ b/include/net/bonding.h
+@@ -696,6 +696,7 @@ void bond_debug_register(struct bonding *bond);
+ void bond_debug_unregister(struct bonding *bond);
+ void bond_debug_reregister(struct bonding *bond);
+ const char *bond_mode_name(int mode);
++bool __bond_xdp_check(int mode, int xmit_policy);
+ bool bond_xdp_check(struct bonding *bond, int mode);
+ void bond_setup(struct net_device *bond_dev);
+ unsigned int bond_get_num_tx_queues(void);
+--
+2.51.0
+
--- /dev/null
+From b070ff8a2a14cdb2e646cad3091b4b8615a64ae2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 23:59:42 -0700
+Subject: bpf: export bpf_link_inc_not_zero.
+
+From: Kui-Feng Lee <thinker.li@gmail.com>
+
+[ Upstream commit 67c3e8353f45c27800eecc46e00e8272f063f7d1 ]
+
+bpf_link_inc_not_zero() will be used by kernel modules. We will use it in
+bpf_testmod.c later.
+
+Signed-off-by: Kui-Feng Lee <thinker.li@gmail.com>
+Link: https://lore.kernel.org/r/20240530065946.979330-5-thinker.li@gmail.com
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Stable-dep-of: 56145d237385 ("bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf.h | 6 ++++++
+ kernel/bpf/syscall.c | 3 ++-
+ 2 files changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/bpf.h b/include/linux/bpf.h
+index 0af6b2a5273ad..1021156886272 100644
+--- a/include/linux/bpf.h
++++ b/include/linux/bpf.h
+@@ -2231,6 +2231,7 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer);
+ int bpf_link_settle(struct bpf_link_primer *primer);
+ void bpf_link_cleanup(struct bpf_link_primer *primer);
+ void bpf_link_inc(struct bpf_link *link);
++struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link);
+ void bpf_link_put(struct bpf_link *link);
+ int bpf_link_new_fd(struct bpf_link *link);
+ struct bpf_link *bpf_link_get_from_fd(u32 ufd);
+@@ -2586,6 +2587,11 @@ static inline void bpf_link_inc(struct bpf_link *link)
+ {
+ }
+
++static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
++{
++ return NULL;
++}
++
+ static inline void bpf_link_put(struct bpf_link *link)
+ {
+ }
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 63cf5a221081b..2207f9e7a5674 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -5219,10 +5219,11 @@ static int link_detach(union bpf_attr *attr)
+ return ret;
+ }
+
+-static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
++struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link)
+ {
+ return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT);
+ }
++EXPORT_SYMBOL(bpf_link_inc_not_zero);
+
+ struct bpf_link *bpf_link_by_id(u32 id)
+ {
+--
+2.51.0
+
--- /dev/null
+From ccd664511b634c86606d8861fab3a27f76a80a51 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 17:52:17 +0800
+Subject: bpf: Fix a UAF issue in bpf_trampoline_link_cgroup_shim
+
+From: Lang Xu <xulang@uniontech.com>
+
+[ Upstream commit 56145d237385ca0e7ca9ff7b226aaf2eb8ef368b ]
+
+The root cause of this bug is that when 'bpf_link_put' reduces the
+refcount of 'shim_link->link.link' to zero, the resource is considered
+released but may still be referenced via 'tr->progs_hlist' in
+'cgroup_shim_find'. The actual cleanup of 'tr->progs_hlist' in
+'bpf_shim_tramp_link_release' is deferred. During this window, another
+process can cause a use-after-free via 'bpf_trampoline_link_cgroup_shim'.
+
+Based on Martin KaFai Lau's suggestions, I have created a simple patch.
+
+To fix this:
+ Add an atomic non-zero check in 'bpf_trampoline_link_cgroup_shim'.
+ Only increment the refcount if it is not already zero.
+
+Testing:
+ I verified the fix by adding a delay in
+ 'bpf_shim_tramp_link_release' to make the bug easier to trigger:
+
+static void bpf_shim_tramp_link_release(struct bpf_link *link)
+{
+ /* ... */
+ if (!shim_link->trampoline)
+ return;
+
++ msleep(100);
+ WARN_ON_ONCE(bpf_trampoline_unlink_prog(&shim_link->link,
+ shim_link->trampoline, NULL));
+ bpf_trampoline_put(shim_link->trampoline);
+}
+
+Before the patch, running a PoC easily reproduced the crash(almost 100%)
+with a call trace similar to KaiyanM's report.
+After the patch, the bug no longer occurs even after millions of
+iterations.
+
+Fixes: 69fd337a975c ("bpf: per-cgroup lsm flavor")
+Reported-by: Kaiyan Mei <M202472210@hust.edu.cn>
+Closes: https://lore.kernel.org/bpf/3c4ebb0b.46ff8.19abab8abe2.Coremail.kaiyanm@hust.edu.cn/
+Signed-off-by: Lang Xu <xulang@uniontech.com>
+Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/279EEE1BA1DDB49D+20260303095217.34436-1-xulang@uniontech.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/trampoline.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
+index e48791442acc5..6f7968d3704eb 100644
+--- a/kernel/bpf/trampoline.c
++++ b/kernel/bpf/trampoline.c
+@@ -701,10 +701,8 @@ int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
+ mutex_lock(&tr->mutex);
+
+ shim_link = cgroup_shim_find(tr, bpf_func);
+- if (shim_link) {
++ if (shim_link && !IS_ERR(bpf_link_inc_not_zero(&shim_link->link.link))) {
+ /* Reusing existing shim attached by the other program. */
+- bpf_link_inc(&shim_link->link.link);
+-
+ mutex_unlock(&tr->mutex);
+ bpf_trampoline_put(tr); /* bpf_trampoline_get above */
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From e93231ce80118de170644934ecd01ed298b8415b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Feb 2026 11:58:06 +0100
+Subject: can: bcm: fix locking for bcm_op runtime updates
+
+From: Oliver Hartkopp <socketcan@hartkopp.net>
+
+[ Upstream commit c35636e91e392e1540949bbc67932167cb48bc3a ]
+
+Commit c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+added a locking for some variables that can be modified at runtime when
+updating the sending bcm_op with a new TX_SETUP command in bcm_tx_setup().
+
+Usually the RX_SETUP only handles and filters incoming traffic with one
+exception: When the RX_RTR_FRAME flag is set a predefined CAN frame is
+sent when a specific RTR frame is received. Therefore the rx bcm_op uses
+bcm_can_tx() which uses the bcm_tx_lock that was only initialized in
+bcm_tx_setup(). Add the missing spin_lock_init() when allocating the
+bcm_op in bcm_rx_setup() to handle the RTR case properly.
+
+Fixes: c2aba69d0c36 ("can: bcm: add locking for bcm_op runtime updates")
+Reported-by: syzbot+5b11eccc403dd1cea9f8@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/linux-can/699466e4.a70a0220.2c38d7.00ff.GAE@google.com/
+Signed-off-by: Oliver Hartkopp <socketcan@hartkopp.net>
+Link: https://patch.msgid.link/20260218-bcm_spin_lock_init-v1-1-592634c8a5b5@hartkopp.net
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/can/bcm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/can/bcm.c b/net/can/bcm.c
+index 75653584f31b9..35039645c4629 100644
+--- a/net/can/bcm.c
++++ b/net/can/bcm.c
+@@ -1128,6 +1128,7 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg,
+ if (!op)
+ return -ENOMEM;
+
++ spin_lock_init(&op->bcm_tx_lock);
+ op->can_id = msg_head->can_id;
+ op->nframes = msg_head->nframes;
+ op->cfsiz = CFSIZ(msg_head->flags);
+--
+2.51.0
+
--- /dev/null
+From b6678677734e87b45960e0ef64ad8fc83cccc2d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Feb 2026 15:47:05 +0100
+Subject: can: mcp251x: fix deadlock in error path of mcp251x_open
+
+From: Alban Bedel <alban.bedel@lht.dlh.de>
+
+[ Upstream commit ab3f894de216f4a62adc3b57e9191888cbf26885 ]
+
+The mcp251x_open() function call free_irq() in its error path with the
+mpc_lock mutex held. But if an interrupt already occurred the
+interrupt handler will be waiting for the mpc_lock and free_irq() will
+deadlock waiting for the handler to finish.
+
+This issue is similar to the one fixed in commit 7dd9c26bd6cf ("can:
+mcp251x: fix deadlock if an interrupt occurs during mcp251x_open") but
+for the error path.
+
+To solve this issue move the call to free_irq() after the lock is
+released. Setting `priv->force_quit = 1` beforehand ensure that the IRQ
+handler will exit right away once it acquired the lock.
+
+Signed-off-by: Alban Bedel <alban.bedel@lht.dlh.de>
+Link: https://patch.msgid.link/20260209144706.2261954-1-alban.bedel@lht.dlh.de
+Fixes: bf66f3736a94 ("can: mcp251x: Move to threaded interrupts instead of workqueues.")
+Signed-off-by: Marc Kleine-Budde <mkl@pengutronix.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/can/spi/mcp251x.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
+index 8c56f85e87c1a..72ae17b2313ec 100644
+--- a/drivers/net/can/spi/mcp251x.c
++++ b/drivers/net/can/spi/mcp251x.c
+@@ -1202,6 +1202,7 @@ static int mcp251x_open(struct net_device *net)
+ {
+ struct mcp251x_priv *priv = netdev_priv(net);
+ struct spi_device *spi = priv->spi;
++ bool release_irq = false;
+ unsigned long flags = 0;
+ int ret;
+
+@@ -1245,12 +1246,24 @@ static int mcp251x_open(struct net_device *net)
+ return 0;
+
+ out_free_irq:
+- free_irq(spi->irq, priv);
++ /* The IRQ handler might be running, and if so it will be waiting
++ * for the lock. But free_irq() must wait for the handler to finish
++ * so calling it here would deadlock.
++ *
++ * Setting priv->force_quit will let the handler exit right away
++ * without any access to the hardware. This make it safe to call
++ * free_irq() after the lock is released.
++ */
++ priv->force_quit = 1;
++ release_irq = true;
++
+ mcp251x_hw_sleep(spi);
+ out_close:
+ mcp251x_power_enable(priv->transceiver, 0);
+ close_candev(net);
+ mutex_unlock(&priv->mcp_lock);
++ if (release_irq)
++ free_irq(spi->irq, priv);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From aed6608235d5ed1d8ff7256ec1ec46eff6ee8bc8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Dec 2023 13:59:30 +0200
+Subject: dpaa2-switch: do not clear any interrupts automatically
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit f6da276479c63ca29774bc331a537b92f0550c45 ]
+
+The DPSW object has multiple event sources multiplexed over the same
+IRQ. The driver has the capability to configure only some of these
+events to trigger the IRQ.
+
+The dpsw_get_irq_status() can clear events automatically based on the
+value stored in the 'status' variable passed to it. We don't want that
+to happen because we could get into a situation when we are clearing
+more events than we actually handled.
+
+Just resort to manually clearing the events that we handled. Also, since
+status is not used on the out path we remove its initialization to zero.
+
+This change does not have a user-visible effect because the dpaa2-switch
+driver enables and handles all the DPSW events which exist at the
+moment.
+
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Stable-dep-of: 74badb9c20b1 ("dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ handler")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 2631732ab2164..e44ab53448500 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1518,9 +1518,9 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ struct device *dev = (struct device *)arg;
+ struct ethsw_core *ethsw = dev_get_drvdata(dev);
+ struct ethsw_port_priv *port_priv;
+- u32 status = ~0;
+ int err, if_id;
+ bool had_mac;
++ u32 status;
+
+ err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, &status);
+@@ -1553,12 +1553,12 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ dpaa2_switch_port_connect_mac(port_priv);
+ }
+
+-out:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+ dev_err(dev, "Can't clear irq status (err %d)\n", err);
+
++out:
+ return IRQ_HANDLED;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7f182c79e67e7769c2d07115906ac45d4e12eed3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:58:12 -0800
+Subject: dpaa2-switch: Fix interrupt storm after receiving bad if_id in IRQ
+ handler
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 74badb9c20b1a9c02a95c735c6d3cd6121679c93 ]
+
+Commit 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ
+handler") introduces a range check for if_id to avoid an out-of-bounds
+access. If an out-of-bounds if_id is detected, the interrupt status is
+not cleared. This may result in an interrupt storm.
+
+Clear the interrupt status after detecting an out-of-bounds if_id to avoid
+the problem.
+
+Found by an experimental AI code review agent at Google.
+
+Fixes: 31a7a0bbeb00 ("dpaa2-switch: add bounds check for if_id in IRQ handler")
+Cc: Junrui Luo <moonafterrain@outlook.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20260227055812.1777915-1-linux@roeck-us.net
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index e44ab53448500..176f7072338b2 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -1532,7 +1532,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ if_id = (status & 0xFFFF0000) >> 16;
+ if (if_id >= ethsw->sw_attr.num_ifs) {
+ dev_err(dev, "Invalid if_id %d in IRQ status\n", if_id);
+- goto out;
++ goto out_clear;
+ }
+ port_priv = ethsw->ports[if_id];
+
+@@ -1553,6 +1553,7 @@ static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg)
+ dpaa2_switch_port_connect_mac(port_priv);
+ }
+
++out_clear:
+ err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle,
+ DPSW_IRQ_INDEX_IF, status);
+ if (err)
+--
+2.51.0
+
--- /dev/null
+From 228f8d344300b11fab5cc9058bdc1e9c7bc08d68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 16:24:52 +0800
+Subject: drm/sched: Fix kernel-doc warning for drm_sched_job_done()
+
+From: Yujie Liu <yujie.liu@intel.com>
+
+[ Upstream commit 61ded1083b264ff67ca8c2de822c66b6febaf9a8 ]
+
+There is a kernel-doc warning for the scheduler:
+
+Warning: drivers/gpu/drm/scheduler/sched_main.c:367 function parameter 'result' not described in 'drm_sched_job_done'
+
+Fix the warning by describing the undocumented error code.
+
+Fixes: 539f9ee4b52a ("drm/scheduler: properly forward fence errors")
+Signed-off-by: Yujie Liu <yujie.liu@intel.com>
+[phasta: Flesh out commit message]
+Signed-off-by: Philipp Stanner <phasta@kernel.org>
+Link: https://patch.msgid.link/20260227082452.1802922-1-yujie.liu@intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/scheduler/sched_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
+index 4faa2108c0a73..50716bc5eef63 100644
+--- a/drivers/gpu/drm/scheduler/sched_main.c
++++ b/drivers/gpu/drm/scheduler/sched_main.c
+@@ -259,6 +259,7 @@ drm_sched_rq_select_entity_fifo(struct drm_sched_rq *rq)
+ /**
+ * drm_sched_job_done - complete a job
+ * @s_job: pointer to the job which is done
++ * @result: 0 on success, -ERRNO on error
+ *
+ * Finish the job's fence and wake up the worker thread.
+ */
+--
+2.51.0
+
--- /dev/null
+From f585c29f8caedb5cad76e5ef174da01394e75fbc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Feb 2026 19:09:32 +0100
+Subject: drm/solomon: Fix page start when updating rectangle in page
+ addressing mode
+
+From: Francesco Lavra <flavra@baylibre.com>
+
+[ Upstream commit 36d9579fed6c9429aa172f77bd28c58696ce8e2b ]
+
+In page addressing mode, the pixel values of a dirty rectangle must be sent
+to the display controller one page at a time. The range of pages
+corresponding to a given rectangle is being incorrectly calculated as if
+the Y value of the top left coordinate of the rectangle was 0. This can
+result in rectangle updates being displayed on wrong parts of the screen.
+
+Fix the above issue by consolidating the start page calculation in a single
+place at the beginning of the update_rect function, and using the
+calculated value for all addressing modes.
+
+Fixes: b0daaa5cfaa5 ("drm/ssd130x: Support page addressing mode")
+Signed-off-by: Francesco Lavra <flavra@baylibre.com>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Link: https://patch.msgid.link/20260210180932.736502-1-flavra@baylibre.com
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.c | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index 81c85e67fa070..0d6a2664cfbeb 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -463,6 +463,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+ unsigned int page_height = SSD130X_PAGE_HEIGHT;
++ u8 page_start = ssd130x->page_offset + y / page_height;
+ unsigned int pages = DIV_ROUND_UP(height, page_height);
+ struct drm_device *drm = &ssd130x->drm;
+ u32 array_idx = 0;
+@@ -500,14 +501,11 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+
+ if (!ssd130x->page_address_mode) {
+- u8 page_start;
+-
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ return ret;
+
+- page_start = ssd130x->page_offset + y / page_height;
+ ret = ssd130x_set_page_range(ssd130x, page_start, pages);
+ if (ret < 0)
+ return ret;
+@@ -539,7 +537,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+ if (ssd130x->page_address_mode) {
+ ret = ssd130x_set_page_pos(ssd130x,
+- ssd130x->page_offset + i,
++ page_start + i,
+ ssd130x->col_offset + x);
+ if (ret < 0)
+ return ret;
+--
+2.51.0
+
--- /dev/null
+From e24354f6ef92a13f590ac384e78880b058609f7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Oct 2023 09:15:03 +0200
+Subject: drm/ssd130x: Replace .page_height field in device info with a
+ constant
+
+From: Javier Martinez Canillas <javierm@redhat.com>
+
+[ Upstream commit ec5dceb8180f0cb110dc7029d55d6a83d0583015 ]
+
+This deemed useful to avoid hardcoding a page height and allow to support
+other Solomon controller families, but dividing the screen in pages seems
+to be something that is specific to the SSD130x chip family.
+
+For example, SSD132x chip family divides the screen in segments (columns)
+and common outputs (rows), so the concept of screen pages does not exist
+for the SSD132x family.
+
+Let's drop this field from the device info struct and just use a constant
+SSD130X_PAGE_HEIGHT macro to define the page height. While being there,
+replace hardcoded 8 values in places where it is used as the page height.
+
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Thomas Zimmermann <tzimmermann@suse.de>
+Link: https://patchwork.freedesktop.org/patch/msgid/20231014071520.1342189-2-javierm@redhat.com
+Stable-dep-of: 36d9579fed6c ("drm/solomon: Fix page start when updating rectangle in page addressing mode")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.c | 37 +++++++++++++++----------------
+ drivers/gpu/drm/solomon/ssd130x.h | 1 -
+ 2 files changed, 18 insertions(+), 20 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index ef3e3832add90..81c85e67fa070 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -42,6 +42,8 @@
+ #define DRIVER_MAJOR 1
+ #define DRIVER_MINOR 0
+
++#define SSD130X_PAGE_HEIGHT 8
++
+ #define SSD130X_PAGE_COL_START_LOW 0x00
+ #define SSD130X_PAGE_COL_START_HIGH 0x10
+ #define SSD130X_SET_ADDRESS_MODE 0x20
+@@ -102,7 +104,6 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ .default_width = 132,
+ .default_height = 64,
+ .page_mode_only = 1,
+- .page_height = 8,
+ },
+ [SSD1305_ID] = {
+ .default_vcomh = 0x34,
+@@ -110,7 +111,6 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ .default_dclk_frq = 7,
+ .default_width = 132,
+ .default_height = 64,
+- .page_height = 8,
+ },
+ [SSD1306_ID] = {
+ .default_vcomh = 0x20,
+@@ -119,7 +119,6 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ .need_chargepump = 1,
+ .default_width = 128,
+ .default_height = 64,
+- .page_height = 8,
+ },
+ [SSD1307_ID] = {
+ .default_vcomh = 0x20,
+@@ -128,7 +127,6 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ .need_pwm = 1,
+ .default_width = 128,
+ .default_height = 39,
+- .page_height = 8,
+ },
+ [SSD1309_ID] = {
+ .default_vcomh = 0x34,
+@@ -136,7 +134,6 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ .default_dclk_frq = 10,
+ .default_width = 128,
+ .default_height = 64,
+- .page_height = 8,
+ }
+ };
+ EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
+@@ -465,13 +462,13 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ unsigned int width = drm_rect_width(rect);
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+- unsigned int page_height = ssd130x->device_info->page_height;
++ unsigned int page_height = SSD130X_PAGE_HEIGHT;
+ unsigned int pages = DIV_ROUND_UP(height, page_height);
+ struct drm_device *drm = &ssd130x->drm;
+ u32 array_idx = 0;
+ int ret, i, j, k;
+
+- drm_WARN_ONCE(drm, y % 8 != 0, "y must be aligned to screen page\n");
++ drm_WARN_ONCE(drm, y % page_height != 0, "y must be aligned to screen page\n");
+
+ /*
+ * The screen is divided in pages, each having a height of 8
+@@ -503,27 +500,32 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ */
+
+ if (!ssd130x->page_address_mode) {
++ u8 page_start;
++
+ /* Set address range for horizontal addressing mode */
+ ret = ssd130x_set_col_range(ssd130x, ssd130x->col_offset + x, width);
+ if (ret < 0)
+ return ret;
+
+- ret = ssd130x_set_page_range(ssd130x, ssd130x->page_offset + y / 8, pages);
++ page_start = ssd130x->page_offset + y / page_height;
++ ret = ssd130x_set_page_range(ssd130x, page_start, pages);
+ if (ret < 0)
+ return ret;
+ }
+
+ for (i = 0; i < pages; i++) {
+- int m = 8;
++ int m = page_height;
+
+ /* Last page may be partial */
+- if (8 * (y / 8 + i + 1) > ssd130x->height)
+- m = ssd130x->height % 8;
++ if (page_height * (y / page_height + i + 1) > ssd130x->height)
++ m = ssd130x->height % page_height;
++
+ for (j = 0; j < width; j++) {
+ u8 data = 0;
+
+ for (k = 0; k < m; k++) {
+- u8 byte = buf[(8 * i + k) * line_length + j / 8];
++ u32 idx = (page_height * i + k) * line_length + j / 8;
++ u8 byte = buf[idx];
+ u8 bit = (byte >> (j % 8)) & 1;
+
+ data |= bit << k;
+@@ -559,8 +561,7 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+
+ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
+ {
+- unsigned int page_height = ssd130x->device_info->page_height;
+- unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
++ unsigned int pages = DIV_ROUND_UP(ssd130x->height, SSD130X_PAGE_HEIGHT);
+ unsigned int width = ssd130x->width;
+ int ret, i;
+
+@@ -605,14 +606,13 @@ static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
+ u8 *buf, u8 *data_array)
+ {
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+- unsigned int page_height = ssd130x->device_info->page_height;
+ struct iosys_map dst;
+ unsigned int dst_pitch;
+ int ret = 0;
+
+ /* Align y to display page boundaries */
+- rect->y1 = round_down(rect->y1, page_height);
+- rect->y2 = min_t(unsigned int, round_up(rect->y2, page_height), ssd130x->height);
++ rect->y1 = round_down(rect->y1, SSD130X_PAGE_HEIGHT);
++ rect->y2 = min_t(unsigned int, round_up(rect->y2, SSD130X_PAGE_HEIGHT), ssd130x->height);
+
+ dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8);
+
+@@ -814,8 +814,7 @@ static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(crtc_state);
+- unsigned int page_height = ssd130x->device_info->page_height;
+- unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
++ unsigned int pages = DIV_ROUND_UP(ssd130x->height, SSD130X_PAGE_HEIGHT);
+ int ret;
+
+ ret = drm_crtc_helper_atomic_check(crtc, state);
+diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
+index aa39b13615ebe..bbe374453605b 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.h
++++ b/drivers/gpu/drm/solomon/ssd130x.h
+@@ -39,7 +39,6 @@ struct ssd130x_deviceinfo {
+ u32 default_dclk_frq;
+ u32 default_width;
+ u32 default_height;
+- u32 page_height;
+ bool need_pwm;
+ bool need_chargepump;
+ bool page_mode_only;
+--
+2.51.0
+
--- /dev/null
+From d6888cd89f402154580363b72f7da549c0155ac1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Sep 2023 07:29:25 +0200
+Subject: drm/ssd130x: Store the HW buffer in the driver-private CRTC state
+
+From: Javier Martinez Canillas <javierm@redhat.com>
+
+[ Upstream commit d51f9fbd98b6d88aef4f6431bbb575378a6c7a24 ]
+
+The commit 45b58669e532 ("drm/ssd130x: Allocate buffer in the plane's
+.atomic_check() callback") moved the allocation of the intermediate and
+HW buffers from the encoder's .atomic_enable callback, to the plane's
+.atomic_check callback.
+
+This was suggested by Maxime Ripard, because drivers aren't allowed to
+fail after the drm_atomic_helper_swap_state() function has been called.
+
+And the encoder's .atomic_enable happens after the new atomic state has
+been swapped, so allocations (that can fail) shouldn't be done there.
+
+But the HW buffer isn't really tied to the plane's state. It has a fixed
+size that only depends on the (also fixed) display resolution defined in
+the Device Tree Blob.
+
+That buffer can be considered part of the CRTC state, and for this reason
+makes more sense to do its allocation in the CRTC .atomic_check callback.
+
+The other allocated buffer (used to store a conversion from the emulated
+XR24 format to the native R1 format) is part of the plane's state, since
+it will be optional once the driver supports R1 and allows user-space to
+set that pixel format.
+
+So let's keep the allocation for it in the plane's .atomic_check callback,
+this can't be moved to the CRTC's .atomic_check because changing a format
+does not trigger a CRTC mode set.
+
+Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Closes: https://lore.kernel.org/dri-devel/CAMuHMdWv_QSatDgihr8=2SXHhvp=icNxumZcZOPwT9Q_QiogNQ@mail.gmail.com/
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Acked-by: Maxime Ripard <mripard@kernel.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20230913052938.1114651-1-javierm@redhat.com
+Stable-dep-of: 36d9579fed6c ("drm/solomon: Fix page start when updating rectangle in page addressing mode")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.c | 153 +++++++++++++++++++++++-------
+ 1 file changed, 118 insertions(+), 35 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.c b/drivers/gpu/drm/solomon/ssd130x.c
+index deec6acdcf646..ef3e3832add90 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.c
++++ b/drivers/gpu/drm/solomon/ssd130x.c
+@@ -141,14 +141,23 @@ const struct ssd130x_deviceinfo ssd130x_variants[] = {
+ };
+ EXPORT_SYMBOL_NS_GPL(ssd130x_variants, DRM_SSD130X);
+
++struct ssd130x_crtc_state {
++ struct drm_crtc_state base;
++ /* Buffer to store pixels in HW format and written to the panel */
++ u8 *data_array;
++};
++
+ struct ssd130x_plane_state {
+ struct drm_shadow_plane_state base;
+ /* Intermediate buffer to convert pixels from XRGB8888 to HW format */
+ u8 *buffer;
+- /* Buffer to store pixels in HW format and written to the panel */
+- u8 *data_array;
+ };
+
++static inline struct ssd130x_crtc_state *to_ssd130x_crtc_state(struct drm_crtc_state *state)
++{
++ return container_of(state, struct ssd130x_crtc_state, base);
++}
++
+ static inline struct ssd130x_plane_state *to_ssd130x_plane_state(struct drm_plane_state *state)
+ {
+ return container_of(state, struct ssd130x_plane_state, base.base);
+@@ -448,13 +457,11 @@ static int ssd130x_init(struct ssd130x_device *ssd130x)
+ }
+
+ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+- struct ssd130x_plane_state *ssd130x_state,
+- struct drm_rect *rect)
++ struct drm_rect *rect, u8 *buf,
++ u8 *data_array)
+ {
+ unsigned int x = rect->x1;
+ unsigned int y = rect->y1;
+- u8 *buf = ssd130x_state->buffer;
+- u8 *data_array = ssd130x_state->data_array;
+ unsigned int width = drm_rect_width(rect);
+ unsigned int height = drm_rect_height(rect);
+ unsigned int line_length = DIV_ROUND_UP(width, 8);
+@@ -550,12 +557,10 @@ static int ssd130x_update_rect(struct ssd130x_device *ssd130x,
+ return ret;
+ }
+
+-static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
+- struct ssd130x_plane_state *ssd130x_state)
++static void ssd130x_clear_screen(struct ssd130x_device *ssd130x, u8 *data_array)
+ {
+ unsigned int page_height = ssd130x->device_info->page_height;
+ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
+- u8 *data_array = ssd130x_state->data_array;
+ unsigned int width = ssd130x->width;
+ int ret, i;
+
+@@ -594,15 +599,13 @@ static void ssd130x_clear_screen(struct ssd130x_device *ssd130x,
+ }
+ }
+
+-static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
++static int ssd130x_fb_blit_rect(struct drm_framebuffer *fb,
+ const struct iosys_map *vmap,
+- struct drm_rect *rect)
++ struct drm_rect *rect,
++ u8 *buf, u8 *data_array)
+ {
+- struct drm_framebuffer *fb = state->fb;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(fb->dev);
+ unsigned int page_height = ssd130x->device_info->page_height;
+- struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
+- u8 *buf = ssd130x_state->buffer;
+ struct iosys_map dst;
+ unsigned int dst_pitch;
+ int ret = 0;
+@@ -622,7 +625,7 @@ static int ssd130x_fb_blit_rect(struct drm_plane_state *state,
+
+ drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
+
+- ssd130x_update_rect(ssd130x, ssd130x_state, rect);
++ ssd130x_update_rect(ssd130x, rect, buf, data_array);
+
+ return ret;
+ }
+@@ -634,12 +637,19 @@ static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane_state);
+- unsigned int page_height = ssd130x->device_info->page_height;
+- unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
++ struct drm_crtc *crtc = plane_state->crtc;
++ struct drm_crtc_state *crtc_state;
+ const struct drm_format_info *fi;
+ unsigned int pitch;
+ int ret;
+
++ if (!crtc)
++ return -EINVAL;
++
++ crtc_state = drm_atomic_get_crtc_state(state, crtc);
++ if (IS_ERR(crtc_state))
++ return PTR_ERR(crtc_state);
++
+ ret = drm_plane_helper_atomic_check(plane, state);
+ if (ret)
+ return ret;
+@@ -654,14 +664,6 @@ static int ssd130x_primary_plane_helper_atomic_check(struct drm_plane *plane,
+ if (!ssd130x_state->buffer)
+ return -ENOMEM;
+
+- ssd130x_state->data_array = kcalloc(ssd130x->width, pages, GFP_KERNEL);
+- if (!ssd130x_state->data_array) {
+- kfree(ssd130x_state->buffer);
+- /* Set to prevent a double free in .atomic_destroy_state() */
+- ssd130x_state->buffer = NULL;
+- return -ENOMEM;
+- }
+-
+ return 0;
+ }
+
+@@ -671,6 +673,10 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
+ struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
++ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
++ struct ssd130x_crtc_state *ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
++ struct ssd130x_plane_state *ssd130x_plane_state = to_ssd130x_plane_state(plane_state);
++ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_device *drm = plane->dev;
+ struct drm_rect dst_clip;
+@@ -687,7 +693,9 @@ static void ssd130x_primary_plane_helper_atomic_update(struct drm_plane *plane,
+ if (!drm_rect_intersect(&dst_clip, &damage))
+ continue;
+
+- ssd130x_fb_blit_rect(plane_state, &shadow_plane_state->data[0], &dst_clip);
++ ssd130x_fb_blit_rect(fb, &shadow_plane_state->data[0], &dst_clip,
++ ssd130x_plane_state->buffer,
++ ssd130x_crtc_state->data_array);
+ }
+
+ drm_dev_exit(idx);
+@@ -698,13 +706,21 @@ static void ssd130x_primary_plane_helper_atomic_disable(struct drm_plane *plane,
+ {
+ struct drm_device *drm = plane->dev;
+ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
+- struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(plane->state);
++ struct drm_plane_state *plane_state = drm_atomic_get_new_plane_state(state, plane);
++ struct drm_crtc_state *crtc_state;
++ struct ssd130x_crtc_state *ssd130x_crtc_state;
+ int idx;
+
++ if (!plane_state->crtc)
++ return;
++
++ crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc);
++ ssd130x_crtc_state = to_ssd130x_crtc_state(crtc_state);
++
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+- ssd130x_clear_screen(ssd130x, ssd130x_state);
++ ssd130x_clear_screen(ssd130x, ssd130x_crtc_state->data_array);
+
+ drm_dev_exit(idx);
+ }
+@@ -737,9 +753,8 @@ static struct drm_plane_state *ssd130x_primary_plane_duplicate_state(struct drm_
+ if (!ssd130x_state)
+ return NULL;
+
+- /* The buffers are not duplicated and are allocated in .atomic_check */
++ /* The buffer is not duplicated and is allocated in .atomic_check */
+ ssd130x_state->buffer = NULL;
+- ssd130x_state->data_array = NULL;
+
+ new_shadow_plane_state = &ssd130x_state->base;
+
+@@ -753,7 +768,6 @@ static void ssd130x_primary_plane_destroy_state(struct drm_plane *plane,
+ {
+ struct ssd130x_plane_state *ssd130x_state = to_ssd130x_plane_state(state);
+
+- kfree(ssd130x_state->data_array);
+ kfree(ssd130x_state->buffer);
+
+ __drm_gem_destroy_shadow_plane_state(&ssd130x_state->base);
+@@ -793,6 +807,75 @@ static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc
+ return MODE_OK;
+ }
+
++static int ssd130x_crtc_helper_atomic_check(struct drm_crtc *crtc,
++ struct drm_atomic_state *state)
++{
++ struct drm_device *drm = crtc->dev;
++ struct ssd130x_device *ssd130x = drm_to_ssd130x(drm);
++ struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
++ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(crtc_state);
++ unsigned int page_height = ssd130x->device_info->page_height;
++ unsigned int pages = DIV_ROUND_UP(ssd130x->height, page_height);
++ int ret;
++
++ ret = drm_crtc_helper_atomic_check(crtc, state);
++ if (ret)
++ return ret;
++
++ ssd130x_state->data_array = kmalloc(ssd130x->width * pages, GFP_KERNEL);
++ if (!ssd130x_state->data_array)
++ return -ENOMEM;
++
++ return 0;
++}
++
++/* Called during init to allocate the CRTC's atomic state. */
++static void ssd130x_crtc_reset(struct drm_crtc *crtc)
++{
++ struct ssd130x_crtc_state *ssd130x_state;
++
++ WARN_ON(crtc->state);
++
++ ssd130x_state = kzalloc(sizeof(*ssd130x_state), GFP_KERNEL);
++ if (!ssd130x_state)
++ return;
++
++ __drm_atomic_helper_crtc_reset(crtc, &ssd130x_state->base);
++}
++
++static struct drm_crtc_state *ssd130x_crtc_duplicate_state(struct drm_crtc *crtc)
++{
++ struct ssd130x_crtc_state *old_ssd130x_state;
++ struct ssd130x_crtc_state *ssd130x_state;
++
++ if (WARN_ON(!crtc->state))
++ return NULL;
++
++ old_ssd130x_state = to_ssd130x_crtc_state(crtc->state);
++ ssd130x_state = kmemdup(old_ssd130x_state, sizeof(*ssd130x_state), GFP_KERNEL);
++ if (!ssd130x_state)
++ return NULL;
++
++ /* The buffer is not duplicated and is allocated in .atomic_check */
++ ssd130x_state->data_array = NULL;
++
++ __drm_atomic_helper_crtc_duplicate_state(crtc, &ssd130x_state->base);
++
++ return &ssd130x_state->base;
++}
++
++static void ssd130x_crtc_destroy_state(struct drm_crtc *crtc,
++ struct drm_crtc_state *state)
++{
++ struct ssd130x_crtc_state *ssd130x_state = to_ssd130x_crtc_state(state);
++
++ kfree(ssd130x_state->data_array);
++
++ __drm_atomic_helper_crtc_destroy_state(state);
++
++ kfree(ssd130x_state);
++}
++
+ /*
+ * The CRTC is always enabled. Screen updates are performed by
+ * the primary plane's atomic_update function. Disabling clears
+@@ -800,16 +883,16 @@ static enum drm_mode_status ssd130x_crtc_helper_mode_valid(struct drm_crtc *crtc
+ */
+ static const struct drm_crtc_helper_funcs ssd130x_crtc_helper_funcs = {
+ .mode_valid = ssd130x_crtc_helper_mode_valid,
+- .atomic_check = drm_crtc_helper_atomic_check,
++ .atomic_check = ssd130x_crtc_helper_atomic_check,
+ };
+
+ static const struct drm_crtc_funcs ssd130x_crtc_funcs = {
+- .reset = drm_atomic_helper_crtc_reset,
++ .reset = ssd130x_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
++ .atomic_duplicate_state = ssd130x_crtc_duplicate_state,
++ .atomic_destroy_state = ssd130x_crtc_destroy_state,
+ };
+
+ static void ssd130x_encoder_helper_atomic_enable(struct drm_encoder *encoder,
+--
+2.51.0
+
--- /dev/null
+From 65040725ebb663f67fbaae59e554c1924f1b9a1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Aug 2023 17:08:41 +0200
+Subject: drm/ssd130x: Use bool for ssd130x_deviceinfo flags
+
+From: Geert Uytterhoeven <geert@linux-m68k.org>
+
+[ Upstream commit 15d30b46573d75f5cb58cfacded8ebab9c76a2b0 ]
+
+The .need_pwm and .need_chargepump fields in struct ssd130x_deviceinfo
+are flags that can have only two possible values: 0 and 1.
+Reduce kernel size by changing their types from int to bool.
+
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Tested-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Javier Martinez Canillas <javierm@redhat.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/285005ff361969eff001386c5f97990f0e703838.1692888745.git.geert@linux-m68k.org
+Stable-dep-of: 36d9579fed6c ("drm/solomon: Fix page start when updating rectangle in page addressing mode")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/solomon/ssd130x.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/solomon/ssd130x.h b/drivers/gpu/drm/solomon/ssd130x.h
+index 87968b3e7fb82..aa39b13615ebe 100644
+--- a/drivers/gpu/drm/solomon/ssd130x.h
++++ b/drivers/gpu/drm/solomon/ssd130x.h
+@@ -40,8 +40,8 @@ struct ssd130x_deviceinfo {
+ u32 default_width;
+ u32 default_height;
+ u32 page_height;
+- int need_pwm;
+- int need_chargepump;
++ bool need_pwm;
++ bool need_chargepump;
+ bool page_mode_only;
+ };
+
+--
+2.51.0
+
--- /dev/null
+From d2408e437314da38bae65615c82e5e16dd3dd22c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jan 2026 16:14:20 +0200
+Subject: e1000e: clear DPG_EN after reset to avoid autonomous power-gating
+
+From: Vitaly Lifshits <vitaly.lifshits@intel.com>
+
+[ Upstream commit 0942fc6d324eb9c6b16187b2aa994c0823557f06 ]
+
+Panther Lake systems introduced an autonomous power gating feature for
+the integrated Gigabit Ethernet in shutdown state (S5) state. As part of
+it, the reset value of DPG_EN bit was changed to 1. Clear this bit after
+performing hardware reset to avoid errors such as Tx/Rx hangs, or packet
+loss/corruption.
+
+Fixes: 0c9183ce61bc ("e1000e: Add support for the next LOM generation")
+Signed-off-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Avigail Dahan <avigailx.dahan@intel.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e1000e/defines.h | 1 +
+ drivers/net/ethernet/intel/e1000e/ich8lan.c | 9 +++++++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
+index 955bb11618572..c4db2927c6c42 100644
+--- a/drivers/net/ethernet/intel/e1000e/defines.h
++++ b/drivers/net/ethernet/intel/e1000e/defines.h
+@@ -33,6 +33,7 @@
+
+ /* Extended Device Control */
+ #define E1000_CTRL_EXT_LPCD 0x00000004 /* LCD Power Cycle Done */
++#define E1000_CTRL_EXT_DPG_EN 0x00000008 /* Dynamic Power Gating Enable */
+ #define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+ #define E1000_CTRL_EXT_FORCE_SMBUS 0x00000800 /* Force SMBus mode */
+ #define E1000_CTRL_EXT_EE_RST 0x00002000 /* Reinitialize from EEPROM */
+diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+index df4e7d781cb1c..f9328caefe44b 100644
+--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
++++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
+@@ -4925,6 +4925,15 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+ reg |= E1000_KABGTXD_BGSQLBIAS;
+ ew32(KABGTXD, reg);
+
++ /* The hardware reset value of the DPG_EN bit is 1.
++ * Clear DPG_EN to prevent unexpected autonomous power gating.
++ */
++ if (hw->mac.type >= e1000_pch_ptp) {
++ reg = er32(CTRL_EXT);
++ reg &= ~E1000_CTRL_EXT_DPG_EN;
++ ew32(CTRL_EXT, reg);
++ }
++
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 34420ee5bf059f51ea3a659fef9e02a40431dd75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Nov 2025 15:13:20 +0530
+Subject: hwmon: (aht10) Add support for dht20
+
+From: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+
+[ Upstream commit 3eaf1b631506e8de2cb37c278d5bc042521e82c1 ]
+
+Add support for dht20 temperature and humidity sensor from Aosong.
+Modify aht10 driver to handle different init command for dht20 sensor by
+adding init_cmd entry in the driver data. dht20 sensor is compatible with
+aht10 hwmon driver with this change.
+
+Tested on TI am62x SK board with dht20 sensor connected at i2c-2 port.
+
+Signed-off-by: Akhilesh Patil <akhilesh@ee.iitb.ac.in>
+Link: https://lore.kernel.org/r/2025112-94320-906858@bhairav-test.ee.iitb.ac.in
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Stable-dep-of: b7497b5a99f5 ("hwmon: (aht10) Fix initialization commands for AHT20")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/hwmon/aht10.rst | 10 +++++++++-
+ drivers/hwmon/Kconfig | 6 +++---
+ drivers/hwmon/aht10.c | 19 ++++++++++++++++---
+ 3 files changed, 28 insertions(+), 7 deletions(-)
+
+diff --git a/Documentation/hwmon/aht10.rst b/Documentation/hwmon/aht10.rst
+index 213644b4ecba6..7903b6434326d 100644
+--- a/Documentation/hwmon/aht10.rst
++++ b/Documentation/hwmon/aht10.rst
+@@ -20,6 +20,14 @@ Supported chips:
+
+ English: http://www.aosong.com/userfiles/files/media/Data%20Sheet%20AHT20.pdf
+
++ * Aosong DHT20
++
++ Prefix: 'dht20'
++
++ Addresses scanned: None
++
++ Datasheet: https://www.digikey.co.nz/en/htmldatasheets/production/9184855/0/0/1/101020932
++
+ Author: Johannes Cornelis Draaijer <jcdra1@gmail.com>
+
+
+@@ -33,7 +41,7 @@ The address of this i2c device may only be 0x38
+ Special Features
+ ----------------
+
+-AHT20 has additional CRC8 support which is sent as the last byte of the sensor
++AHT20, DHT20 has additional CRC8 support which is sent as the last byte of the sensor
+ values.
+
+ Usage Notes
+diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
+index a4c361b6619c1..2b090dbd836c5 100644
+--- a/drivers/hwmon/Kconfig
++++ b/drivers/hwmon/Kconfig
+@@ -257,12 +257,12 @@ config SENSORS_ADT7475
+ will be called adt7475.
+
+ config SENSORS_AHT10
+- tristate "Aosong AHT10, AHT20"
++ tristate "Aosong AHT10, AHT20, DHT20"
+ depends on I2C
+ select CRC8
+ help
+- If you say yes here, you get support for the Aosong AHT10 and AHT20
+- temperature and humidity sensors
++ If you say yes here, you get support for the Aosong AHT10, AHT20 and
++ DHT20 temperature and humidity sensors
+
+ This driver can also be built as a module. If so, the module
+ will be called aht10.
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index f136bf3ff40ad..4f235dfb260f8 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,6 +37,8 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
++#define DHT20_CMD_INIT 0x71
++
+ /*
+ * Flags in the answer byte/command
+ */
+@@ -48,11 +50,12 @@
+
+ #define AHT10_MAX_POLL_INTERVAL_LEN 30
+
+-enum aht10_variant { aht10, aht20 };
++enum aht10_variant { aht10, aht20, dht20};
+
+ static const struct i2c_device_id aht10_id[] = {
+ { "aht10", aht10 },
+ { "aht20", aht20 },
++ { "dht20", dht20 },
+ { },
+ };
+ MODULE_DEVICE_TABLE(i2c, aht10_id);
+@@ -77,6 +80,7 @@ MODULE_DEVICE_TABLE(i2c, aht10_id);
+ * AHT10/AHT20
+ * @crc8: crc8 support flag
+ * @meas_size: measurements data size
++ * @init_cmd: Initialization command
+ */
+
+ struct aht10_data {
+@@ -92,6 +96,7 @@ struct aht10_data {
+ int humidity;
+ bool crc8;
+ unsigned int meas_size;
++ u8 init_cmd;
+ };
+
+ /**
+@@ -101,13 +106,13 @@ struct aht10_data {
+ */
+ static int aht10_init(struct aht10_data *data)
+ {
+- const u8 cmd_init[] = {AHT10_CMD_INIT, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
++ const u8 cmd_init[] = {data->init_cmd, AHT10_CAL_ENABLED | AHT10_MODE_CYC,
+ 0x00};
+ int res;
+ u8 status;
+ struct i2c_client *client = data->client;
+
+- res = i2c_master_send(client, cmd_init, 3);
++ res = i2c_master_send(client, cmd_init, sizeof(cmd_init));
+ if (res < 0)
+ return res;
+
+@@ -353,9 +358,17 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
++ data->init_cmd = AHT10_CMD_INIT;
++ break;
++ case dht20:
++ data->meas_size = AHT20_MEAS_SIZE;
++ data->crc8 = true;
++ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
++ data->init_cmd = DHT20_CMD_INIT;
+ break;
+ default:
+ data->meas_size = AHT10_MEAS_SIZE;
++ data->init_cmd = AHT10_CMD_INIT;
+ break;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7be6dbc28400ab3b28ccec1b37a2804137f51ad2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 01:03:31 +0800
+Subject: hwmon: (aht10) Fix initialization commands for AHT20
+
+From: Hao Yu <haoyufine@gmail.com>
+
+[ Upstream commit b7497b5a99f54ab8dcda5b14a308385b2fb03d8d ]
+
+According to the AHT20 datasheet (updated to V1.0 after the 2023.09
+version), the initialization command for AHT20 is 0b10111110 (0xBE).
+The previous sequence (0xE1) used in earlier versions is no longer
+compatible with newer AHT20 sensors. Update the initialization
+command to ensure the sensor is properly initialized.
+
+While at it, use binary notation for DHT20_CMD_INIT to match the notation
+used in the datasheet.
+
+Fixes: d2abcb5cc885 ("hwmon: (aht10) Add support for compatible aht20")
+Signed-off-by: Hao Yu <haoyufine@gmail.com>
+Link: https://lore.kernel.org/r/20260222170332.1616-3-haoyufine@gmail.com
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/aht10.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/aht10.c b/drivers/hwmon/aht10.c
+index 4f235dfb260f8..aa116957d9c96 100644
+--- a/drivers/hwmon/aht10.c
++++ b/drivers/hwmon/aht10.c
+@@ -37,7 +37,9 @@
+ #define AHT10_CMD_MEAS 0b10101100
+ #define AHT10_CMD_RST 0b10111010
+
+-#define DHT20_CMD_INIT 0x71
++#define AHT20_CMD_INIT 0b10111110
++
++#define DHT20_CMD_INIT 0b01110001
+
+ /*
+ * Flags in the answer byte/command
+@@ -358,7 +360,7 @@ static int aht10_probe(struct i2c_client *client)
+ data->meas_size = AHT20_MEAS_SIZE;
+ data->crc8 = true;
+ crc8_populate_msb(crc8_table, AHT20_CRC8_POLY);
+- data->init_cmd = AHT10_CMD_INIT;
++ data->init_cmd = AHT20_CMD_INIT;
+ break;
+ case dht20:
+ data->meas_size = AHT20_MEAS_SIZE;
+--
+2.51.0
+
--- /dev/null
+From 1b3f2e7c4cd5ec37044934990bf24c57a9df9cde Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:14 -0800
+Subject: hwmon: (it87) Check the it87_lock() return value
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 07ed4f05bbfd2bc014974dcc4297fd3aa1cb88c0 ]
+
+Return early in it87_resume() if it87_lock() fails instead of ignoring the
+return value of that function. This patch suppresses a Clang thread-safety
+warning.
+
+Cc: Frank Crawford <frank@crawford.emu.id.au>
+Cc: Guenter Roeck <linux@roeck-us.net>
+Cc: Jean Delvare <jdelvare@suse.com>
+Cc: linux-hwmon@vger.kernel.org
+Fixes: 376e1a937b30 ("hwmon: (it87) Add calls to smbus_enable/smbus_disable as required")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://lore.kernel.org/r/20260223220102.2158611-15-bart.vanassche@linux.dev
+[groeck: Declare 'ret' at the beginning of it87_resume()]
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/it87.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/it87.c b/drivers/hwmon/it87.c
+index fbe86cec60553..51882f7386cc8 100644
+--- a/drivers/hwmon/it87.c
++++ b/drivers/hwmon/it87.c
+@@ -3547,10 +3547,13 @@ static int it87_resume(struct device *dev)
+ {
+ struct platform_device *pdev = to_platform_device(dev);
+ struct it87_data *data = dev_get_drvdata(dev);
++ int err;
+
+ it87_resume_sio(pdev);
+
+- it87_lock(data);
++ err = it87_lock(data);
++ if (err)
++ return err;
+
+ it87_check_pwm(dev);
+ it87_check_limit_regs(data);
+--
+2.51.0
+
--- /dev/null
+From 29d0e52bdb925b77a0d503c1d102beab8097a4e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:46 +0100
+Subject: i40e: fix registering XDP RxQ info
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8f497dc8a61429cc004720aa8e713743355d80cf ]
+
+Current way of handling XDP RxQ info in i40e has a problem, where frag_size
+is not updated when xsk_buff_pool is detached or when MTU is changed, this
+leads to growing tail always failing for multi-buffer packets.
+
+Couple XDP RxQ info registering with buffer allocations and unregistering
+with cleaning the ring.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-6-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 34 ++++++++++++---------
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 5 +--
+ 2 files changed, 22 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 1f233fac9d4e3..ca35979482c67 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3637,18 +3637,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (ring->vsi->type != I40E_VSI_MAIN)
+ goto skip;
+
+- if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+- err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+- ring->queue_index,
+- ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
+- if (err)
+- return err;
+- }
+-
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- xdp_rxq_info_unreg(&ring->xdp_rxq);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+@@ -3660,17 +3650,23 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ dev_info(&vsi->back->pdev->dev,
+ "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
+ ring->queue_index);
+
+ } else {
++ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
++ ring->queue_index,
++ ring->q_vector->napi.napi_id,
++ ring->rx_buf_len);
++ if (err)
++ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+ MEM_TYPE_PAGE_SHARED,
+ NULL);
+ if (err)
+- return err;
++ goto unreg_xdp;
+ }
+
+ skip:
+@@ -3708,7 +3704,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* set the context in the HMC */
+@@ -3717,7 +3714,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ dev_info(&vsi->back->pdev->dev,
+ "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
+ ring->queue_index, pf_q, err);
+- return -ENOMEM;
++ err = -ENOMEM;
++ goto unreg_xdp;
+ }
+
+ /* configure Rx buffer alignment */
+@@ -3725,7 +3723,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (I40E_2K_TOO_SMALL_WITH_PADDING) {
+ dev_info(&vsi->back->pdev->dev,
+ "2k Rx buffer is too small to fit standard MTU and skb_shared_info\n");
+- return -EOPNOTSUPP;
++ err = -EOPNOTSUPP;
++ goto unreg_xdp;
+ }
+ clear_ring_build_skb_enabled(ring);
+ } else {
+@@ -3755,6 +3754,11 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ return 0;
++unreg_xdp:
++ if (ring->vsi->type == I40E_VSI_MAIN)
++ xdp_rxq_info_unreg(&ring->xdp_rxq);
++
++ return err;
+ }
+
+ /**
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 99604379c87b6..873fd080de939 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1473,6 +1473,9 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ if (!rx_ring->rx_bi)
+ return;
+
++ if (xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
++ xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
++
+ if (rx_ring->xsk_pool) {
+ i40e_xsk_clean_rx_ring(rx_ring);
+ goto skip_free;
+@@ -1530,8 +1533,6 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
+ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
+ {
+ i40e_clean_rx_ring(rx_ring);
+- if (rx_ring->vsi->type == I40E_VSI_MAIN)
+- xdp_rxq_info_unreg(&rx_ring->xdp_rxq);
+ rx_ring->xdp_prog = NULL;
+ kfree(rx_ring->rx_bi);
+ rx_ring->rx_bi = NULL;
+--
+2.51.0
+
--- /dev/null
+From 397197a41df8f4432f32d46ee501aaccd797d031 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:47 +0100
+Subject: i40e: use xdp.frame_sz as XDP RxQ info frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit c69d22c6c46a1d792ba8af3d8d6356fdc0e6f538 ]
+
+The only user of frag_size field in XDP RxQ info is
+bpf_xdp_frags_increase_tail(). It clearly expects whole buffer size instead
+of DMA write size. Different assumptions in i40e driver configuration lead
+to negative tailroom.
+
+Set frag_size to the same value as frame_sz in shared pages mode, use new
+helper to set frag_size when AF_XDP ZC is active.
+
+Fixes: a045d2f2d03d ("i40e: set xdp_rxq_info::frag_size")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-7-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index ca35979482c67..9bcd32d31da77 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3623,6 +3623,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ u16 pf_q = vsi->base_queue + ring->queue_index;
+ struct i40e_hw *hw = &vsi->back->hw;
+ struct i40e_hmc_obj_rxq rx_ctx;
++ u32 xdp_frame_sz;
+ int err = 0;
+ bool ok;
+
+@@ -3632,6 +3633,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ memset(&rx_ctx, 0, sizeof(rx_ctx));
+
+ ring->rx_buf_len = vsi->rx_buf_len;
++ xdp_frame_sz = i40e_rx_pg_size(ring) / 2;
+
+ /* XDP RX-queue info only needed for RX rings exposed to XDP */
+ if (ring->vsi->type != I40E_VSI_MAIN)
+@@ -3639,11 +3641,12 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
++ xdp_frame_sz = xsk_pool_get_rx_frag_step(ring->xsk_pool);
+ ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3659,7 +3662,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+ ring->queue_index,
+ ring->q_vector->napi.napi_id,
+- ring->rx_buf_len);
++ xdp_frame_sz);
+ if (err)
+ return err;
+ err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -3670,7 +3673,7 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ }
+
+ skip:
+- xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
++ xdp_init_buff(&ring->xdp, xdp_frame_sz, &ring->xdp_rxq);
+
+ rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
+ BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));
+--
+2.51.0
+
--- /dev/null
+From 31504ab9038fcd472374f604719adb253ba932e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 17:26:03 +0000
+Subject: indirect_call_wrapper: do not reevaluate function pointer
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 710f5c76580306cdb9ec51fac8fcf6a8faff7821 ]
+
+We have an increasing number of READ_ONCE(xxx->function)
+combined with INDIRECT_CALL_[1234]() helpers.
+
+Unfortunately this forces INDIRECT_CALL_[1234]() to read
+xxx->function many times, which is not what we wanted.
+
+Fix these macros so that xxx->function value is not reloaded.
+
+$ scripts/bloat-o-meter -t vmlinux.0 vmlinux
+add/remove: 0/0 grow/shrink: 1/65 up/down: 122/-1084 (-962)
+Function old new delta
+ip_push_pending_frames 59 181 +122
+ip6_finish_output 687 681 -6
+__udp_enqueue_schedule_skb 1078 1072 -6
+ioam6_output 2319 2312 -7
+xfrm4_rcv_encap_finish2 64 56 -8
+xfrm4_output 297 289 -8
+vrf_ip_local_out 278 270 -8
+vrf_ip6_local_out 278 270 -8
+seg6_input_finish 64 56 -8
+rpl_output 700 692 -8
+ipmr_forward_finish 124 116 -8
+ip_forward_finish 143 135 -8
+ip6mr_forward2_finish 100 92 -8
+ip6_forward_finish 73 65 -8
+input_action_end_bpf 1091 1083 -8
+dst_input 52 44 -8
+__xfrm6_output 801 793 -8
+__xfrm4_output 83 75 -8
+bpf_input 500 491 -9
+__tcp_check_space 530 521 -9
+input_action_end_dt6 291 280 -11
+vti6_tnl_xmit 1634 1622 -12
+bpf_xmit 1203 1191 -12
+rpl_input 497 483 -14
+rawv6_send_hdrinc 1355 1341 -14
+ndisc_send_skb 1030 1016 -14
+ipv6_srh_rcv 1377 1363 -14
+ip_send_unicast_reply 1253 1239 -14
+ip_rcv_finish 226 212 -14
+ip6_rcv_finish 300 286 -14
+input_action_end_x_core 205 191 -14
+input_action_end_x 355 341 -14
+input_action_end_t 205 191 -14
+input_action_end_dx6_finish 127 113 -14
+input_action_end_dx4_finish 373 359 -14
+input_action_end_dt4 426 412 -14
+input_action_end_core 186 172 -14
+input_action_end_b6_encap 292 278 -14
+input_action_end_b6 198 184 -14
+igmp6_send 1332 1318 -14
+ip_sublist_rcv 864 848 -16
+ip6_sublist_rcv 1091 1075 -16
+ipv6_rpl_srh_rcv 1937 1920 -17
+xfrm_policy_queue_process 1246 1228 -18
+seg6_output_core 903 885 -18
+mld_sendpack 856 836 -20
+NF_HOOK 756 736 -20
+vti_tunnel_xmit 1447 1426 -21
+input_action_end_dx6 664 642 -22
+input_action_end 1502 1480 -22
+sock_sendmsg_nosec 134 111 -23
+ip6mr_forward2 388 364 -24
+sock_recvmsg_nosec 134 109 -25
+seg6_input_core 836 810 -26
+ip_send_skb 172 146 -26
+ip_local_out 140 114 -26
+ip6_local_out 140 114 -26
+__sock_sendmsg 162 136 -26
+__ip_queue_xmit 1196 1170 -26
+__ip_finish_output 405 379 -26
+ipmr_queue_fwd_xmit 373 346 -27
+sock_recvmsg 173 145 -28
+ip6_xmit 1635 1607 -28
+xfrm_output_resume 1418 1389 -29
+ip_build_and_send_pkt 625 591 -34
+dst_output 504 432 -72
+Total: Before=25217686, After=25216724, chg -0.00%
+
+Fixes: 283c16a2dfd3 ("indirect call wrappers: helpers to speed-up indirect calls of builtin")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20260227172603.1700433-1-edumazet@google.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/indirect_call_wrapper.h | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/include/linux/indirect_call_wrapper.h b/include/linux/indirect_call_wrapper.h
+index c1c76a70a6ce9..227cee5e2a98b 100644
+--- a/include/linux/indirect_call_wrapper.h
++++ b/include/linux/indirect_call_wrapper.h
+@@ -16,22 +16,26 @@
+ */
+ #define INDIRECT_CALL_1(f, f1, ...) \
+ ({ \
+- likely(f == f1) ? f1(__VA_ARGS__) : f(__VA_ARGS__); \
++ typeof(f) __f1 = (f); \
++ likely(__f1 == f1) ? f1(__VA_ARGS__) : __f1(__VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_2(f, f2, f1, ...) \
+ ({ \
+- likely(f == f2) ? f2(__VA_ARGS__) : \
+- INDIRECT_CALL_1(f, f1, __VA_ARGS__); \
++ typeof(f) __f2 = (f); \
++ likely(__f2 == f2) ? f2(__VA_ARGS__) : \
++ INDIRECT_CALL_1(__f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_3(f, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f3) ? f3(__VA_ARGS__) : \
+- INDIRECT_CALL_2(f, f2, f1, __VA_ARGS__); \
++ typeof(f) __f3 = (f); \
++ likely(__f3 == f3) ? f3(__VA_ARGS__) : \
++ INDIRECT_CALL_2(__f3, f2, f1, __VA_ARGS__); \
+ })
+ #define INDIRECT_CALL_4(f, f4, f3, f2, f1, ...) \
+ ({ \
+- likely(f == f4) ? f4(__VA_ARGS__) : \
+- INDIRECT_CALL_3(f, f3, f2, f1, __VA_ARGS__); \
++ typeof(f) __f4 = (f); \
++ likely(__f4 == f4) ? f4(__VA_ARGS__) : \
++ INDIRECT_CALL_3(__f4, f3, f2, f1, __VA_ARGS__); \
+ })
+
+ #define INDIRECT_CALLABLE_DECLARE(f) f
+--
+2.51.0
+
--- /dev/null
+From 5e483bbb1e7308826f3730bbfe0568c0997d8440 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 11:45:48 -0800
+Subject: ipv6: fix NULL pointer deref in ip6_rt_get_dev_rcu()
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2ffb4f5c2ccb2fa1c049dd11899aee7967deef5a ]
+
+l3mdev_master_dev_rcu() can return NULL when the slave device is being
+un-slaved from a VRF. All other callers deal with this, but we lost
+the fallback to loopback in ip6_rt_pcpu_alloc() -> ip6_rt_get_dev_rcu()
+with commit 4832c30d5458 ("net: ipv6: put host and anycast routes on
+device with address").
+
+ KASAN: null-ptr-deref in range [0x0000000000000108-0x000000000000010f]
+ RIP: 0010:ip6_rt_pcpu_alloc (net/ipv6/route.c:1418)
+ Call Trace:
+ ip6_pol_route (net/ipv6/route.c:2318)
+ fib6_rule_lookup (net/ipv6/fib6_rules.c:115)
+ ip6_route_output_flags (net/ipv6/route.c:2607)
+ vrf_process_v6_outbound (drivers/net/vrf.c:437)
+
+I was tempted to rework the un-slaving code to clear the flag first
+and insert synchronize_rcu() before we remove the upper. But looks like
+the explicit fallback to loopback_dev is an established pattern.
+And I guess avoiding the synchronize_rcu() is nice, too.
+
+Fixes: 4832c30d5458 ("net: ipv6: put host and anycast routes on device with address")
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260301194548.927324-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index ad452a04d7299..72853ef73e821 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -1065,7 +1065,8 @@ static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
+ */
+ if (netif_is_l3_slave(dev) &&
+ !rt6_need_strict(&res->f6i->fib6_dst.addr))
+- dev = l3mdev_master_dev_rcu(dev);
++ dev = l3mdev_master_dev_rcu(dev) ? :
++ dev_net(dev)->loopback_dev;
+ else if (!netif_is_l3_master(dev))
+ dev = dev_net(dev)->loopback_dev;
+ /* last case is netif_is_l3_master(dev) is true in which
+--
+2.51.0
+
--- /dev/null
+From 2fe56ad26448c1adc0be8d48ace9eeb24f76e067 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 21:14:10 +0545
+Subject: kunit: tool: copy caller args in run_kernel to prevent mutation
+
+From: Shuvam Pandey <shuvampandey1@gmail.com>
+
+[ Upstream commit 40804c4974b8df2adab72f6475d343eaff72b7f6 ]
+
+run_kernel() appended KUnit flags directly to the caller-provided args
+list. When exec_tests() calls run_kernel() repeatedly (e.g. with
+--run_isolated), each call mutated the same list, causing later runs
+to inherit stale filter_glob values and duplicate kunit.enable flags.
+
+Fix this by copying args at the start of run_kernel(). Add a regression
+test that calls run_kernel() twice with the same list and verifies the
+original remains unchanged.
+
+Fixes: ff9e09a3762f ("kunit: tool: support running each suite/test separately")
+Signed-off-by: Shuvam Pandey <shuvampandey1@gmail.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/kunit/kunit_kernel.py | 6 ++++--
+ tools/testing/kunit/kunit_tool_test.py | 26 ++++++++++++++++++++++++++
+ 2 files changed, 30 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
+index 0b6488efed47a..df7622dac0ff1 100644
+--- a/tools/testing/kunit/kunit_kernel.py
++++ b/tools/testing/kunit/kunit_kernel.py
+@@ -331,8 +331,10 @@ class LinuxSourceTree:
+ return self.validate_config(build_dir)
+
+ def run_kernel(self, args: Optional[List[str]]=None, build_dir: str='', filter_glob: str='', filter: str='', filter_action: Optional[str]=None, timeout: Optional[int]=None) -> Iterator[str]:
+- if not args:
+- args = []
++ # Copy to avoid mutating the caller-supplied list. exec_tests() reuses
++ # the same args across repeated run_kernel() calls (e.g. --run_isolated),
++ # so appending to the original would accumulate stale flags on each call.
++ args = list(args) if args else []
+ if filter_glob:
+ args.append('kunit.filter_glob=' + filter_glob)
+ if filter:
+diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
+index b28c1510be2eb..5254a25ad2d9d 100755
+--- a/tools/testing/kunit/kunit_tool_test.py
++++ b/tools/testing/kunit/kunit_tool_test.py
+@@ -461,6 +461,32 @@ class LinuxSourceTreeTest(unittest.TestCase):
+ with open(kunit_kernel.get_outfile_path(build_dir), 'rt') as outfile:
+ self.assertEqual(outfile.read(), 'hi\nbye\n', msg='Missing some output')
+
++ def test_run_kernel_args_not_mutated(self):
++ """Verify run_kernel() copies args so callers can reuse them."""
++ start_calls = []
++
++ def fake_start(start_args, unused_build_dir):
++ start_calls.append(list(start_args))
++ return subprocess.Popen(['printf', 'KTAP version 1\n'],
++ text=True, stdout=subprocess.PIPE)
++
++ with tempfile.TemporaryDirectory('') as build_dir:
++ tree = kunit_kernel.LinuxSourceTree(build_dir,
++ kunitconfig_paths=[os.devnull])
++ with mock.patch.object(tree._ops, 'start', side_effect=fake_start), \
++ mock.patch.object(kunit_kernel.subprocess, 'call'):
++ kernel_args = ['mem=1G']
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test1'):
++ pass
++ for _ in tree.run_kernel(args=kernel_args, build_dir=build_dir,
++ filter_glob='suite.test2'):
++ pass
++ self.assertEqual(kernel_args, ['mem=1G'],
++ 'run_kernel() should not modify caller args')
++ self.assertIn('kunit.filter_glob=suite.test1', start_calls[0])
++ self.assertIn('kunit.filter_glob=suite.test2', start_calls[1])
++
+ def test_build_reconfig_no_config(self):
+ with tempfile.TemporaryDirectory('') as build_dir:
+ with open(kunit_kernel.get_kunitconfig_path(build_dir), 'w') as f:
+--
+2.51.0
+
--- /dev/null
+From a5e0c5ed4dc85d7b64c6638f5a3c67b792bc4b52 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:56 +0100
+Subject: net: bridge: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit e5e890630533bdc15b26a34bb8e7ef539bdf1322 ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. Then, if neigh_suppress is enabled and an ICMPv6
+Neighbor Discovery packet reaches the bridge, br_do_suppress_nd() will
+dereference ipv6_stub->nd_tbl which is NULL, passing it to
+neigh_lookup(). This causes a kernel NULL pointer dereference.
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000268
+ Oops: 0000 [#1] PREEMPT SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x16/0xe0
+ [...]
+ Call Trace:
+ <IRQ>
+ ? neigh_lookup+0x16/0xe0
+ br_do_suppress_nd+0x160/0x290 [bridge]
+ br_handle_frame_finish+0x500/0x620 [bridge]
+ br_handle_frame+0x353/0x440 [bridge]
+ __netif_receive_skb_core.constprop.0+0x298/0x1110
+ __netif_receive_skb_one_core+0x3d/0xa0
+ process_backlog+0xa0/0x140
+ __napi_poll+0x2c/0x170
+ net_rx_action+0x2c4/0x3a0
+ handle_softirqs+0xd0/0x270
+ do_softirq+0x3f/0x60
+
+Fix this by replacing IS_ENABLED(IPV6) call with ipv6_mod_enabled() in
+the callers. This is in essence disabling NS/NA suppression when IPv6 is
+disabled.
+
+Fixes: ed842faeb2bd ("bridge: suppress nd pkts on BR_NEIGH_SUPPRESS ports")
+Reported-by: Guruprasad C P <gurucp2005@gmail.com>
+Closes: https://lore.kernel.org/netdev/CAHXs0ORzd62QOG-Fttqa2Cx_A_VFp=utE2H2VTX5nqfgs7LDxQ@mail.gmail.com/
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Acked-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20260304120357.9778-1-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bridge/br_device.c | 2 +-
+ net/bridge/br_input.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
+index 42d4c3727bf76..4af3e4c67038d 100644
+--- a/net/bridge/br_device.c
++++ b/net/bridge/br_device.c
+@@ -72,7 +72,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
+ br_do_proxy_suppress_arp(skb, br, vid, NULL);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
+index 847fe03a08ee8..46d2b20afd5ff 100644
+--- a/net/bridge/br_input.c
++++ b/net/bridge/br_input.c
+@@ -165,7 +165,7 @@ int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb
+ (skb->protocol == htons(ETH_P_ARP) ||
+ skb->protocol == htons(ETH_P_RARP))) {
+ br_do_proxy_suppress_arp(skb, br, vid, p);
+- } else if (IS_ENABLED(CONFIG_IPV6) &&
++ } else if (ipv6_mod_enabled() &&
+ skb->protocol == htons(ETH_P_IPV6) &&
+ br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
+ pskb_may_pull(skb, sizeof(struct ipv6hdr) +
+--
+2.51.0
+
--- /dev/null
+From 667d7ad915631bed0071d1cd28bc108ac1b3e3d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2026 18:13:14 -0300
+Subject: net: dsa: realtek: rtl8365mb: fix rtl8365mb_phy_ocp_write return
+ value
+
+From: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+
+[ Upstream commit 7cbe98f7bef965241a5908d50d557008cf998aee ]
+
+Function rtl8365mb_phy_ocp_write() always returns 0, even when an error
+occurs during register access. This patch fixes the return value to
+propagate the actual error code from regmap operations.
+
+Link: https://lore.kernel.org/netdev/a2dfde3c-d46f-434b-9d16-1e251e449068@yahoo.com/
+Fixes: 2796728460b8 ("net: dsa: realtek: rtl8365mb: serialize indirect PHY register access")
+Signed-off-by: Mieczyslaw Nalewaj <namiltd@yahoo.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: Luiz Angelo Daros de Luca <luizluca@gmail.com>
+Reviewed-by: Linus Walleij <linusw@kernel.org>
+Link: https://patch.msgid.link/20260301-realtek_namiltd_fix1-v1-1-43a6bb707f9c@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/realtek/rtl8365mb.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c
+index 41ea3b5a42b14..318eced8f0d34 100644
+--- a/drivers/net/dsa/realtek/rtl8365mb.c
++++ b/drivers/net/dsa/realtek/rtl8365mb.c
+@@ -766,7 +766,7 @@ static int rtl8365mb_phy_ocp_write(struct realtek_priv *priv, int phy,
+ out:
+ mutex_unlock(&priv->map_lock);
+
+- return 0;
++ return ret;
+ }
+
+ static int rtl8365mb_phy_read(struct realtek_priv *priv, int phy, int regnum)
+--
+2.51.0
+
--- /dev/null
+From 11e732fdd2c30c38cec7ef10762545abdf4520c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 18:56:39 +0100
+Subject: net: ethernet: mtk_eth_soc: Reset prog ptr to old_prog in case of
+ error in mtk_xdp_setup()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 0abc73c8a40fd64ac1739c90bb4f42c418d27a5e ]
+
+Reset eBPF program pointer to old_prog and do not decrease its ref-count
+if mtk_open routine in mtk_xdp_setup() fails.
+
+Fixes: 7c26c20da5d42 ("net: ethernet: mtk_eth_soc: add basic XDP support")
+Suggested-by: Paolo Valerio <pvalerio@redhat.com>
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260303-mtk-xdp-prog-ptr-fix-v2-1-97b6dbbe240f@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mediatek/mtk_eth_soc.c | 15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+index c843e6531449b..e2d3bda1dc923 100644
+--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
++++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+@@ -3529,12 +3529,21 @@ static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
++
++ if (netif_running(dev) && need_update) {
++ int err;
++
++ err = mtk_open(dev);
++ if (err) {
++ rcu_assign_pointer(eth->prog, old_prog);
++
++ return err;
++ }
++ }
++
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+- if (netif_running(dev) && need_update)
+- return mtk_open(dev);
+-
+ return 0;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 33df05c6a3d88ddfcbef7c349fd531f01f81dc5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 23:43:59 +0530
+Subject: net: ethernet: ti: am65-cpsw-nuss/cpsw-ale: Fix multicast entry
+ handling in ALE table
+
+From: Chintan Vankar <c-vankar@ti.com>
+
+[ Upstream commit be11a537224d72b906db6b98510619770298c8a4 ]
+
+In the current implementation, flushing multicast entries in MAC mode
+incorrectly deletes entries for all ports instead of only the target port,
+disrupting multicast traffic on other ports. The cause is adding multicast
+entries by setting only host port bit, and not setting the MAC port bits.
+
+Fix this by setting the MAC port's bit in the port mask while adding the
+multicast entry. Also fix the flush logic to preserve the host port bit
+during removal of MAC port and free ALE entries when mask contains only
+host port.
+
+Fixes: 5c50a856d550 ("drivers: net: ethernet: cpsw: add multicast address to ALE table")
+Signed-off-by: Chintan Vankar <c-vankar@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20260224181359.2055322-1-c-vankar@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2 +-
+ drivers/net/ethernet/ti/cpsw_ale.c | 9 ++++-----
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+index 28cc23736a69b..93cb4193cf0ac 100644
+--- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c
++++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
+@@ -261,7 +261,7 @@ static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+- port_mask = ALE_PORT_HOST;
++ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 9eccc7064c2b0..bf0b2950272cf 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -422,14 +422,13 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports dont intersect, not interested */
+- mask &= ~port_mask;
++ mask &= (~port_mask | ALE_PORT_HOST);
+
+- /* free if only remaining port is host port */
+- if (mask)
++ if (mask == 0x0 || mask == ALE_PORT_HOST)
++ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
++ else
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+- else
+- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ }
+
+ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+--
+2.51.0
+
--- /dev/null
+From 14dd1bb704d68b1a988311e2d570df5d70525b42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 19:38:13 +0800
+Subject: net: ipv6: fix panic when IPv4 route references loopback IPv6 nexthop
+
+From: Jiayuan Chen <jiayuan.chen@shopee.com>
+
+[ Upstream commit 21ec92774d1536f71bdc90b0e3d052eff99cf093 ]
+
+When a standalone IPv6 nexthop object is created with a loopback device
+(e.g., "ip -6 nexthop add id 100 dev lo"), fib6_nh_init() misclassifies
+it as a reject route. This is because nexthop objects have no destination
+prefix (fc_dst=::), causing fib6_is_reject() to match any loopback
+nexthop. The reject path skips fib_nh_common_init(), leaving
+nhc_pcpu_rth_output unallocated. If an IPv4 route later references this
+nexthop, __mkroute_output() dereferences NULL nhc_pcpu_rth_output and
+panics.
+
+Simplify the check in fib6_nh_init() to only match explicit reject
+routes (RTF_REJECT) instead of using fib6_is_reject(). The loopback
+promotion heuristic in fib6_is_reject() is handled separately by
+ip6_route_info_create_nh(). After this change, the three cases behave
+as follows:
+
+1. Explicit reject route ("ip -6 route add unreachable 2001:db8::/64"):
+ RTF_REJECT is set, enters reject path, skips fib_nh_common_init().
+ No behavior change.
+
+2. Implicit loopback reject route ("ip -6 route add 2001:db8::/32 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. ip6_route_info_create_nh() still promotes it to reject
+ afterward. nhc_pcpu_rth_output is allocated but unused, which is
+ harmless.
+
+3. Standalone nexthop object ("ip -6 nexthop add id 100 dev lo"):
+ RTF_REJECT is not set, takes normal path, fib_nh_common_init() is
+ called. nhc_pcpu_rth_output is properly allocated, fixing the crash
+ when IPv4 routes reference this nexthop.
+
+Suggested-by: Ido Schimmel <idosch@nvidia.com>
+Fixes: 493ced1ac47c ("ipv4: Allow routes to use nexthop objects")
+Reported-by: syzbot+334190e097a98a1b81bb@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/all/698f8482.a70a0220.2c38d7.00ca.GAE@google.com/T/
+Signed-off-by: Jiayuan Chen <jiayuan.chen@shopee.com>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Reviewed-by: David Ahern <dsahern@kernel.org>
+Link: https://patch.msgid.link/20260304113817.294966-2-jiayuan.chen@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/route.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/route.c b/net/ipv6/route.c
+index 72853ef73e821..7a91e539bbd14 100644
+--- a/net/ipv6/route.c
++++ b/net/ipv6/route.c
+@@ -3567,7 +3567,6 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+ netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
+ struct net_device *dev = NULL;
+ struct inet6_dev *idev = NULL;
+- int addr_type;
+ int err;
+
+ fib6_nh->fib_nh_family = AF_INET6;
+@@ -3609,11 +3608,10 @@ int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
+
+ fib6_nh->fib_nh_weight = 1;
+
+- /* We cannot add true routes via loopback here,
+- * they would result in kernel looping; promote them to reject routes
++ /* Reset the nexthop device to the loopback device in case of reject
++ * routes.
+ */
+- addr_type = ipv6_addr_type(&cfg->fc_dst);
+- if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
++ if (cfg->fc_flags & RTF_REJECT) {
+ /* hold loopback dev/idev if we haven't done so. */
+ if (dev != net->loopback_dev) {
+ if (dev) {
+--
+2.51.0
+
--- /dev/null
+From 1ada8cfc18c36767cb22dec1de3423aa8a796fc0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 2 Mar 2026 18:32:37 +0200
+Subject: net: nfc: nci: Fix zero-length proprietary notifications
+
+From: Ian Ray <ian.ray@gehealthcare.com>
+
+[ Upstream commit f7d92f11bd33a6eb49c7c812255ef4ab13681f0f ]
+
+NCI NFC controllers may have proprietary OIDs with zero-length payload.
+One example is: drivers/nfc/nxp-nci/core.c, NXP_NCI_RF_TXLDO_ERROR_NTF.
+
+Allow a zero length payload in proprietary notifications *only*.
+
+Before:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+-- >8 --
+
+After:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 3
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x23, plen=0
+kernel: nci: nci_ntf_packet: unknown ntf opcode 0x123
+kernel: nfc nfc0: NFC: RF transmitter couldn't start. Bad power and/or configuration?
+-- >8 --
+
+After fixing the hardware:
+
+-- >8 --
+kernel: nci: nci_recv_frame: len 27
+kernel: nci: nci_ntf_packet: NCI RX: MT=ntf, PBF=0, GID=0x1, OID=0x5, plen=24
+kernel: nci: nci_rf_intf_activated_ntf_packet: rf_discovery_id 1
+-- >8 --
+
+Fixes: d24b03535e5e ("nfc: nci: Fix uninit-value in nci_dev_up and nci_ntf_packet")
+Signed-off-by: Ian Ray <ian.ray@gehealthcare.com>
+Link: https://patch.msgid.link/20260302163238.140576-1-ian.ray@gehealthcare.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index b7d4952a7dcf8..7a4742a092626 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1471,10 +1471,20 @@ static bool nci_valid_size(struct sk_buff *skb)
+ unsigned int hdr_size = NCI_CTRL_HDR_SIZE;
+
+ if (skb->len < hdr_size ||
+- !nci_plen(skb->data) ||
+ skb->len < hdr_size + nci_plen(skb->data)) {
+ return false;
+ }
++
++ if (!nci_plen(skb->data)) {
++ /* Allow zero length in proprietary notifications (0x20 - 0x3F). */
++ if (nci_opcode_oid(nci_opcode(skb->data)) >= 0x20 &&
++ nci_mt(skb->data) == NCI_MT_NTF_PKT)
++ return true;
++
++ /* Disallow zero length otherwise. */
++ return false;
++ }
++
+ return true;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 345cc49b3c56d0760d3032d657d4e809021ad117 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 13:23:36 -0700
+Subject: net/rds: Fix circular locking dependency in rds_tcp_tune
+
+From: Allison Henderson <achender@kernel.org>
+
+[ Upstream commit 6a877ececd6daa002a9a0002cd0fbca6592a9244 ]
+
+syzbot reported a circular locking dependency in rds_tcp_tune() where
+sk_net_refcnt_upgrade() is called while holding the socket lock:
+
+======================================================
+WARNING: possible circular locking dependency detected
+======================================================
+kworker/u10:8/15040 is trying to acquire lock:
+ffffffff8e9aaf80 (fs_reclaim){+.+.}-{0:0},
+at: __kmalloc_cache_noprof+0x4b/0x6f0
+
+but task is already holding lock:
+ffff88805a3c1ce0 (k-sk_lock-AF_INET6){+.+.}-{0:0},
+at: rds_tcp_tune+0xd7/0x930
+
+The issue occurs because sk_net_refcnt_upgrade() performs memory
+allocation (via get_net_track() -> ref_tracker_alloc()) while the
+socket lock is held, creating a circular dependency with fs_reclaim.
+
+Fix this by moving sk_net_refcnt_upgrade() outside the socket lock
+critical section. This is safe because the fields modified by the
+sk_net_refcnt_upgrade() call (sk_net_refcnt, ns_tracker) are not
+accessed by any concurrent code path at this point.
+
+v2:
+ - Corrected fixes tag
+ - check patch line wrap nits
+ - ai commentary nits
+
+Reported-by: syzbot+2e2cf5331207053b8106@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=2e2cf5331207053b8106
+Fixes: 3a58f13a881e ("net: rds: acquire refcount on TCP sockets")
+Signed-off-by: Allison Henderson <achender@kernel.org>
+Link: https://patch.msgid.link/20260227202336.167757-1-achender@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rds/tcp.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/net/rds/tcp.c b/net/rds/tcp.c
+index 985b05f38b674..dee18da64a322 100644
+--- a/net/rds/tcp.c
++++ b/net/rds/tcp.c
+@@ -494,18 +494,24 @@ bool rds_tcp_tune(struct socket *sock)
+ struct rds_tcp_net *rtn;
+
+ tcp_sock_set_nodelay(sock->sk);
+- lock_sock(sk);
+ /* TCP timer functions might access net namespace even after
+ * a process which created this net namespace terminated.
+ */
+ if (!sk->sk_net_refcnt) {
+- if (!maybe_get_net(net)) {
+- release_sock(sk);
++ if (!maybe_get_net(net))
+ return false;
+- }
++ /*
++ * sk_net_refcnt_upgrade() must be called before lock_sock()
++ * because it does a GFP_KERNEL allocation, which can trigger
++ * fs_reclaim and create a circular lock dependency with the
++ * socket lock. The fields it modifies (sk_net_refcnt,
++ * ns_tracker) are not accessed by any concurrent code path
++ * at this point.
++ */
+ sk_net_refcnt_upgrade(sk);
+ put_net(net);
+ }
++ lock_sock(sk);
+ rtn = net_generic(net, rds_tcp_netid);
+ if (rtn->sndbuf_size > 0) {
+ sk->sk_sndbuf = rtn->sndbuf_size;
+--
+2.51.0
+
--- /dev/null
+From 9faf87f2bfcb8893fe38eced39ee8343009c736e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 09:06:02 -0500
+Subject: net/sched: act_ife: Fix metalist update behavior
+
+From: Jamal Hadi Salim <jhs@mojatatu.com>
+
+[ Upstream commit e2cedd400c3ec0302ffca2490e8751772906ac23 ]
+
+Whenever an ife action replace changes the metalist, instead of
+replacing the old data on the metalist, the current ife code is appending
+the new metadata. Aside from being innapropriate behavior, this may lead
+to an unbounded addition of metadata to the metalist which might cause an
+out of bounds error when running the encode op:
+
+[ 138.423369][ C1] ==================================================================
+[ 138.424317][ C1] BUG: KASAN: slab-out-of-bounds in ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.424906][ C1] Write of size 4 at addr ffff8880077f4ffe by task ife_out_out_bou/255
+[ 138.425778][ C1] CPU: 1 UID: 0 PID: 255 Comm: ife_out_out_bou Not tainted 7.0.0-rc1-00169-gfbdfa8da05b6 #624 PREEMPT(full)
+[ 138.425795][ C1] Hardware name: Bochs Bochs, BIOS Bochs 01/01/2011
+[ 138.425800][ C1] Call Trace:
+[ 138.425804][ C1] <IRQ>
+[ 138.425808][ C1] dump_stack_lvl (lib/dump_stack.c:122)
+[ 138.425828][ C1] print_report (mm/kasan/report.c:379 mm/kasan/report.c:482)
+[ 138.425839][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425844][ C1] ? __virt_addr_valid (./arch/x86/include/asm/preempt.h:95 (discriminator 1) ./include/linux/rcupdate.h:975 (discriminator 1) ./include/linux/mmzone.h:2207 (discriminator 1) arch/x86/mm/physaddr.c:54 (discriminator 1))
+[ 138.425853][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425859][ C1] kasan_report (mm/kasan/report.c:221 mm/kasan/report.c:597)
+[ 138.425868][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425878][ C1] kasan_check_range (mm/kasan/generic.c:186 (discriminator 1) mm/kasan/generic.c:200 (discriminator 1))
+[ 138.425884][ C1] __asan_memset (mm/kasan/shadow.c:84 (discriminator 2))
+[ 138.425889][ C1] ife_tlv_meta_encode (net/ife/ife.c:168)
+[ 138.425893][ C1] ? ife_tlv_meta_encode (net/ife/ife.c:171)
+[ 138.425898][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425903][ C1] ife_encode_meta_u16 (net/sched/act_ife.c:57)
+[ 138.425910][ C1] ? __pfx_do_raw_spin_lock (kernel/locking/spinlock_debug.c:114)
+[ 138.425916][ C1] ? __asan_memcpy (mm/kasan/shadow.c:105 (discriminator 3))
+[ 138.425921][ C1] ? __pfx_ife_encode_meta_u16 (net/sched/act_ife.c:45)
+[ 138.425927][ C1] ? srso_alias_return_thunk (arch/x86/lib/retpoline.S:221)
+[ 138.425931][ C1] tcf_ife_act (net/sched/act_ife.c:847 net/sched/act_ife.c:879)
+
+To solve this issue, fix the replace behavior by adding the metalist to
+the ife rcu data structure.
+
+Fixes: aa9fd9a325d51 ("sched: act: ife: update parameters via rcu handling")
+Reported-by: Ruitong Liu <cnitlrt@gmail.com>
+Tested-by: Ruitong Liu <cnitlrt@gmail.com>
+Co-developed-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Victor Nogueira <victor@mojatatu.com>
+Signed-off-by: Jamal Hadi Salim <jhs@mojatatu.com>
+Link: https://patch.msgid.link/20260304140603.76500-1-jhs@mojatatu.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/tc_act/tc_ife.h | 4 +-
+ net/sched/act_ife.c | 93 ++++++++++++++++++-------------------
+ 2 files changed, 45 insertions(+), 52 deletions(-)
+
+diff --git a/include/net/tc_act/tc_ife.h b/include/net/tc_act/tc_ife.h
+index c7f24a2da1cad..24d4d5a62b3c2 100644
+--- a/include/net/tc_act/tc_ife.h
++++ b/include/net/tc_act/tc_ife.h
+@@ -13,15 +13,13 @@ struct tcf_ife_params {
+ u8 eth_src[ETH_ALEN];
+ u16 eth_type;
+ u16 flags;
+-
++ struct list_head metalist;
+ struct rcu_head rcu;
+ };
+
+ struct tcf_ife_info {
+ struct tc_action common;
+ struct tcf_ife_params __rcu *params;
+- /* list of metaids allowed */
+- struct list_head metalist;
+ };
+ #define to_ife(a) ((struct tcf_ife_info *)a)
+
+diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
+index 58c1ab02bd0d2..bf772401b1f41 100644
+--- a/net/sched/act_ife.c
++++ b/net/sched/act_ife.c
+@@ -293,8 +293,8 @@ static int load_metaops_and_vet(u32 metaid, void *val, int len, bool rtnl_held)
+ /* called when adding new meta information
+ */
+ static int __add_metainfo(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool atomic, bool exists)
++ struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len, bool atomic)
+ {
+ struct tcf_meta_info *mi = NULL;
+ int ret = 0;
+@@ -313,45 +313,40 @@ static int __add_metainfo(const struct tcf_meta_ops *ops,
+ }
+ }
+
+- if (exists)
+- spin_lock_bh(&ife->tcf_lock);
+- list_add_tail(&mi->metalist, &ife->metalist);
+- if (exists)
+- spin_unlock_bh(&ife->tcf_lock);
++ list_add_tail(&mi->metalist, &p->metalist);
+
+ return ret;
+ }
+
+ static int add_metainfo_and_get_ops(const struct tcf_meta_ops *ops,
+- struct tcf_ife_info *ife, u32 metaid,
+- bool exists)
++ struct tcf_ife_params *p, u32 metaid)
+ {
+ int ret;
+
+ if (!try_module_get(ops->owner))
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, NULL, 0, true, exists);
++ ret = __add_metainfo(ops, p, metaid, NULL, 0, true);
+ if (ret)
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int add_metainfo(struct tcf_ife_info *ife, u32 metaid, void *metaval,
+- int len, bool exists)
++static int add_metainfo(struct tcf_ife_params *p, u32 metaid, void *metaval,
++ int len)
+ {
+ const struct tcf_meta_ops *ops = find_ife_oplist(metaid);
+ int ret;
+
+ if (!ops)
+ return -ENOENT;
+- ret = __add_metainfo(ops, ife, metaid, metaval, len, false, exists);
++ ret = __add_metainfo(ops, p, metaid, metaval, len, false);
+ if (ret)
+ /*put back what find_ife_oplist took */
+ module_put(ops->owner);
+ return ret;
+ }
+
+-static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
++static int use_all_metadata(struct tcf_ife_params *p)
+ {
+ struct tcf_meta_ops *o;
+ int rc = 0;
+@@ -359,7 +354,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+
+ read_lock(&ife_mod_lock);
+ list_for_each_entry(o, &ifeoplist, list) {
+- rc = add_metainfo_and_get_ops(o, ife, o->metaid, exists);
++ rc = add_metainfo_and_get_ops(o, p, o->metaid);
+ if (rc == 0)
+ installed += 1;
+ }
+@@ -371,7 +366,7 @@ static int use_all_metadata(struct tcf_ife_info *ife, bool exists)
+ return -EINVAL;
+ }
+
+-static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int dump_metalist(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+ struct tcf_meta_info *e;
+ struct nlattr *nest;
+@@ -379,14 +374,14 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ int total_encoded = 0;
+
+ /*can only happen on decode */
+- if (list_empty(&ife->metalist))
++ if (list_empty(&p->metalist))
+ return 0;
+
+ nest = nla_nest_start_noflag(skb, TCA_IFE_METALST);
+ if (!nest)
+ goto out_nlmsg_trim;
+
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry(e, &p->metalist, metalist) {
+ if (!e->ops->get(skb, e))
+ total_encoded += 1;
+ }
+@@ -403,13 +398,11 @@ static int dump_metalist(struct sk_buff *skb, struct tcf_ife_info *ife)
+ return -1;
+ }
+
+-/* under ife->tcf_lock */
+-static void _tcf_ife_cleanup(struct tc_action *a)
++static void __tcf_ife_cleanup(struct tcf_ife_params *p)
+ {
+- struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_meta_info *e, *n;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_safe(e, n, &p->metalist, metalist) {
+ list_del(&e->metalist);
+ if (e->metaval) {
+ if (e->ops->release)
+@@ -422,18 +415,23 @@ static void _tcf_ife_cleanup(struct tc_action *a)
+ }
+ }
+
++static void tcf_ife_cleanup_params(struct rcu_head *head)
++{
++ struct tcf_ife_params *p = container_of(head, struct tcf_ife_params,
++ rcu);
++
++ __tcf_ife_cleanup(p);
++ kfree(p);
++}
++
+ static void tcf_ife_cleanup(struct tc_action *a)
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ struct tcf_ife_params *p;
+
+- spin_lock_bh(&ife->tcf_lock);
+- _tcf_ife_cleanup(a);
+- spin_unlock_bh(&ife->tcf_lock);
+-
+ p = rcu_dereference_protected(ife->params, 1);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+ }
+
+ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+@@ -455,8 +453,7 @@ static int load_metalist(struct nlattr **tb, bool rtnl_held)
+ return 0;
+ }
+
+-static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+- bool exists, bool rtnl_held)
++static int populate_metalist(struct tcf_ife_params *p, struct nlattr **tb)
+ {
+ int len = 0;
+ int rc = 0;
+@@ -468,7 +465,7 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb,
+ val = nla_data(tb[i]);
+ len = nla_len(tb[i]);
+
+- rc = add_metainfo(ife, i, val, len, exists);
++ rc = add_metainfo(p, i, val, len);
+ if (rc)
+ return rc;
+ }
+@@ -523,6 +520,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
++ INIT_LIST_HEAD(&p->metalist);
+
+ if (tb[TCA_IFE_METALST]) {
+ err = nla_parse_nested_deprecated(tb2, IFE_META_MAX,
+@@ -567,8 +565,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ ife = to_ife(*a);
+- if (ret == ACT_P_CREATED)
+- INIT_LIST_HEAD(&ife->metalist);
+
+ err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
+ if (err < 0)
+@@ -600,8 +596,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ }
+
+ if (tb[TCA_IFE_METALST]) {
+- err = populate_metalist(ife, tb2, exists,
+- !(flags & TCA_ACT_FLAGS_NO_RTNL));
++ err = populate_metalist(p, tb2);
+ if (err)
+ goto metadata_parse_err;
+ } else {
+@@ -610,7 +605,7 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ * as we can. You better have at least one else we are
+ * going to bail out
+ */
+- err = use_all_metadata(ife, exists);
++ err = use_all_metadata(p);
+ if (err)
+ goto metadata_parse_err;
+ }
+@@ -626,13 +621,14 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ if (p)
+- kfree_rcu(p, rcu);
++ call_rcu(&p->rcu, tcf_ife_cleanup_params);
+
+ return ret;
+ metadata_parse_err:
+ if (goto_ch)
+ tcf_chain_put_by_act(goto_ch);
+ release_idr:
++ __tcf_ife_cleanup(p);
+ kfree(p);
+ tcf_idr_release(*a, bind);
+ return err;
+@@ -679,7 +675,7 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ if (nla_put(skb, TCA_IFE_TYPE, 2, &p->eth_type))
+ goto nla_put_failure;
+
+- if (dump_metalist(skb, ife)) {
++ if (dump_metalist(skb, p)) {
+ /*ignore failure to dump metalist */
+ pr_info("Failed to dump metalist\n");
+ }
+@@ -693,13 +689,13 @@ static int tcf_ife_dump(struct sk_buff *skb, struct tc_action *a, int bind,
+ return -1;
+ }
+
+-static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife,
++static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_params *p,
+ u16 metaid, u16 mlen, void *mdata)
+ {
+ struct tcf_meta_info *e;
+
+ /* XXX: use hash to speed up */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (metaid == e->metaid) {
+ if (e->ops) {
+ /* We check for decode presence already */
+@@ -716,10 +712,13 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ {
+ struct tcf_ife_info *ife = to_ife(a);
+ int action = ife->tcf_action;
++ struct tcf_ife_params *p;
+ u8 *ifehdr_end;
+ u8 *tlv_data;
+ u16 metalen;
+
++ p = rcu_dereference_bh(ife->params);
++
+ bstats_update(this_cpu_ptr(ife->common.cpu_bstats), skb);
+ tcf_lastuse_update(&ife->tcf_tm);
+
+@@ -745,7 +744,7 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ return TC_ACT_SHOT;
+ }
+
+- if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) {
++ if (find_decode_metaid(skb, p, mtype, dlen, curr_data)) {
+ /* abuse overlimits to count when we receive metadata
+ * but dont have an ops for it
+ */
+@@ -769,12 +768,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a,
+ /*XXX: check if we can do this at install time instead of current
+ * send data path
+ **/
+-static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_info *ife)
++static int ife_get_sz(struct sk_buff *skb, struct tcf_ife_params *p)
+ {
+- struct tcf_meta_info *e, *n;
++ struct tcf_meta_info *e;
+ int tot_run_sz = 0, run_sz = 0;
+
+- list_for_each_entry_safe(e, n, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->check_presence) {
+ run_sz = e->ops->check_presence(skb, e);
+ tot_run_sz += run_sz;
+@@ -795,7 +794,7 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ OUTERHDR:TOTMETALEN:{TLVHDR:Metadatum:TLVHDR..}:ORIGDATA
+ where ORIGDATA = original ethernet header ...
+ */
+- u16 metalen = ife_get_sz(skb, ife);
++ u16 metalen = ife_get_sz(skb, p);
+ int hdrm = metalen + skb->dev->hard_header_len + IFE_METAHDRLEN;
+ unsigned int skboff = 0;
+ int new_len = skb->len + hdrm;
+@@ -833,25 +832,21 @@ static int tcf_ife_encode(struct sk_buff *skb, const struct tc_action *a,
+ if (!ife_meta)
+ goto drop;
+
+- spin_lock(&ife->tcf_lock);
+-
+ /* XXX: we dont have a clever way of telling encode to
+ * not repeat some of the computations that are done by
+ * ops->presence_check...
+ */
+- list_for_each_entry(e, &ife->metalist, metalist) {
++ list_for_each_entry_rcu(e, &p->metalist, metalist) {
+ if (e->ops->encode) {
+ err = e->ops->encode(skb, (void *)(ife_meta + skboff),
+ e);
+ }
+ if (err < 0) {
+ /* too corrupt to keep around if overwritten */
+- spin_unlock(&ife->tcf_lock);
+ goto drop;
+ }
+ skboff += err;
+ }
+- spin_unlock(&ife->tcf_lock);
+ oethh = (struct ethhdr *)skb->data;
+
+ if (!is_zero_ether_addr(p->eth_src))
+--
+2.51.0
+
--- /dev/null
+From 344573d2532e9559e31d8940db396ffc9da63b75 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 28 Feb 2026 23:53:07 +0900
+Subject: net: sched: avoid qdisc_reset_all_tx_gt() vs dequeue race for
+ lockless qdiscs
+
+From: Koichiro Den <den@valinux.co.jp>
+
+[ Upstream commit 7f083faf59d14c04e01ec05a7507f036c965acf8 ]
+
+When shrinking the number of real tx queues,
+netif_set_real_num_tx_queues() calls qdisc_reset_all_tx_gt() to flush
+qdiscs for queues which will no longer be used.
+
+qdisc_reset_all_tx_gt() currently serializes qdisc_reset() with
+qdisc_lock(). However, for lockless qdiscs, the dequeue path is
+serialized by qdisc_run_begin/end() using qdisc->seqlock instead, so
+qdisc_reset() can run concurrently with __qdisc_run() and free skbs
+while they are still being dequeued, leading to UAF.
+
+This can easily be reproduced on e.g. virtio-net by imposing heavy
+traffic while frequently changing the number of queue pairs:
+
+ iperf3 -ub0 -c $peer -t 0 &
+ while :; do
+ ethtool -L eth0 combined 1
+ ethtool -L eth0 combined 2
+ done
+
+With KASAN enabled, this leads to reports like:
+
+ BUG: KASAN: slab-use-after-free in __qdisc_run+0x133f/0x1760
+ ...
+ Call Trace:
+ <TASK>
+ ...
+ __qdisc_run+0x133f/0x1760
+ __dev_queue_xmit+0x248f/0x3550
+ ip_finish_output2+0xa42/0x2110
+ ip_output+0x1a7/0x410
+ ip_send_skb+0x2e6/0x480
+ udp_send_skb+0xb0a/0x1590
+ udp_sendmsg+0x13c9/0x1fc0
+ ...
+ </TASK>
+
+ Allocated by task 1270 on cpu 5 at 44.558414s:
+ ...
+ alloc_skb_with_frags+0x84/0x7c0
+ sock_alloc_send_pskb+0x69a/0x830
+ __ip_append_data+0x1b86/0x48c0
+ ip_make_skb+0x1e8/0x2b0
+ udp_sendmsg+0x13a6/0x1fc0
+ ...
+
+ Freed by task 1306 on cpu 3 at 44.558445s:
+ ...
+ kmem_cache_free+0x117/0x5e0
+ pfifo_fast_reset+0x14d/0x580
+ qdisc_reset+0x9e/0x5f0
+ netif_set_real_num_tx_queues+0x303/0x840
+ virtnet_set_channels+0x1bf/0x260 [virtio_net]
+ ethnl_set_channels+0x684/0xae0
+ ethnl_default_set_doit+0x31a/0x890
+ ...
+
+Serialize qdisc_reset_all_tx_gt() against the lockless dequeue path by
+taking qdisc->seqlock for TCQ_F_NOLOCK qdiscs, matching the
+serialization model already used by dev_reset_queue().
+
+Additionally clear QDISC_STATE_NON_EMPTY after reset so the qdisc state
+reflects an empty queue, avoiding needless re-scheduling.
+
+Fixes: 6b3ba9146fe6 ("net: sched: allow qdiscs to handle locking")
+Signed-off-by: Koichiro Den <den@valinux.co.jp>
+Link: https://patch.msgid.link/20260228145307.3955532-1-den@valinux.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 3287988a6a987..232b7b22e993a 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -756,13 +756,23 @@ static inline bool skb_skip_tc_classify(struct sk_buff *skb)
+ static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
+ {
+ struct Qdisc *qdisc;
++ bool nolock;
+
+ for (; i < dev->num_tx_queues; i++) {
+ qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
+ if (qdisc) {
++ nolock = qdisc->flags & TCQ_F_NOLOCK;
++
++ if (nolock)
++ spin_lock_bh(&qdisc->seqlock);
+ spin_lock_bh(qdisc_lock(qdisc));
+ qdisc_reset(qdisc);
+ spin_unlock_bh(qdisc_lock(qdisc));
++ if (nolock) {
++ clear_bit(__QDISC_STATE_MISSED, &qdisc->state);
++ clear_bit(__QDISC_STATE_DRAINING, &qdisc->state);
++ spin_unlock_bh(&qdisc->seqlock);
++ }
+ }
+ }
+ }
+--
+2.51.0
+
--- /dev/null
+From d67005e6dbd7ebca05454c392bb1866f8f64953f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 14:58:25 +0000
+Subject: net: stmmac: Fix error handling in VLAN add and delete paths
+
+From: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+
+[ Upstream commit 35dfedce442c4060cfe5b98368bc9643fb995716 ]
+
+stmmac_vlan_rx_add_vid() updates active_vlans and the VLAN hash
+register before writing the HW filter entry. If the filter write
+fails, it leaves a stale VID in active_vlans and the hash register.
+
+stmmac_vlan_rx_kill_vid() has the reverse problem: it clears
+active_vlans before removing the HW filter. On failure, the VID is
+gone from active_vlans but still present in the HW filter table.
+
+To fix this, reorder the operations to update the hash table first,
+then attempt the HW filter operation. If the HW filter fails, roll
+back both the active_vlans bitmap and the hash table by calling
+stmmac_vlan_update() again.
+
+Fixes: ed64639bc1e0 ("net: stmmac: Add support for VLAN Rx filtering")
+Signed-off-by: Ovidiu Panait <ovidiu.panait.rb@renesas.com>
+Link: https://patch.msgid.link/20260303145828.7845-2-ovidiu.panait.rb@renesas.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/stmicro/stmmac/stmmac_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+index 7a8861d77e047..42df435c4d838 100644
+--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
++++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+@@ -6549,9 +6549,13 @@ static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ clear_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto err_pm_put;
++ }
+ }
++
+ err_pm_put:
+ pm_runtime_put(priv->device);
+
+@@ -6572,15 +6576,21 @@ static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vi
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
++ ret = stmmac_vlan_update(priv, is_double);
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ goto del_vlan_error;
++ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+- if (ret)
++ if (ret) {
++ set_bit(vid, priv->active_vlans);
++ stmmac_vlan_update(priv, is_double);
+ goto del_vlan_error;
++ }
+ }
+
+- ret = stmmac_vlan_update(priv, is_double);
+-
+ del_vlan_error:
+ pm_runtime_put(priv->device);
+
+--
+2.51.0
+
--- /dev/null
+From 2c107385c2384dae83c06dd127d9de7472eceaa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2026 13:03:57 +0100
+Subject: net: vxlan: fix nd_tbl NULL dereference when IPv6 is disabled
+
+From: Fernando Fernandez Mancera <fmancera@suse.de>
+
+[ Upstream commit 168ff39e4758897d2eee4756977d036d52884c7e ]
+
+When booting with the 'ipv6.disable=1' parameter, the nd_tbl is never
+initialized because inet6_init() exits before ndisc_init() is called
+which initializes it. If an IPv6 packet is injected into the interface,
+route_shortcircuit() is called and a NULL pointer dereference happens on
+neigh_lookup().
+
+ BUG: kernel NULL pointer dereference, address: 0000000000000380
+ Oops: Oops: 0000 [#1] SMP NOPTI
+ [...]
+ RIP: 0010:neigh_lookup+0x20/0x270
+ [...]
+ Call Trace:
+ <TASK>
+ vxlan_xmit+0x638/0x1ef0 [vxlan]
+ dev_hard_start_xmit+0x9e/0x2e0
+ __dev_queue_xmit+0xbee/0x14e0
+ packet_sendmsg+0x116f/0x1930
+ __sys_sendto+0x1f5/0x200
+ __x64_sys_sendto+0x24/0x30
+ do_syscall_64+0x12f/0x1590
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fix this by adding an early check on route_shortcircuit() when protocol
+is ETH_P_IPV6. Note that ipv6_mod_enabled() cannot be used here because
+VXLAN can be built-in even when IPv6 is built as a module.
+
+Fixes: e15a00aafa4b ("vxlan: add ipv6 route short circuit support")
+Signed-off-by: Fernando Fernandez Mancera <fmancera@suse.de>
+Link: https://patch.msgid.link/20260304120357.9778-2-fmancera@suse.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/vxlan/vxlan_core.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c
+index 1b6b6acd34894..a862998fb3ba6 100644
+--- a/drivers/net/vxlan/vxlan_core.c
++++ b/drivers/net/vxlan/vxlan_core.c
+@@ -2126,6 +2126,11 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
+ {
+ struct ipv6hdr *pip6;
+
++ /* check if nd_tbl is not initiliazed due to
++ * ipv6.disable=1 set during boot
++ */
++ if (!ipv6_stub->nd_tbl)
++ return false;
+ if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
+ return false;
+ pip6 = ipv6_hdr(skb);
+--
+2.51.0
+
--- /dev/null
+From f6062e65644217ed500fd396401c5b1341151e9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:44 -0800
+Subject: nfc: nci: clear NCI_DATA_EXCHANGE before calling completion callback
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0efdc02f4f6d52f8ca5d5889560f325a836ce0a8 ]
+
+Move clear_bit(NCI_DATA_EXCHANGE) before invoking the data exchange
+callback in nci_data_exchange_complete().
+
+The callback (e.g. rawsock_data_exchange_complete) may immediately
+schedule another data exchange via schedule_work(tx_work). On a
+multi-CPU system, tx_work can run and reach nci_transceive() before
+the current nci_data_exchange_complete() clears the flag, causing
+test_and_set_bit(NCI_DATA_EXCHANGE) to return -EBUSY and the new
+transfer to fail.
+
+This causes intermittent flakes in nci/nci_dev in NIPA:
+
+ # # RUN NCI.NCI1_0.t4t_tag_read ...
+ # # t4t_tag_read: Test terminated by timeout
+ # # FAIL NCI.NCI1_0.t4t_tag_read
+ # not ok 3 NCI.NCI1_0.t4t_tag_read
+
+Fixes: 38f04c6b1b68 ("NFC: protect nci_data_exchange transactions")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-5-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/data.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/net/nfc/nci/data.c b/net/nfc/nci/data.c
+index 3d36ea5701f02..7a3fb2a397a1e 100644
+--- a/net/nfc/nci/data.c
++++ b/net/nfc/nci/data.c
+@@ -33,7 +33,8 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
+ if (!conn_info) {
+ kfree_skb(skb);
+- goto exit;
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++ return;
+ }
+
+ cb = conn_info->data_exchange_cb;
+@@ -45,6 +46,12 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ del_timer_sync(&ndev->data_timer);
+ clear_bit(NCI_DATA_EXCHANGE_TO, &ndev->flags);
+
++ /* Mark the exchange as done before calling the callback.
++ * The callback (e.g. rawsock_data_exchange_complete) may
++ * want to immediately queue another data exchange.
++ */
++ clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
++
+ if (cb) {
+ /* forward skb to nfc core */
+ cb(cb_context, skb, err);
+@@ -54,9 +61,6 @@ void nci_data_exchange_complete(struct nci_dev *ndev, struct sk_buff *skb,
+ /* no waiting callback, free skb */
+ kfree_skb(skb);
+ }
+-
+-exit:
+- clear_bit(NCI_DATA_EXCHANGE, &ndev->flags);
+ }
+
+ /* ----------------- NCI TX Data ----------------- */
+--
+2.51.0
+
--- /dev/null
+From 6f450461cdc6b8f2134b52c4a738463037fda9d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:41 -0800
+Subject: nfc: nci: free skb on nci_transceive early error paths
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 7bd4b0c4779f978a6528c9b7937d2ca18e936e2c ]
+
+nci_transceive() takes ownership of the skb passed by the caller,
+but the -EPROTO, -EINVAL, and -EBUSY error paths return without
+freeing it.
+
+Due to issues clearing NCI_DATA_EXCHANGE fixed by subsequent changes
+the nci/nci_dev selftest hits the error path occasionally in NIPA,
+and kmemleak detects leaks:
+
+unreferenced object 0xff11000015ce6a40 (size 640):
+ comm "nci_dev", pid 3954, jiffies 4295441246
+ hex dump (first 32 bytes):
+ 6b 6b 6b 6b 00 a4 00 0c 02 e1 03 6b 6b 6b 6b 6b kkkk.......kkkkk
+ 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk
+ backtrace (crc 7c40cc2a):
+ kmem_cache_alloc_node_noprof+0x492/0x630
+ __alloc_skb+0x11e/0x5f0
+ alloc_skb_with_frags+0xc6/0x8f0
+ sock_alloc_send_pskb+0x326/0x3f0
+ nfc_alloc_send_skb+0x94/0x1d0
+ rawsock_sendmsg+0x162/0x4c0
+ do_syscall_64+0x117/0xfc0
+
+Fixes: 6a2968aaf50c ("NFC: basic NCI protocol implementation")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-2-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/nci/core.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
+index 7a4742a092626..1f33da345bea6 100644
+--- a/net/nfc/nci/core.c
++++ b/net/nfc/nci/core.c
+@@ -1024,18 +1024,23 @@ static int nci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct nci_conn_info *conn_info;
+
+ conn_info = ndev->rf_conn_info;
+- if (!conn_info)
++ if (!conn_info) {
++ kfree_skb(skb);
+ return -EPROTO;
++ }
+
+ pr_debug("target_idx %d, len %d\n", target->idx, skb->len);
+
+ if (!ndev->target_active_prot) {
+ pr_err("unable to exchange data, no active target\n");
++ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+- if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags))
++ if (test_and_set_bit(NCI_DATA_EXCHANGE, &ndev->flags)) {
++ kfree_skb(skb);
+ return -EBUSY;
++ }
+
+ /* store cb and context to be used on receiving data */
+ conn_info->data_exchange_cb = cb;
+--
+2.51.0
+
--- /dev/null
+From 25e9be7196bbb88af03c5c01203a32f57e5eccbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 08:23:45 -0800
+Subject: nfc: rawsock: cancel tx_work before socket teardown
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit d793458c45df2aed498d7f74145eab7ee22d25aa ]
+
+In rawsock_release(), cancel any pending tx_work and purge the write
+queue before orphaning the socket. rawsock_tx_work runs on the system
+workqueue and calls nfc_data_exchange which dereferences the NCI
+device. Without synchronization, tx_work can race with socket and
+device teardown when a process is killed (e.g. by SIGKILL), leading
+to use-after-free or leaked references.
+
+Set SEND_SHUTDOWN first so that if tx_work is already running it will
+see the flag and skip transmitting, then use cancel_work_sync to wait
+for any in-progress execution to finish, and finally purge any
+remaining queued skbs.
+
+Fixes: 23b7869c0fd0 ("NFC: add the NFC socket raw protocol")
+Reviewed-by: Joe Damato <joe@dama.to>
+Link: https://patch.msgid.link/20260303162346.2071888-6-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/nfc/rawsock.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
+index 5125392bb68eb..028b4daafaf83 100644
+--- a/net/nfc/rawsock.c
++++ b/net/nfc/rawsock.c
+@@ -67,6 +67,17 @@ static int rawsock_release(struct socket *sock)
+ if (sock->type == SOCK_RAW)
+ nfc_sock_unlink(&raw_sk_list, sk);
+
++ if (sk->sk_state == TCP_ESTABLISHED) {
++ /* Prevent rawsock_tx_work from starting new transmits and
++ * wait for any in-progress work to finish. This must happen
++ * before the socket is orphaned to avoid a race where
++ * rawsock_tx_work runs after the NCI device has been freed.
++ */
++ sk->sk_shutdown |= SEND_SHUTDOWN;
++ cancel_work_sync(&nfc_rawsock(sk)->tx_work);
++ rawsock_write_queue_purge(sk);
++ }
++
+ sock_orphan(sk);
+ sock_put(sk);
+
+--
+2.51.0
+
--- /dev/null
+From bb512cda3d8beb792af9d262c82d8a6000fb5c31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 19:19:28 -0500
+Subject: nvme: fix memory allocation in nvme_pr_read_keys()
+
+From: Sungwoo Kim <iam@sung-woo.kim>
+
+[ Upstream commit c3320153769f05fd7fe9d840cb555dd3080ae424 ]
+
+nvme_pr_read_keys() takes num_keys from userspace and uses it to
+calculate the allocation size for rse via struct_size(). The upper
+limit is PR_KEYS_MAX (64K).
+
+A malicious or buggy userspace can pass a large num_keys value that
+results in a 4MB allocation attempt at most, causing a warning in
+the page allocator when the order exceeds MAX_PAGE_ORDER.
+
+To fix this, use kvzalloc() instead of kzalloc().
+
+This bug has the same reasoning and fix as the patch below:
+https://lore.kernel.org/linux-block/20251212013510.3576091-1-kartikey406@gmail.com/
+
+Warning log:
+WARNING: mm/page_alloc.c:5216 at __alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216, CPU#1: syz-executor117/272
+Modules linked in:
+CPU: 1 UID: 0 PID: 272 Comm: syz-executor117 Not tainted 6.19.0 #1 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+RIP: 0010:__alloc_frozen_pages_noprof+0x5aa/0x2300 mm/page_alloc.c:5216
+Code: ff 83 bd a8 fe ff ff 0a 0f 86 69 fb ff ff 0f b6 1d f9 f9 c4 04 80 fb 01 0f 87 3b 76 30 ff 83 e3 01 75 09 c6 05 e4 f9 c4 04 01 <0f> 0b 48 c7 85 70 fe ff ff 00 00 00 00 e9 8f fd ff ff 31 c0 e9 0d
+RSP: 0018:ffffc90000fcf450 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: 1ffff920001f9ea0
+RDX: 0000000000000000 RSI: 000000000000000b RDI: 0000000000040dc0
+RBP: ffffc90000fcf648 R08: ffff88800b6c3380 R09: 0000000000000001
+R10: ffffc90000fcf840 R11: ffff88807ffad280 R12: 0000000000000000
+R13: 0000000000040dc0 R14: 0000000000000001 R15: ffffc90000fcf620
+FS: 0000555565db33c0(0000) GS:ffff8880be26c000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 000000002000000c CR3: 0000000003b72000 CR4: 00000000000006f0
+Call Trace:
+ <TASK>
+ alloc_pages_mpol+0x236/0x4d0 mm/mempolicy.c:2486
+ alloc_frozen_pages_noprof+0x149/0x180 mm/mempolicy.c:2557
+ ___kmalloc_large_node+0x10c/0x140 mm/slub.c:5598
+ __kmalloc_large_node_noprof+0x25/0xc0 mm/slub.c:5629
+ __do_kmalloc_node mm/slub.c:5645 [inline]
+ __kmalloc_noprof+0x483/0x6f0 mm/slub.c:5669
+ kmalloc_noprof include/linux/slab.h:961 [inline]
+ kzalloc_noprof include/linux/slab.h:1094 [inline]
+ nvme_pr_read_keys+0x8f/0x4c0 drivers/nvme/host/pr.c:245
+ blkdev_pr_read_keys block/ioctl.c:456 [inline]
+ blkdev_common_ioctl+0x1b71/0x29b0 block/ioctl.c:730
+ blkdev_ioctl+0x299/0x700 block/ioctl.c:786
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:597 [inline]
+ __se_sys_ioctl fs/ioctl.c:583 [inline]
+ __x64_sys_ioctl+0x1bf/0x220 fs/ioctl.c:583
+ x64_sys_call+0x1280/0x21b0 mnt/fuzznvme_1/fuzznvme/linux-build/v6.19/./arch/x86/include/generated/asm/syscalls_64.h:17
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0x71/0x330 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+RIP: 0033:0x7fb893d3108d
+Code: 28 c3 e8 46 1e 00 00 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007ffff61f2f38 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007ffff61f3138 RCX: 00007fb893d3108d
+RDX: 0000000020000040 RSI: 00000000c01070ce RDI: 0000000000000003
+RBP: 0000000000000001 R08: 0000000000000000 R09: 00007ffff61f3138
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001
+R13: 00007ffff61f3128 R14: 00007fb893dae530 R15: 0000000000000001
+ </TASK>
+
+Fixes: 5fd96a4e15de (nvme: Add pr_ops read_keys support)
+Acked-by: Chao Shi <cshi008@fiu.edu>
+Acked-by: Weidong Zhu <weizhu@fiu.edu>
+Acked-by: Dave Tian <daveti@purdue.edu>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: Sungwoo Kim <iam@sung-woo.kim>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index 0636fa4d6f77b..1df7cb3155601 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -217,7 +217,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ if (rse_len > U32_MAX)
+ return -EINVAL;
+
+- rse = kzalloc(rse_len, GFP_KERNEL);
++ rse = kvzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+
+@@ -242,7 +242,7 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ }
+
+ free_rse:
+- kfree(rse);
++ kvfree(rse);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From e03ebf9424c6ef8dd60b3e8fcf5f710aaf5db924 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Dec 2025 16:43:27 -0500
+Subject: nvme: reject invalid pr_read_keys() num_keys values
+
+From: Stefan Hajnoczi <stefanha@redhat.com>
+
+[ Upstream commit 38ec8469f39e0e96e7dd9b76f05e0f8eb78be681 ]
+
+The pr_read_keys() interface has a u32 num_keys parameter. The NVMe
+Reservation Report command has a u32 maximum length. Reject num_keys
+values that are too large to fit.
+
+This will become important when pr_read_keys() is exposed to untrusted
+userspace via an <linux/pr.h> ioctl.
+
+Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Stable-dep-of: c3320153769f ("nvme: fix memory allocation in nvme_pr_read_keys()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/pr.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/host/pr.c b/drivers/nvme/host/pr.c
+index 803efc97fd1ea..0636fa4d6f77b 100644
+--- a/drivers/nvme/host/pr.c
++++ b/drivers/nvme/host/pr.c
+@@ -203,7 +203,8 @@ static int nvme_pr_resv_report(struct block_device *bdev, void *data,
+ static int nvme_pr_read_keys(struct block_device *bdev,
+ struct pr_keys *keys_info)
+ {
+- u32 rse_len, num_keys = keys_info->num_keys;
++ size_t rse_len;
++ u32 num_keys = keys_info->num_keys;
+ struct nvme_reservation_status_ext *rse;
+ int ret, i;
+ bool eds;
+@@ -213,6 +214,9 @@ static int nvme_pr_read_keys(struct block_device *bdev,
+ * enough to get enough keys to fill the return keys buffer.
+ */
+ rse_len = struct_size(rse, regctl_eds, num_keys);
++ if (rse_len > U32_MAX)
++ return -EINVAL;
++
+ rse = kzalloc(rse_len, GFP_KERNEL);
+ if (!rse)
+ return -ENOMEM;
+--
+2.51.0
+
--- /dev/null
+From 384aed2d0756acce67571acc68106ee88d42f9fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:58 +0000
+Subject: octeon_ep: avoid compiler and IQ/OQ reordering
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 43b3160cb639079a15daeb5f080120afbfbfc918 ]
+
+Utilize READ_ONCE and WRITE_ONCE APIs for IO queue Tx/Rx
+variable access to prevent compiler optimization and reordering.
+Additionally, ensure IO queue OUT/IN_CNT registers are flushed
+by performing a read-back after writing.
+
+The compiler could reorder reads/writes to pkts_pending, last_pkt_count,
+etc., causing stale values to be used when calculating packets to process
+or register updates to send to hardware. The Octeon hardware requires a
+read-back after writing to OUT_CNT/IN_CNT registers to ensure the write
+has been flushed through any posted write buffers before the interrupt
+resend bit is set. Without this, we have observed cases where the hardware
+didn't properly update its internal state.
+
+wmb/rmb only provides ordering guarantees but doesn't prevent the compiler
+from performing optimizations like caching in registers, load tearing etc.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-3-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 21 +++++++++------
+ .../net/ethernet/marvell/octeon_ep/octep_rx.c | 27 +++++++++++++------
+ 2 files changed, 32 insertions(+), 16 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 111caa5ce12fa..e4a78d5e73495 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -541,17 +541,22 @@ static void octep_clean_irqs(struct octep_device *oct)
+ */
+ static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+- u32 pkts_pend = oq->pkts_pending;
++ u32 pkts_pend = READ_ONCE(oq->pkts_pending);
++ u32 last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ u32 pkts_processed = READ_ONCE(iq->pkts_processed);
++ u32 pkt_in_done = READ_ONCE(iq->pkt_in_done);
+
+ netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);
+- if (iq->pkts_processed) {
+- writel(iq->pkts_processed, iq->inst_cnt_reg);
+- iq->pkt_in_done -= iq->pkts_processed;
+- iq->pkts_processed = 0;
++ if (pkts_processed) {
++ writel(pkts_processed, iq->inst_cnt_reg);
++ readl(iq->inst_cnt_reg);
++ WRITE_ONCE(iq->pkt_in_done, (pkt_in_done - pkts_processed));
++ WRITE_ONCE(iq->pkts_processed, 0);
+ }
+- if (oq->last_pkt_count - pkts_pend) {
+- writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
+- oq->last_pkt_count = pkts_pend;
++ if (last_pkt_count - pkts_pend) {
++ writel(last_pkt_count - pkts_pend, oq->pkts_sent_reg);
++ readl(oq->pkts_sent_reg);
++ WRITE_ONCE(oq->last_pkt_count, pkts_pend);
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+index 60afb6bf2f679..e0c1e13e48c02 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_rx.c
+@@ -323,10 +323,16 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ struct octep_oq *oq)
+ {
+ u32 pkt_count, new_pkts;
++ u32 last_pkt_count, pkts_pending;
+
+ pkt_count = readl(oq->pkts_sent_reg);
+- new_pkts = pkt_count - oq->last_pkt_count;
++ last_pkt_count = READ_ONCE(oq->last_pkt_count);
++ new_pkts = pkt_count - last_pkt_count;
+
++ if (pkt_count < last_pkt_count) {
++ dev_err(oq->dev, "OQ-%u pkt_count(%u) < oq->last_pkt_count(%u)\n",
++ oq->q_no, pkt_count, last_pkt_count);
++ }
+ /* Clear the hardware packets counter register if the rx queue is
+ * being processed continuously with-in a single interrupt and
+ * reached half its max value.
+@@ -337,8 +343,9 @@ static int octep_oq_check_hw_for_pkts(struct octep_device *oct,
+ pkt_count = readl(oq->pkts_sent_reg);
+ new_pkts += pkt_count;
+ }
+- oq->last_pkt_count = pkt_count;
+- oq->pkts_pending += new_pkts;
++ WRITE_ONCE(oq->last_pkt_count, pkt_count);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending + new_pkts));
+ return new_pkts;
+ }
+
+@@ -411,7 +418,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ u16 data_offset;
+ u32 read_idx;
+
+- read_idx = oq->host_read_idx;
++ read_idx = READ_ONCE(oq->host_read_idx);
+ rx_bytes = 0;
+ desc_used = 0;
+ for (pkt = 0; pkt < pkts_to_process; pkt++) {
+@@ -494,7 +501,7 @@ static int __octep_oq_process_rx(struct octep_device *oct,
+ napi_gro_receive(oq->napi, skb);
+ }
+
+- oq->host_read_idx = read_idx;
++ WRITE_ONCE(oq->host_read_idx, read_idx);
+ oq->refill_count += desc_used;
+ oq->stats.packets += pkt;
+ oq->stats.bytes += rx_bytes;
+@@ -517,22 +524,26 @@ int octep_oq_process_rx(struct octep_oq *oq, int budget)
+ {
+ u32 pkts_available, pkts_processed, total_pkts_processed;
+ struct octep_device *oct = oq->octep_dev;
++ u32 pkts_pending;
+
+ pkts_available = 0;
+ pkts_processed = 0;
+ total_pkts_processed = 0;
+ while (total_pkts_processed < budget) {
+ /* update pending count only when current one exhausted */
+- if (oq->pkts_pending == 0)
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ if (pkts_pending == 0)
+ octep_oq_check_hw_for_pkts(oct, oq);
++ pkts_pending = READ_ONCE(oq->pkts_pending);
+ pkts_available = min(budget - total_pkts_processed,
+- oq->pkts_pending);
++ pkts_pending);
+ if (!pkts_available)
+ break;
+
+ pkts_processed = __octep_oq_process_rx(oct, oq,
+ pkts_available);
+- oq->pkts_pending -= pkts_processed;
++ pkts_pending = READ_ONCE(oq->pkts_pending);
++ WRITE_ONCE(oq->pkts_pending, (pkts_pending - pkts_processed));
+ total_pkts_processed += pkts_processed;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1f01cd5804d7a191a60ec2229301d49456d8deb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Feb 2026 09:13:57 +0000
+Subject: octeon_ep: Relocate counter updates before NAPI
+
+From: Vimlesh Kumar <vimleshk@marvell.com>
+
+[ Upstream commit 18c04a808c436d629d5812ce883e3822a5f5a47f ]
+
+Relocate IQ/OQ IN/OUT_CNTS updates to occur before NAPI completion,
+and replace napi_complete with napi_complete_done.
+
+Moving the IQ/OQ counter updates before napi_complete_done ensures
+1. Counter registers are updated before re-enabling interrupts.
+2. Prevents a race where new packets arrive but counters aren't properly
+ synchronized.
+napi_complete_done (vs napi_complete) allows for better
+interrupt coalescing.
+
+Fixes: 37d79d0596062 ("octeon_ep: add Tx/Rx processing and interrupt support")
+Signed-off-by: Sathesh Edara <sedara@marvell.com>
+Signed-off-by: Shinas Rasheed <srasheed@marvell.com>
+Signed-off-by: Vimlesh Kumar <vimleshk@marvell.com>
+Link: https://patch.msgid.link/20260227091402.1773833-2-vimleshk@marvell.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../ethernet/marvell/octeon_ep/octep_main.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index db24c290a9079..111caa5ce12fa 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -534,12 +534,12 @@ static void octep_clean_irqs(struct octep_device *oct)
+ }
+
+ /**
+- * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ * octep_update_pkt() - Update IQ/OQ IN/OUT_CNT registers.
+ *
+ * @iq: Octeon Tx queue data structure.
+ * @oq: Octeon Rx queue data structure.
+ */
+-static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++static void octep_update_pkt(struct octep_iq *iq, struct octep_oq *oq)
+ {
+ u32 pkts_pend = oq->pkts_pending;
+
+@@ -555,7 +555,17 @@ static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
+ }
+
+ /* Flush the previous wrties before writing to RESEND bit */
+- wmb();
++ smp_wmb();
++}
++
++/**
++ * octep_enable_ioq_irq() - Enable MSI-x interrupt of a Tx/Rx queue.
++ *
++ * @iq: Octeon Tx queue data structure.
++ * @oq: Octeon Rx queue data structure.
++ */
++static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
++{
+ writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq->pkts_sent_reg);
+ writeq(1UL << OCTEP_IQ_INTR_RESEND_BIT, iq->inst_cnt_reg);
+ }
+@@ -581,7 +591,8 @@ static int octep_napi_poll(struct napi_struct *napi, int budget)
+ if (tx_pending || rx_done >= budget)
+ return budget;
+
+- napi_complete(napi);
++ octep_update_pkt(ioq_vector->iq, ioq_vector->oq);
++ napi_complete_done(napi, rx_done);
+ octep_enable_ioq_irq(ioq_vector->iq, ioq_vector->oq);
+ return rx_done;
+ }
+--
+2.51.0
+
--- /dev/null
+From 412649c17c08419d3faadc5fe88047d1d7e9403d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 17:39:07 +0800
+Subject: pinctrl: cirrus: cs42l43: Fix double-put in cs42l43_pin_probe()
+
+From: Felix Gu <ustc.gu@gmail.com>
+
+[ Upstream commit fd5bed798f45eb3a178ad527b43ab92705faaf8a ]
+
+devm_add_action_or_reset() already invokes the action on failure,
+so the explicit put causes a double-put.
+
+Fixes: 9b07cdf86a0b ("pinctrl: cirrus: Fix fwnode leak in cs42l43_pin_probe()")
+Signed-off-by: Felix Gu <ustc.gu@gmail.com>
+Reviewed-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/cirrus/pinctrl-cs42l43.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+index e1ac89be7c847..1640c5522f0e8 100644
+--- a/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
++++ b/drivers/pinctrl/cirrus/pinctrl-cs42l43.c
+@@ -574,10 +574,9 @@ static int cs42l43_pin_probe(struct platform_device *pdev)
+ if (child) {
+ ret = devm_add_action_or_reset(&pdev->dev,
+ cs42l43_fwnode_put, child);
+- if (ret) {
+- fwnode_handle_put(child);
++ if (ret)
+ return ret;
+- }
++
+ if (!child->dev)
+ child->dev = priv->dev;
+ fwnode = child;
+--
+2.51.0
+
--- /dev/null
+From 6d8d3271989b8174cc223b04c67a52dd04b8eb1e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:46 +0100
+Subject: pinctrl: equilibrium: fix warning trace on load
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 3e00b1b332e54ba50cca6691f628b9c06574024f ]
+
+The callback functions 'eqbr_irq_mask()' and 'eqbr_irq_ack()' are also
+called in the callback function 'eqbr_irq_mask_ack()'. This is done to
+avoid source code duplication. The problem is that the function
+'eqbr_irq_mask()' also calls the gpiolib function 'gpiochip_disable_irq()'.
+
+This generates the following warning trace in the log for every gpio on
+load.
+
+[ 6.088111] ------------[ cut here ]------------
+[ 6.092440] WARNING: CPU: 3 PID: 1 at drivers/gpio/gpiolib.c:3810 gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Modules linked in:
+[ 6.097847] CPU: 3 UID: 0 PID: 1 Comm: swapper/0 Tainted: G W 6.12.59+ #0
+[ 6.097847] Tainted: [W]=WARN
+[ 6.097847] RIP: 0010:gpiochip_disable_irq+0x39/0x50
+[ 6.097847] Code: 39 c6 48 19 c0 21 c6 48 c1 e6 05 48 03 b2 38 03 00 00 48 81 fe 00 f0 ff ff 77 11 48 8b 46 08 f6 c4 02 74 06 f0 80 66 09 fb c3 <0f> 0b 90 0f 1f 40 00 c3 66 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40
+[ 6.097847] RSP: 0000:ffffc9000000b830 EFLAGS: 00010046
+[ 6.097847] RAX: 0000000000000045 RBX: ffff888001be02a0 RCX: 0000000000000008
+[ 6.097847] RDX: ffff888001be9000 RSI: ffff888001b2dd00 RDI: ffff888001be02a0
+[ 6.097847] RBP: ffffc9000000b860 R08: 0000000000000000 R09: 0000000000000000
+[ 6.097847] R10: 0000000000000001 R11: ffff888001b2a154 R12: ffff888001be0514
+[ 6.097847] R13: ffff888001be02a0 R14: 0000000000000008 R15: 0000000000000000
+[ 6.097847] FS: 0000000000000000(0000) GS:ffff888041d80000(0000) knlGS:0000000000000000
+[ 6.097847] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 6.097847] CR2: 0000000000000000 CR3: 0000000003030000 CR4: 00000000001026b0
+[ 6.097847] Call Trace:
+[ 6.097847] <TASK>
+[ 6.097847] ? eqbr_irq_mask+0x63/0x70
+[ 6.097847] ? no_action+0x10/0x10
+[ 6.097847] eqbr_irq_mask_ack+0x11/0x60
+
+In another driver (drivers/pinctrl/starfive/pinctrl-starfive-jh7100.c) the
+interrupt is not disabled here.
+
+To fix this, do not call the 'eqbr_irq_mask()' and 'eqbr_irq_ack()'
+functions. Instead, implement this directly without disabling the interrupts.
+
+Fixes: 52066a53bd11 ("pinctrl: equilibrium: Convert to immutable irq_chip")
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index a5f7e34146c7c..e1d8c656576a8 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -63,8 +63,15 @@ static void eqbr_irq_ack(struct irq_data *d)
+
+ static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_irq_mask(d);
+- eqbr_irq_ack(d);
++ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
++ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
++ unsigned int offset = irqd_to_hwirq(d);
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&gctrl->lock, flags);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNENCLR);
++ writel(BIT(offset), gctrl->membase + GPIO_IRNCR);
++ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+--
+2.51.0
+
--- /dev/null
+From 72cd36dca32fdec45a75f9bf200ac4f7b5b23c36 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Feb 2026 13:55:45 +0100
+Subject: pinctrl: equilibrium: rename irq_chip function callbacks
+
+From: Florian Eckert <fe@dev.tdt.de>
+
+[ Upstream commit 1f96b84835eafb3e6f366dc3a66c0e69504cec9d ]
+
+Renaming of the irq_chip callback functions to improve clarity.
+
+Signed-off-by: Florian Eckert <fe@dev.tdt.de>
+Signed-off-by: Linus Walleij <linusw@kernel.org>
+Stable-dep-of: 3e00b1b332e5 ("pinctrl: equilibrium: fix warning trace on load")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/pinctrl-equilibrium.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/pinctrl/pinctrl-equilibrium.c b/drivers/pinctrl/pinctrl-equilibrium.c
+index d7c89c310b373..a5f7e34146c7c 100644
+--- a/drivers/pinctrl/pinctrl-equilibrium.c
++++ b/drivers/pinctrl/pinctrl-equilibrium.c
+@@ -22,7 +22,7 @@
+ #define PIN_NAME_LEN 10
+ #define PAD_REG_OFF 0x100
+
+-static void eqbr_gpio_disable_irq(struct irq_data *d)
++static void eqbr_irq_mask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -35,7 +35,7 @@ static void eqbr_gpio_disable_irq(struct irq_data *d)
+ gpiochip_disable_irq(gc, offset);
+ }
+
+-static void eqbr_gpio_enable_irq(struct irq_data *d)
++static void eqbr_irq_unmask(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -49,7 +49,7 @@ static void eqbr_gpio_enable_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_ack_irq(struct irq_data *d)
++static void eqbr_irq_ack(struct irq_data *d)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -61,10 +61,10 @@ static void eqbr_gpio_ack_irq(struct irq_data *d)
+ raw_spin_unlock_irqrestore(&gctrl->lock, flags);
+ }
+
+-static void eqbr_gpio_mask_ack_irq(struct irq_data *d)
++static void eqbr_irq_mask_ack(struct irq_data *d)
+ {
+- eqbr_gpio_disable_irq(d);
+- eqbr_gpio_ack_irq(d);
++ eqbr_irq_mask(d);
++ eqbr_irq_ack(d);
+ }
+
+ static inline void eqbr_cfg_bit(void __iomem *addr,
+@@ -91,7 +91,7 @@ static int eqbr_irq_type_cfg(struct gpio_irq_type *type,
+ return 0;
+ }
+
+-static int eqbr_gpio_set_irq_type(struct irq_data *d, unsigned int type)
++static int eqbr_irq_set_type(struct irq_data *d, unsigned int type)
+ {
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ struct eqbr_gpio_ctrl *gctrl = gpiochip_get_data(gc);
+@@ -165,11 +165,11 @@ static void eqbr_irq_handler(struct irq_desc *desc)
+
+ static const struct irq_chip eqbr_irq_chip = {
+ .name = "gpio_irq",
+- .irq_mask = eqbr_gpio_disable_irq,
+- .irq_unmask = eqbr_gpio_enable_irq,
+- .irq_ack = eqbr_gpio_ack_irq,
+- .irq_mask_ack = eqbr_gpio_mask_ack_irq,
+- .irq_set_type = eqbr_gpio_set_irq_type,
++ .irq_ack = eqbr_irq_ack,
++ .irq_mask = eqbr_irq_mask,
++ .irq_mask_ack = eqbr_irq_mask_ack,
++ .irq_unmask = eqbr_irq_unmask,
++ .irq_set_type = eqbr_irq_set_type,
+ .flags = IRQCHIP_IMMUTABLE,
+ GPIOCHIP_IRQ_RESOURCE_HELPERS,
+ };
+--
+2.51.0
+
--- /dev/null
+From a6a82209b01bbcaafac4af22d8a6894234fa65f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Feb 2026 01:01:29 +0000
+Subject: platform/x86: thinkpad_acpi: Fix errors reading battery thresholds
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Teh <jonathan.teh@outlook.com>
+
+[ Upstream commit 53e977b1d50c46f2c4ec3865cd13a822f58ad3cd ]
+
+Check whether the battery supports the relevant charge threshold before
+reading the value to silence these errors:
+
+thinkpad_acpi: acpi_evalf(BCTG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCTG: evaluate failed
+thinkpad_acpi: acpi_evalf(BCSG, dd, ...) failed: AE_NOT_FOUND
+ACPI: \_SB_.PCI0.LPC_.EC__.HKEY: BCSG: evaluate failed
+
+when reading the charge thresholds via sysfs on platforms that do not
+support them such as the ThinkPad T400.
+
+Fixes: 2801b9683f74 ("thinkpad_acpi: Add support for battery thresholds")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=202619
+Signed-off-by: Jonathan Teh <jonathan.teh@outlook.com>
+Reviewed-by: Mark Pearson <mpearson-lenovo@squebb.ca>
+Link: https://patch.msgid.link/MI0P293MB01967B206E1CA6F337EBFB12926CA@MI0P293MB0196.ITAP293.PROD.OUTLOOK.COM
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/thinkpad_acpi.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
+index 88364a5502e69..be46479d54afe 100644
+--- a/drivers/platform/x86/thinkpad_acpi.c
++++ b/drivers/platform/x86/thinkpad_acpi.c
+@@ -9441,14 +9441,16 @@ static int tpacpi_battery_get(int what, int battery, int *ret)
+ {
+ switch (what) {
+ case THRESHOLD_START:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery))
++ if (!battery_info.batteries[battery].start_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_START, ret, battery)))
+ return -ENODEV;
+
+ /* The value is in the low 8 bits of the response */
+ *ret = *ret & 0xFF;
+ return 0;
+ case THRESHOLD_STOP:
+- if ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery))
++ if (!battery_info.batteries[battery].stop_support ||
++ ACPI_FAILURE(tpacpi_battery_acpi_eval(GET_STOP, ret, battery)))
+ return -ENODEV;
+ /* Value is in lower 8 bits */
+ *ret = *ret & 0xFF;
+--
+2.51.0
+
--- /dev/null
+From e215b25215598b2aec7b01654dd69871ecf85385 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 19:37:56 +0900
+Subject: rust: kunit: fix warning when !CONFIG_PRINTK
+
+From: Alexandre Courbot <acourbot@nvidia.com>
+
+[ Upstream commit 7dd34dfc8dfa92a7244242098110388367996ac3 ]
+
+If `CONFIG_PRINTK` is not set, then the following warnings are issued
+during build:
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:16:12
+ |
+ 16 | pub fn err(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+ |
+ = note: `#[warn(unused_variables)]` (part of `#[warn(unused)]`) on by default
+
+ warning: unused variable: `args`
+ --> ../rust/kernel/kunit.rs:32:13
+ |
+ 32 | pub fn info(args: fmt::Arguments<'_>) {
+ | ^^^^ help: if this is intentional, prefix it with an underscore: `_args`
+
+Fix this by adding a no-op assignment using `args` when `CONFIG_PRINTK`
+is not set.
+
+Fixes: a66d733da801 ("rust: support running Rust documentation tests as KUnit ones")
+Signed-off-by: Alexandre Courbot <acourbot@nvidia.com>
+Reviewed-by: Alice Ryhl <aliceryhl@google.com>
+Reviewed-by: David Gow <david@davidgow.net>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ rust/kernel/kunit.rs | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/rust/kernel/kunit.rs b/rust/kernel/kunit.rs
+index 722655b2d62df..f33d8f5f1851a 100644
+--- a/rust/kernel/kunit.rs
++++ b/rust/kernel/kunit.rs
+@@ -13,6 +13,10 @@
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn err(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+@@ -29,6 +33,10 @@ pub fn err(args: fmt::Arguments<'_>) {
+ /// Public but hidden since it should only be used from KUnit generated code.
+ #[doc(hidden)]
+ pub fn info(args: fmt::Arguments<'_>) {
++ // `args` is unused if `CONFIG_PRINTK` is not set - this avoids a build-time warning.
++ #[cfg(not(CONFIG_PRINTK))]
++ let _ = args;
++
+ // SAFETY: The format string is null-terminated and the `%pA` specifier matches the argument we
+ // are passing.
+ #[cfg(CONFIG_PRINTK)]
+--
+2.51.0
+
--- /dev/null
+From 066112b020f90b9075ffb88fc4a5ce1ee7ca3de2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 09:36:37 +0800
+Subject: selftest/arm64: Fix sve2p1_sigill() to hwcap test
+
+From: Yifan Wu <wuyifan50@huawei.com>
+
+[ Upstream commit d87c828daa7ead9763416f75cc416496969cf1dc ]
+
+The FEAT_SVE2p1 is indicated by ID_AA64ZFR0_EL1.SVEver. However,
+the BFADD requires the FEAT_SVE_B16B16, which is indicated by
+ID_AA64ZFR0_EL1.B16B16. This could cause the test to incorrectly
+fail on a CPU that supports FEAT_SVE2.1 but not FEAT_SVE_B16B16.
+
+Use LD1Q (gather load of quadwords) instead, which is decoded from the
+SVE encodings and is implied by FEAT_SVE2p1 alone.
+
+Fixes: c5195b027d29 ("kselftest/arm64: Add SVE 2.1 to hwcap test")
+Signed-off-by: Yifan Wu <wuyifan50@huawei.com>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/arm64/abi/hwcap.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/tools/testing/selftests/arm64/abi/hwcap.c b/tools/testing/selftests/arm64/abi/hwcap.c
+index e3d262831d919..311a2a65f7cf2 100644
+--- a/tools/testing/selftests/arm64/abi/hwcap.c
++++ b/tools/testing/selftests/arm64/abi/hwcap.c
+@@ -216,8 +216,8 @@ static void sve2_sigill(void)
+
+ static void sve2p1_sigill(void)
+ {
+- /* BFADD Z0.H, Z0.H, Z0.H */
+- asm volatile(".inst 0x65000000" : : : "z0");
++ /* LD1Q {Z0.Q}, P0/Z, [Z0.D, X0] */
++ asm volatile(".inst 0xC400A000" : : : "z0");
+ }
+
+ static void sveaes_sigill(void)
+--
+2.51.0
+
selftests-mptcp-more-stable-simult_flows-tests.patch
selftests-mptcp-join-check-removing-signal-subflow-endp.patch
arm-clean-up-the-memset64-c-wrapper.patch
+hwmon-aht10-add-support-for-dht20.patch
+hwmon-aht10-fix-initialization-commands-for-aht20.patch
+pinctrl-equilibrium-rename-irq_chip-function-callbac.patch
+pinctrl-equilibrium-fix-warning-trace-on-load.patch
+platform-x86-thinkpad_acpi-fix-errors-reading-batter.patch
+pinctrl-cirrus-cs42l43-fix-double-put-in-cs42l43_pin.patch
+hwmon-it87-check-the-it87_lock-return-value.patch
+e1000e-clear-dpg_en-after-reset-to-avoid-autonomous-.patch
+drm-ssd130x-use-bool-for-ssd130x_deviceinfo-flags.patch
+drm-ssd130x-store-the-hw-buffer-in-the-driver-privat.patch
+drm-ssd130x-replace-.page_height-field-in-device-inf.patch
+drm-solomon-fix-page-start-when-updating-rectangle-i.patch
+net-ethernet-ti-am65-cpsw-nuss-cpsw-ale-fix-multicas.patch
+xsk-get-rid-of-xdp_buff_xsk-xskb_list_node.patch
+xsk-s-free_list_node-list_node.patch
+xsk-fix-fragment-node-deletion-to-prevent-buffer-lea.patch
+xsk-fix-zero-copy-af_xdp-fragment-drop.patch
+dpaa2-switch-do-not-clear-any-interrupts-automatical.patch
+dpaa2-switch-fix-interrupt-storm-after-receiving-bad.patch
+atm-lec-fix-null-ptr-deref-in-lec_arp_clear_vccs.patch
+amd-xgbe-fix-mac_tcr_ss-register-width-for-2.5g-and-.patch
+can-bcm-fix-locking-for-bcm_op-runtime-updates.patch
+can-mcp251x-fix-deadlock-in-error-path-of-mcp251x_op.patch
+rust-kunit-fix-warning-when-config_printk.patch
+kunit-tool-copy-caller-args-in-run_kernel-to-prevent.patch
+net-dsa-realtek-rtl8365mb-fix-rtl8365mb_phy_ocp_writ.patch
+bpf-bonding-reject-vlan-srcmac-xmit_hash_policy-chan.patch
+octeon_ep-relocate-counter-updates-before-napi.patch
+octeon_ep-avoid-compiler-and-iq-oq-reordering.patch
+wifi-cw1200-fix-locking-in-error-paths.patch
+wifi-wlcore-fix-a-locking-bug.patch
+wifi-mt76-mt7996-fix-possible-oob-access-in-mt7996_m.patch
+wifi-mt76-fix-possible-oob-access-in-mt76_connac2_ma.patch
+indirect_call_wrapper-do-not-reevaluate-function-poi.patch
+net-rds-fix-circular-locking-dependency-in-rds_tcp_t.patch
+xen-acpi-processor-fix-_cst-detection-using-undersiz.patch
+bpf-export-bpf_link_inc_not_zero.patch
+bpf-fix-a-uaf-issue-in-bpf_trampoline_link_cgroup_sh.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch
+smb-client-fix-buffer-size-for-smb311_posix_qinfo-in.patch-5028
+ipv6-fix-null-pointer-deref-in-ip6_rt_get_dev_rcu.patch
+amd-xgbe-fix-sleep-while-atomic-on-suspend-resume.patch
+drm-sched-fix-kernel-doc-warning-for-drm_sched_job_d.patch
+nvme-reject-invalid-pr_read_keys-num_keys-values.patch
+nvme-fix-memory-allocation-in-nvme_pr_read_keys.patch
+net-sched-avoid-qdisc_reset_all_tx_gt-vs-dequeue-rac.patch
+net-nfc-nci-fix-zero-length-proprietary-notification.patch
+nfc-nci-free-skb-on-nci_transceive-early-error-paths.patch
+nfc-nci-clear-nci_data_exchange-before-calling-compl.patch
+nfc-rawsock-cancel-tx_work-before-socket-teardown.patch
+net-stmmac-fix-error-handling-in-vlan-add-and-delete.patch
+net-ethernet-mtk_eth_soc-reset-prog-ptr-to-old_prog-.patch
+net-bridge-fix-nd_tbl-null-dereference-when-ipv6-is-.patch
+net-vxlan-fix-nd_tbl-null-dereference-when-ipv6-is-d.patch
+net-ipv6-fix-panic-when-ipv4-route-references-loopba.patch
+net-sched-act_ife-fix-metalist-update-behavior.patch
+xdp-use-modulo-operation-to-calculate-xdp-frag-tailr.patch
+xsk-introduce-helper-to-determine-rxq-frag_size.patch
+i40e-fix-registering-xdp-rxq-info.patch
+i40e-use-xdp.frame_sz-as-xdp-rxq-info-frag_size.patch
+xdp-produce-a-warning-when-calculated-tailroom-is-ne.patch
+selftest-arm64-fix-sve2p1_sigill-to-hwcap-test.patch
+tracing-add-null-pointer-check-to-trigger_data_free.patch
--- /dev/null
+From cee467e48801f92f97b5b3635433f40c511f8250 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:11 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ smb2_compound_op()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 12c43a062acb0ac137fc2a4a106d4d084b8c5416 ]
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: 6a5f6592a0b6 ("SMB311: Add support for query info using posix extensions (level 100)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2inode.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
+index d6086394d0b84..c576d82799acb 100644
+--- a/fs/smb/client/smb2inode.c
++++ b/fs/smb/client/smb2inode.c
+@@ -315,7 +315,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ cfile->fid.volatile_fid,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ } else {
+@@ -325,7 +325,7 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
+ COMPOUND_FID,
+ SMB_FIND_FILE_POSIX_INFO,
+ SMB2_O_INFO_FILE, 0,
+- sizeof(struct smb311_posix_qinfo *) +
++ sizeof(struct smb311_posix_qinfo) +
+ (PATH_MAX * 2) +
+ (sizeof(struct smb_sid) * 2), 0, NULL);
+ }
+--
+2.51.0
+
--- /dev/null
+From a6cc581501d23a904e26f97743bf00d9148747b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2026 15:13:12 +0000
+Subject: smb/client: fix buffer size for smb311_posix_qinfo in
+ SMB311_posix_query_info()
+
+From: ZhangGuoDong <zhangguodong@kylinos.cn>
+
+[ Upstream commit 9621b996e4db1dbc2b3dc5d5910b7d6179397320 ]
+
+SMB311_posix_query_info() is currently unused, but it may still be used in
+some stable versions, so these changes are submitted as a separate patch.
+
+Use `sizeof(struct smb311_posix_qinfo)` instead of sizeof its pointer,
+so the allocated buffer matches the actual struct size.
+
+Fixes: b1bc1874b885 ("smb311: Add support for SMB311 query info (non-compounded)")
+Reported-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: ZhangGuoDong <zhangguodong@kylinos.cn>
+Reviewed-by: ChenXiaoSong <chenxiaosong@kylinos.cn>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smb2pdu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index d1d332f08883a..094f431e428fa 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -3927,7 +3927,7 @@ int
+ SMB311_posix_query_info(const unsigned int xid, struct cifs_tcon *tcon,
+ u64 persistent_fid, u64 volatile_fid, struct smb311_posix_qinfo *data, u32 *plen)
+ {
+- size_t output_len = sizeof(struct smb311_posix_qinfo *) +
++ size_t output_len = sizeof(struct smb311_posix_qinfo) +
+ (sizeof(struct smb_sid) * 2) + (PATH_MAX * 2);
+ *plen = 0;
+
+--
+2.51.0
+
--- /dev/null
+From 9e9c1ce8b22499de1bb34be62a8c9e1f7f5aa8fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 11:33:39 -0800
+Subject: tracing: Add NULL pointer check to trigger_data_free()
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 457965c13f0837a289c9164b842d0860133f6274 ]
+
+If trigger_data_alloc() fails and returns NULL, event_hist_trigger_parse()
+jumps to the out_free error path. While kfree() safely handles a NULL
+pointer, trigger_data_free() does not. This causes a NULL pointer
+dereference in trigger_data_free() when evaluating
+data->cmd_ops->set_filter.
+
+Fix the problem by adding a NULL pointer check to trigger_data_free().
+
+The problem was found by an experimental code review agent based on
+gemini-3.1-pro while reviewing backports into v6.18.y.
+
+Cc: Miaoqian Lin <linmq006@gmail.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+Cc: Steven Rostedt (Google) <rostedt@goodmis.org>
+Link: https://patch.msgid.link/20260305193339.2810953-1-linux@roeck-us.net
+Fixes: 0550069cc25f ("tracing: Properly process error handling in event_hist_trigger_parse()")
+Assisted-by: Gemini:gemini-3.1-pro
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_events_trigger.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
+index fe079ff82ef1b..3ef1fe15493d3 100644
+--- a/kernel/trace/trace_events_trigger.c
++++ b/kernel/trace/trace_events_trigger.c
+@@ -19,6 +19,9 @@ static DEFINE_MUTEX(trigger_cmd_mutex);
+
+ void trigger_data_free(struct event_trigger_data *data)
+ {
++ if (!data)
++ return;
++
+ if (data->cmd_ops->set_filter)
+ data->cmd_ops->set_filter(NULL, data, NULL);
+
+--
+2.51.0
+
--- /dev/null
+From 7bb79c1c4d80e963f095de0f226ec1702ba9d629 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:24 -0800
+Subject: wifi: cw1200: Fix locking in error paths
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit d98c24617a831e92e7224a07dcaed2dd0b02af96 ]
+
+cw1200_wow_suspend() must only return with priv->conf_mutex locked if it
+returns zero. This mutex must be unlocked if an error is returned. Add
+mutex_unlock() calls to the error paths from which that call is missing.
+This has been detected by the Clang thread-safety analyzer.
+
+Fixes: a910e4a94f69 ("cw1200: add driver for the ST-E CW1100 & CW1200 WLAN chipsets")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-25-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/st/cw1200/pm.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/st/cw1200/pm.c b/drivers/net/wireless/st/cw1200/pm.c
+index a20ab577a3644..212b6f2af8de4 100644
+--- a/drivers/net/wireless/st/cw1200/pm.c
++++ b/drivers/net/wireless/st/cw1200/pm.c
+@@ -264,12 +264,14 @@ int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
+ wiphy_err(priv->hw->wiphy,
+ "PM request failed: %d. WoW is disabled.\n", ret);
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EBUSY;
+ }
+
+ /* Force resume if event is coming from the device. */
+ if (atomic_read(&priv->bh_rx)) {
+ cw1200_wow_resume(hw);
++ mutex_unlock(&priv->conf_mutex);
+ return -EAGAIN;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 3bfa34e19118d265c8ae1fc3f78bacf79c4528ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:16 +0100
+Subject: wifi: mt76: Fix possible oob access in
+ mt76_connac2_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 4e10a730d1b511ff49723371ed6d694dd1b2c785 ]
+
+Check frame length before accessing the mgmt fields in
+mt76_connac2_mac_write_txwi_80211 in order to avoid a possible oob
+access.
+
+Fixes: 577dbc6c656d ("mt76: mt7915: enable offloading of sequence number assignment")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-3-b0f6d1ad4850@kernel.org
+[fix check to also cover mgmt->u.action.u.addba_req.capab,
+correct Fixes tag]
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+index 87479c6c2b505..570c9dcbc505e 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mac.c
+@@ -394,6 +394,7 @@ mt76_connac2_mac_write_txwi_80211(struct mt76_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 + 1 + 2 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ) {
+ u16 capab = le16_to_cpu(mgmt->u.action.u.addba_req.capab);
+--
+2.51.0
+
--- /dev/null
+From 0d21a9ed1c14b42386e15aecc38e8d4a8aa2d234 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Feb 2026 20:11:14 +0100
+Subject: wifi: mt76: mt7996: Fix possible oob access in
+ mt7996_mac_write_txwi_80211()
+
+From: Lorenzo Bianconi <lorenzo@kernel.org>
+
+[ Upstream commit 60862846308627e9e15546bb647a00de44deb27b ]
+
+Check frame length before accessing the mgmt fields in
+mt7996_mac_write_txwi_80211 in order to avoid a possible oob access.
+
+Fixes: 98686cd21624c ("wifi: mt76: mt7996: add driver for MediaTek Wi-Fi 7 (802.11be) devices")
+Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
+Link: https://patch.msgid.link/20260226-mt76-addba-req-oob-access-v1-1-b0f6d1ad4850@kernel.org
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mt7996/mac.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+index 8fa16f95e6a7b..3dd503b363ce0 100644
+--- a/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
++++ b/drivers/net/wireless/mediatek/mt76/mt7996/mac.c
+@@ -784,6 +784,7 @@ mt7996_mac_write_txwi_80211(struct mt7996_dev *dev, __le32 *txwi,
+ u32 val;
+
+ if (ieee80211_is_action(fc) &&
++ skb->len >= IEEE80211_MIN_ACTION_SIZE + 1 &&
+ mgmt->u.action.category == WLAN_CATEGORY_BACK &&
+ mgmt->u.action.u.addba_req.action_code == WLAN_ACTION_ADDBA_REQ)
+ tid = MT_TX_ADDBA;
+--
+2.51.0
+
--- /dev/null
+From 90dac0a73aaca365f2bcde1b366b980aa72c4661 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Feb 2026 14:00:25 -0800
+Subject: wifi: wlcore: Fix a locking bug
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit 72c6df8f284b3a49812ce2ac136727ace70acc7c ]
+
+Make sure that wl->mutex is locked before it is unlocked. This has been
+detected by the Clang thread-safety analyzer.
+
+Fixes: 45aa7f071b06 ("wlcore: Use generic runtime pm calls for wowlan elp configuration")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Link: https://patch.msgid.link/20260223220102.2158611-26-bart.vanassche@linux.dev
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 9706240ddd416..d818485d7e6af 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -1800,6 +1800,8 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ wl->wow_enabled);
+ WARN_ON(!wl->wow_enabled);
+
++ mutex_lock(&wl->mutex);
++
+ ret = pm_runtime_force_resume(wl->dev);
+ if (ret < 0) {
+ wl1271_error("ELP wakeup failure!");
+@@ -1816,8 +1818,6 @@ static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
+ run_irq_work = true;
+ spin_unlock_irqrestore(&wl->wl_lock, flags);
+
+- mutex_lock(&wl->mutex);
+-
+ /* test the recovery flag before calling any SDIO functions */
+ pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
+ &wl->flags);
+--
+2.51.0
+
--- /dev/null
+From d25c400e24167f3daf181cc220edd02414c27de2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:50 +0100
+Subject: xdp: produce a warning when calculated tailroom is negative
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 8821e857759be9db3cde337ad328b71fe5c8a55f ]
+
+Many ethernet drivers report xdp Rx queue frag size as being the same as
+DMA write size. However, the only user of this field, namely
+bpf_xdp_frags_increase_tail(), clearly expects a truesize.
+
+Such difference leads to unspecific memory corruption issues under certain
+circumstances, e.g. in ixgbevf maximum DMA write size is 3 KB, so when
+running xskxceiver's XDP_ADJUST_TAIL_GROW_MULTI_BUFF, 6K packet fully uses
+all DMA-writable space in 2 buffers. This would be fine, if only
+rxq->frag_size was properly set to 4K, but value of 3K results in a
+negative tailroom, because there is a non-zero page offset.
+
+We are supposed to return -EINVAL and be done with it in such case, but due
+to tailroom being stored as an unsigned int, it is reported to be somewhere
+near UINT_MAX, resulting in a tail being grown, even if the requested
+offset is too much (it is around 2K in the abovementioned test). This later
+leads to all kinds of unspecific calltraces.
+
+[ 7340.337579] xskxceiver[1440]: segfault at 1da718 ip 00007f4161aeac9d sp 00007f41615a6a00 error 6
+[ 7340.338040] xskxceiver[1441]: segfault at 7f410000000b ip 00000000004042b5 sp 00007f415bffecf0 error 4
+[ 7340.338179] in libc.so.6[61c9d,7f4161aaf000+160000]
+[ 7340.339230] in xskxceiver[42b5,400000+69000]
+[ 7340.340300] likely on CPU 6 (core 0, socket 6)
+[ 7340.340302] Code: ff ff 01 e9 f4 fe ff ff 0f 1f 44 00 00 4c 39 f0 74 73 31 c0 ba 01 00 00 00 f0 0f b1 17 0f 85 ba 00 00 00 49 8b 87 88 00 00 00 <4c> 89 70 08 eb cc 0f 1f 44 00 00 48 8d bd f0 fe ff ff 89 85 ec fe
+[ 7340.340888] likely on CPU 3 (core 0, socket 3)
+[ 7340.345088] Code: 00 00 00 ba 00 00 00 00 be 00 00 00 00 89 c7 e8 31 ca ff ff 89 45 ec 8b 45 ec 85 c0 78 07 b8 00 00 00 00 eb 46 e8 0b c8 ff ff <8b> 00 83 f8 69 74 24 e8 ff c7 ff ff 8b 00 83 f8 0b 74 18 e8 f3 c7
+[ 7340.404334] Oops: general protection fault, probably for non-canonical address 0x6d255010bdffc: 0000 [#1] SMP NOPTI
+[ 7340.405972] CPU: 7 UID: 0 PID: 1439 Comm: xskxceiver Not tainted 6.19.0-rc1+ #21 PREEMPT(lazy)
+[ 7340.408006] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.17.0-5.fc42 04/01/2014
+[ 7340.409716] RIP: 0010:lookup_swap_cgroup_id+0x44/0x80
+[ 7340.410455] Code: 83 f8 1c 73 39 48 ba ff ff ff ff ff ff ff 03 48 8b 04 c5 20 55 fa bd 48 21 d1 48 89 ca 83 e1 01 48 d1 ea c1 e1 04 48 8d 04 90 <8b> 00 48 83 c4 10 d3 e8 c3 cc cc cc cc 31 c0 e9 98 b7 dd 00 48 89
+[ 7340.412787] RSP: 0018:ffffcc5c04f7f6d0 EFLAGS: 00010202
+[ 7340.413494] RAX: 0006d255010bdffc RBX: ffff891f477895a8 RCX: 0000000000000010
+[ 7340.414431] RDX: 0001c17e3fffffff RSI: 00fa070000000000 RDI: 000382fc7fffffff
+[ 7340.415354] RBP: 00fa070000000000 R08: ffffcc5c04f7f8f8 R09: ffffcc5c04f7f7d0
+[ 7340.416283] R10: ffff891f4c1a7000 R11: ffffcc5c04f7f9c8 R12: ffffcc5c04f7f7d0
+[ 7340.417218] R13: 03ffffffffffffff R14: 00fa06fffffffe00 R15: ffff891f47789500
+[ 7340.418229] FS: 0000000000000000(0000) GS:ffff891ffdfaa000(0000) knlGS:0000000000000000
+[ 7340.419489] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 7340.420286] CR2: 00007f415bfffd58 CR3: 0000000103f03002 CR4: 0000000000772ef0
+[ 7340.421237] PKRU: 55555554
+[ 7340.421623] Call Trace:
+[ 7340.421987] <TASK>
+[ 7340.422309] ? softleaf_from_pte+0x77/0xa0
+[ 7340.422855] swap_pte_batch+0xa7/0x290
+[ 7340.423363] zap_nonpresent_ptes.constprop.0.isra.0+0xd1/0x270
+[ 7340.424102] zap_pte_range+0x281/0x580
+[ 7340.424607] zap_pmd_range.isra.0+0xc9/0x240
+[ 7340.425177] unmap_page_range+0x24d/0x420
+[ 7340.425714] unmap_vmas+0xa1/0x180
+[ 7340.426185] exit_mmap+0xe1/0x3b0
+[ 7340.426644] __mmput+0x41/0x150
+[ 7340.427098] exit_mm+0xb1/0x110
+[ 7340.427539] do_exit+0x1b2/0x460
+[ 7340.427992] do_group_exit+0x2d/0xc0
+[ 7340.428477] get_signal+0x79d/0x7e0
+[ 7340.428957] arch_do_signal_or_restart+0x34/0x100
+[ 7340.429571] exit_to_user_mode_loop+0x8e/0x4c0
+[ 7340.430159] do_syscall_64+0x188/0x6b0
+[ 7340.430672] ? __do_sys_clone3+0xd9/0x120
+[ 7340.431212] ? switch_fpu_return+0x4e/0xd0
+[ 7340.431761] ? arch_exit_to_user_mode_prepare.isra.0+0xa1/0xc0
+[ 7340.432498] ? do_syscall_64+0xbb/0x6b0
+[ 7340.433015] ? __handle_mm_fault+0x445/0x690
+[ 7340.433582] ? count_memcg_events+0xd6/0x210
+[ 7340.434151] ? handle_mm_fault+0x212/0x340
+[ 7340.434697] ? do_user_addr_fault+0x2b4/0x7b0
+[ 7340.435271] ? clear_bhb_loop+0x30/0x80
+[ 7340.435788] ? clear_bhb_loop+0x30/0x80
+[ 7340.436299] ? clear_bhb_loop+0x30/0x80
+[ 7340.436812] ? clear_bhb_loop+0x30/0x80
+[ 7340.437323] entry_SYSCALL_64_after_hwframe+0x76/0x7e
+[ 7340.437973] RIP: 0033:0x7f4161b14169
+[ 7340.438468] Code: Unable to access opcode bytes at 0x7f4161b1413f.
+[ 7340.439242] RSP: 002b:00007ffc6ebfa770 EFLAGS: 00000246 ORIG_RAX: 00000000000000ca
+[ 7340.440173] RAX: fffffffffffffe00 RBX: 00000000000005a1 RCX: 00007f4161b14169
+[ 7340.441061] RDX: 00000000000005a1 RSI: 0000000000000109 RDI: 00007f415bfff990
+[ 7340.441943] RBP: 00007ffc6ebfa7a0 R08: 0000000000000000 R09: 00000000ffffffff
+[ 7340.442824] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+[ 7340.443707] R13: 0000000000000000 R14: 00007f415bfff990 R15: 00007f415bfff6c0
+[ 7340.444586] </TASK>
+[ 7340.444922] Modules linked in: rfkill intel_rapl_msr intel_rapl_common intel_uncore_frequency_common skx_edac_common nfit libnvdimm kvm_intel vfat fat kvm snd_pcm irqbypass rapl iTCO_wdt snd_timer intel_pmc_bxt iTCO_vendor_support snd ixgbevf virtio_net soundcore i2c_i801 pcspkr libeth_xdp net_failover i2c_smbus lpc_ich failover libeth virtio_balloon joydev 9p fuse loop zram lz4hc_compress lz4_compress 9pnet_virtio 9pnet netfs ghash_clmulni_intel serio_raw qemu_fw_cfg
+[ 7340.449650] ---[ end trace 0000000000000000 ]---
+
+The issue can be fixed in all in-tree drivers, but we cannot just trust OOT
+drivers to not do this. Therefore, make tailroom a signed int and produce a
+warning when it is negative to prevent such mistakes in the future.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Toke Høiland-Jørgensen <toke@redhat.com>
+Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-10-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index 58109f6201b76..b1e9abb3891cc 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4132,13 +4132,14 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
+ skb_frag_t *frag = &sinfo->frags[sinfo->nr_frags - 1];
+ struct xdp_rxq_info *rxq = xdp->rxq;
+- unsigned int tailroom;
++ int tailroom;
+
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+ tailroom = rxq->frag_size - skb_frag_size(frag) -
+ skb_frag_off(frag) % rxq->frag_size;
++ WARN_ON_ONCE(tailroom < 0);
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From f5cd92d75f9f0e91f08faeb98dff118c4f23a80b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:42 +0100
+Subject: xdp: use modulo operation to calculate XDP frag tailroom
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 88b6b7f7b216108a09887b074395fa7b751880b1 ]
+
+The current formula for calculating XDP tailroom in mbuf packets works only
+if each frag has its own page (if rxq->frag_size is PAGE_SIZE). This
+defeats the purpose of the parameter overall and, without any indication,
+leads to negative calculated tailroom on at least half of the frags, if shared
+pages are used.
+pages are used.
+
+There are not many drivers that set rxq->frag_size. Among them:
+* i40e and enetc always split page uniformly between frags, use shared
+ pages
+* ice uses page_pool frags via libeth, those are power-of-2 and uniformly
+ distributed across page
+* idpf has variable frag_size with XDP on, so current API is not applicable
+* mlx5, mtk and mvneta use PAGE_SIZE or 0 as frag_size for page_pool
+
+As for AF_XDP ZC, only ice, i40e and idpf declare frag_size for it. Modulo
+operation yields good results for aligned chunks, they are all power-of-2,
+between 2K and PAGE_SIZE. Formula without modulo fails when chunk_size is
+2K. Buffers in unaligned mode are not distributed uniformly, so modulo
+operation would not work.
+
+To accommodate unaligned buffers, we could define frag_size as
+data + tailroom, and hence do not subtract offset when calculating
+tailroom, but this would necessitate more changes in the drivers.
+
+Define rxq->frag_size as an even portion of a page that fully belongs to a
+single frag. When calculating tailroom, locate the data start within such
+portion by performing a modulo operation on page offset.
+
+Fixes: bf25146a5595 ("bpf: add frags support to the bpf_xdp_adjust_tail() API")
+Acked-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-2-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index e5dc1f699297b..58109f6201b76 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -4137,7 +4137,8 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
+ if (!rxq->frag_size || rxq->frag_size > xdp->frame_sz)
+ return -EOPNOTSUPP;
+
+- tailroom = rxq->frag_size - skb_frag_size(frag) - skb_frag_off(frag);
++ tailroom = rxq->frag_size - skb_frag_size(frag) -
++ skb_frag_off(frag) % rxq->frag_size;
+ if (unlikely(offset > tailroom))
+ return -EINVAL;
+
+--
+2.51.0
+
--- /dev/null
+From 74c81975d620e4bca41a7edb7b96a6b77a54ad7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Feb 2026 09:37:11 +0000
+Subject: xen/acpi-processor: fix _CST detection using undersized evaluation
+ buffer
+
+From: David Thomson <dt@linux-mail.net>
+
+[ Upstream commit 8b57227d59a86fc06d4f09de08f98133680f2cae ]
+
+read_acpi_id() attempts to evaluate _CST using a stack buffer of
+sizeof(union acpi_object) (48 bytes), but _CST returns a nested Package
+of sub-Packages (one per C-state, each containing a register descriptor,
+type, latency, and power) requiring hundreds of bytes. The evaluation
+always fails with AE_BUFFER_OVERFLOW.
+
+On modern systems using FFH/MWAIT entry (where pblk is zero), this
+causes the function to return before setting the acpi_id_cst_present
+bit. In check_acpi_ids(), flags.power is then zero for all Phase 2 CPUs
+(physical CPUs beyond dom0's vCPU count), so push_cxx_to_hypervisor() is
+never called for them.
+
+On a system with dom0_max_vcpus=2 and 8 physical CPUs, only PCPUs 0-1
+receive C-state data. PCPUs 2-7 are stuck in C0/C1 idle, unable to
+enter C2/C3. This costs measurable wall power (4W observed on an Intel
+Core Ultra 7 265K with Xen 4.20).
+
+The function never uses the _CST return value -- it only needs to know
+whether _CST exists. Replace the broken acpi_evaluate_object() call with
+acpi_has_method(), which correctly detects _CST presence using
+acpi_get_handle() without any buffer allocation. This brings C-state
+detection to parity with the P-state path, which already works correctly
+for Phase 2 CPUs.
+
+Fixes: 59a568029181 ("xen/acpi-processor: C and P-state driver that uploads said data to hypervisor.")
+Signed-off-by: David Thomson <dt@linux-mail.net>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Message-ID: <20260224093707.19679-1-dt@linux-mail.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/xen-acpi-processor.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
+index 2967039398463..520756159d3d3 100644
+--- a/drivers/xen/xen-acpi-processor.c
++++ b/drivers/xen/xen-acpi-processor.c
+@@ -379,11 +379,8 @@ read_acpi_id(acpi_handle handle, u32 lvl, void *context, void **rv)
+ acpi_psd[acpi_id].domain);
+ }
+
+- status = acpi_evaluate_object(handle, "_CST", NULL, &buffer);
+- if (ACPI_FAILURE(status)) {
+- if (!pblk)
+- return AE_OK;
+- }
++ if (!pblk && !acpi_has_method(handle, "_CST"))
++ return AE_OK;
+ /* .. and it has a C-state */
+ __set_bit(acpi_id, acpi_id_cst_present);
+
+--
+2.51.0
+
--- /dev/null
+From d646549a2fd865549c3dc62f955fecd00b629ac0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:26 +0000
+Subject: xsk: Fix fragment node deletion to prevent buffer leak
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit 60abb0ac11dccd6b98fd9182bc5f85b621688861 ]
+
+After commit b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node"),
+the list_node field is reused for both the xskb pool list and the buffer
+free list. This causes a buffer leak, as described below.
+
+xp_free() checks if a buffer is already on the free list using
+list_empty(&xskb->list_node). When list_del() is used to remove a node
+from the xskb pool list, it doesn't reinitialize the node pointers.
+This means list_empty() will return false even after the node has been
+removed, causing xp_free() to incorrectly skip adding the buffer to the
+free list.
+
+Fix this by using list_del_init() instead of list_del() in all fragment
+handling paths. This ensures the list node is reinitialized after removal,
+allowing list_empty() to work correctly.
+
+Fixes: b692bf9a7543 ("xsk: Get rid of xdp_buff_xsk::xskb_list_node")
+Acked-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-2-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: f7387d6579d6 ("xsk: Fix zero-copy AF_XDP fragment drop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 6 +++---
+ net/xdp/xsk.c | 2 +-
+ 2 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 7be51bdd9c63a..91339ffd2f2a8 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -115,7 +115,7 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ goto out;
+
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ xp_free(pos);
+ }
+
+@@ -140,7 +140,7 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+ struct xdp_buff_xsk, list_node);
+ if (frag) {
+- list_del(&frag->list_node);
++ list_del_init(&frag->list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -151,7 +151,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+ {
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->list_node);
++ list_del_init(&xskb->list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 8ccc2f2a99d97..8f3971a94d967 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -179,7 +179,7 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->list_node);
++ list_del_init(&pos->list_node);
+ }
+
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 4d75acbdaeb4751257991487497395cb080d9132 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Feb 2026 00:00:27 +0000
+Subject: xsk: Fix zero-copy AF_XDP fragment drop
+
+From: Nikhil P. Rao <nikhil.rao@amd.com>
+
+[ Upstream commit f7387d6579d65efd490a864254101cb665f2e7a7 ]
+
+AF_XDP should ensure that only a complete packet is sent to the application.
+In the zero-copy case, if the Rx queue gets full as fragments are being
+enqueued, the remaining fragments are dropped.
+
+For the multi-buffer case, add a check to ensure that the Rx queue has
+enough space for all fragments of a packet before starting to enqueue
+them.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Signed-off-by: Nikhil P. Rao <nikhil.rao@amd.com>
+Link: https://patch.msgid.link/20260225000456.107806-3-nikhil.rao@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/xdp/xsk.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 8f3971a94d967..9e1ac917f9708 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -160,25 +160,31 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ struct xdp_buff_xsk *pos, *tmp;
+ struct list_head *xskb_list;
+ u32 contd = 0;
++ u32 num_desc;
+ int err;
+
+- if (frags)
+- contd = XDP_PKT_CONTD;
++ if (likely(!frags)) {
++ err = __xsk_rcv_zc(xs, xskb, len, contd);
++ if (err)
++ goto err;
++ return 0;
++ }
+
+- err = __xsk_rcv_zc(xs, xskb, len, contd);
+- if (err)
++ contd = XDP_PKT_CONTD;
++ num_desc = xdp_get_shared_info_from_buff(xdp)->nr_frags + 1;
++ if (xskq_prod_nb_free(xs->rx, num_desc) < num_desc) {
++ xs->rx_queue_full++;
++ err = -ENOBUFS;
+ goto err;
+- if (likely(!frags))
+- return 0;
++ }
+
++ __xsk_rcv_zc(xs, xskb, len, contd);
+ xskb_list = &xskb->pool->xskb_list;
+ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+- err = __xsk_rcv_zc(xs, pos, len, contd);
+- if (err)
+- goto err;
++ __xsk_rcv_zc(xs, pos, len, contd);
+ list_del_init(&pos->list_node);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From ecc368d09f286d96f3d236be732cee375d4e2080 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 14:24:53 +0200
+Subject: xsk: Get rid of xdp_buff_xsk::xskb_list_node
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit b692bf9a7543af7ad11a59d182a3757578f0ba53 ]
+
+Let's bring xdp_buff_xsk back to occupying 2 cachelines by removing
+xskb_list_node - for the purpose of gathering the xskb frags
+free_list_node can be used, head of the list (xsk_buff_pool::xskb_list)
+stays as-is, just reuse the node ptr.
+
+It is safe to do as a single xdp_buff_xsk can never reside in two
+pool's lists simultaneously.
+
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Link: https://lore.kernel.org/bpf/20241007122458.282590-2-maciej.fijalkowski@intel.com
+Stable-dep-of: f7387d6579d6 ("xsk: Fix zero-copy AF_XDP fragment drop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 14 +++++++-------
+ include/net/xsk_buff_pool.h | 1 -
+ net/xdp/xsk.c | 4 ++--
+ net/xdp/xsk_buff_pool.c | 1 -
+ 4 files changed, 9 insertions(+), 11 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 5425f7ad5ebde..39b43eb2b799d 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -114,8 +114,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
+- list_del(&pos->xskb_list_node);
++ list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
++ list_del(&pos->free_list_node);
+ xp_free(pos);
+ }
+
+@@ -128,7 +128,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+ {
+ struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+- list_add_tail(&frag->xskb_list_node, &frag->pool->xskb_list);
++ list_add_tail(&frag->free_list_node, &frag->pool->xskb_list);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+@@ -138,9 +138,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+- struct xdp_buff_xsk, xskb_list_node);
++ struct xdp_buff_xsk, free_list_node);
+ if (frag) {
+- list_del(&frag->xskb_list_node);
++ list_del(&frag->free_list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -151,7 +151,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+ {
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->xskb_list_node);
++ list_del(&xskb->free_list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+@@ -160,7 +160,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+- xskb_list_node);
++ free_list_node);
+ return &frag->xdp;
+ }
+
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index f0d6ce4bda7a2..d6cba1d4076ea 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -29,7 +29,6 @@ struct xdp_buff_xsk {
+ struct xsk_buff_pool *pool;
+ u64 orig_addr;
+ struct list_head free_list_node;
+- struct list_head xskb_list_node;
+ };
+
+ #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index 569d39f19c569..bb8f52c345868 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -172,14 +172,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+- list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
++ list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->xskb_list_node);
++ list_del(&pos->free_list_node);
+ }
+
+ return 0;
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 380b0b3f3d8d0..8bc7cdf4719c8 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -101,7 +101,6 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ xskb->pool = pool;
+ xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+ INIT_LIST_HEAD(&xskb->free_list_node);
+- INIT_LIST_HEAD(&xskb->xskb_list_node);
+ if (pool->unaligned)
+ pool->free_heads[i] = xskb;
+ else
+--
+2.51.0
+
--- /dev/null
+From 5528c421e17c5882437126d5bbeaa519dd93f298 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2026 12:12:43 +0100
+Subject: xsk: introduce helper to determine rxq->frag_size
+
+From: Larysa Zaremba <larysa.zaremba@intel.com>
+
+[ Upstream commit 16394d80539937d348dd3b9ea32415c54e67a81b ]
+
+rxq->frag_size is basically a step between consecutive strictly aligned
+frames. In ZC mode, chunk size fits exactly, but if chunks are unaligned,
+there is no safe way to determine accessible space to grow tailroom.
+
+Report frag_size to be zero, if chunks are unaligned, chunk_size otherwise.
+
+Fixes: 24ea50127ecf ("xsk: support mbuf on ZC RX")
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Signed-off-by: Larysa Zaremba <larysa.zaremba@intel.com>
+Link: https://patch.msgid.link/20260305111253.2317394-3-larysa.zaremba@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 91339ffd2f2a8..7dc08a4646242 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -41,6 +41,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return pool->unaligned ? 0 : xsk_pool_get_chunk_size(pool);
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+@@ -263,6 +268,11 @@ static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
+ return 0;
+ }
+
++static inline u32 xsk_pool_get_rx_frag_step(struct xsk_buff_pool *pool)
++{
++ return 0;
++}
++
+ static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
+ struct xdp_rxq_info *rxq)
+ {
+--
+2.51.0
+
--- /dev/null
+From 802b2be8509222acc69bf614f7a636b1b3191da1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 7 Oct 2024 14:24:54 +0200
+Subject: xsk: s/free_list_node/list_node/
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit 30ec2c1baaead43903ad63ff8e3083949059083c ]
+
+Now that free_list_node's purpose is two-folded, make it just a
+'list_node'.
+
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Magnus Karlsson <magnus.karlsson@intel.com>
+Link: https://lore.kernel.org/bpf/20241007122458.282590-3-maciej.fijalkowski@intel.com
+Stable-dep-of: f7387d6579d6 ("xsk: Fix zero-copy AF_XDP fragment drop")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/xdp_sock_drv.h | 14 +++++++-------
+ include/net/xsk_buff_pool.h | 2 +-
+ net/xdp/xsk.c | 4 ++--
+ net/xdp/xsk_buff_pool.c | 14 +++++++-------
+ 4 files changed, 17 insertions(+), 17 deletions(-)
+
+diff --git a/include/net/xdp_sock_drv.h b/include/net/xdp_sock_drv.h
+index 39b43eb2b799d..7be51bdd9c63a 100644
+--- a/include/net/xdp_sock_drv.h
++++ b/include/net/xdp_sock_drv.h
+@@ -114,8 +114,8 @@ static inline void xsk_buff_free(struct xdp_buff *xdp)
+ if (likely(!xdp_buff_has_frags(xdp)))
+ goto out;
+
+- list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
+- list_del(&pos->free_list_node);
++ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
++ list_del(&pos->list_node);
+ xp_free(pos);
+ }
+
+@@ -128,7 +128,7 @@ static inline void xsk_buff_add_frag(struct xdp_buff *xdp)
+ {
+ struct xdp_buff_xsk *frag = container_of(xdp, struct xdp_buff_xsk, xdp);
+
+- list_add_tail(&frag->free_list_node, &frag->pool->xskb_list);
++ list_add_tail(&frag->list_node, &frag->pool->xskb_list);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+@@ -138,9 +138,9 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_first_entry_or_null(&xskb->pool->xskb_list,
+- struct xdp_buff_xsk, free_list_node);
++ struct xdp_buff_xsk, list_node);
+ if (frag) {
+- list_del(&frag->free_list_node);
++ list_del(&frag->list_node);
+ ret = &frag->xdp;
+ }
+
+@@ -151,7 +151,7 @@ static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+ {
+ struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+- list_del(&xskb->free_list_node);
++ list_del(&xskb->list_node);
+ }
+
+ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+@@ -160,7 +160,7 @@ static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+ struct xdp_buff_xsk *frag;
+
+ frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+- free_list_node);
++ list_node);
+ return &frag->xdp;
+ }
+
+diff --git a/include/net/xsk_buff_pool.h b/include/net/xsk_buff_pool.h
+index d6cba1d4076ea..97392fd7712c6 100644
+--- a/include/net/xsk_buff_pool.h
++++ b/include/net/xsk_buff_pool.h
+@@ -28,7 +28,7 @@ struct xdp_buff_xsk {
+ dma_addr_t frame_dma;
+ struct xsk_buff_pool *pool;
+ u64 orig_addr;
+- struct list_head free_list_node;
++ struct list_head list_node;
+ };
+
+ #define XSK_CHECK_PRIV_TYPE(t) BUILD_BUG_ON(sizeof(t) > offsetofend(struct xdp_buff_xsk, cb))
+diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
+index bb8f52c345868..8ccc2f2a99d97 100644
+--- a/net/xdp/xsk.c
++++ b/net/xdp/xsk.c
+@@ -172,14 +172,14 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+ return 0;
+
+ xskb_list = &xskb->pool->xskb_list;
+- list_for_each_entry_safe(pos, tmp, xskb_list, free_list_node) {
++ list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
+ if (list_is_singular(xskb_list))
+ contd = 0;
+ len = pos->xdp.data_end - pos->xdp.data;
+ err = __xsk_rcv_zc(xs, pos, len, contd);
+ if (err)
+ goto err;
+- list_del(&pos->free_list_node);
++ list_del(&pos->list_node);
+ }
+
+ return 0;
+diff --git a/net/xdp/xsk_buff_pool.c b/net/xdp/xsk_buff_pool.c
+index 8bc7cdf4719c8..6789d99fd99e0 100644
+--- a/net/xdp/xsk_buff_pool.c
++++ b/net/xdp/xsk_buff_pool.c
+@@ -100,7 +100,7 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
+ xskb = &pool->heads[i];
+ xskb->pool = pool;
+ xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
+- INIT_LIST_HEAD(&xskb->free_list_node);
++ INIT_LIST_HEAD(&xskb->list_node);
+ if (pool->unaligned)
+ pool->free_heads[i] = xskb;
+ else
+@@ -534,8 +534,8 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
+ } else {
+ pool->free_list_cnt--;
+ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
+- free_list_node);
+- list_del_init(&xskb->free_list_node);
++ list_node);
++ list_del_init(&xskb->list_node);
+ }
+
+ xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
+@@ -603,8 +603,8 @@ static u32 xp_alloc_reused(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u3
+
+ i = nb_entries;
+ while (i--) {
+- xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, free_list_node);
+- list_del_init(&xskb->free_list_node);
++ xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk, list_node);
++ list_del_init(&xskb->list_node);
+
+ *xdp = &xskb->xdp;
+ xdp++;
+@@ -655,11 +655,11 @@ EXPORT_SYMBOL(xp_can_alloc);
+
+ void xp_free(struct xdp_buff_xsk *xskb)
+ {
+- if (!list_empty(&xskb->free_list_node))
++ if (!list_empty(&xskb->list_node))
+ return;
+
+ xskb->pool->free_list_cnt++;
+- list_add(&xskb->free_list_node, &xskb->pool->free_list);
++ list_add(&xskb->list_node, &xskb->pool->free_list);
+ }
+ EXPORT_SYMBOL(xp_free);
+
+--
+2.51.0
+