--- /dev/null
+From 9368ba0587ad09478b494791a4f579e52b382fdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications into believing write()
+is allowed, potentially resulting in unnecessary wakeups.
+
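+A minimal userspace sketch of the symptom (illustrative only; the hwdep
+device path is just an example): a consumer that polls for both directions
+spins on the spurious EPOLLOUT instead of sleeping until a real event
+arrives.
+
+	#include <fcntl.h>
+	#include <poll.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		struct pollfd pfd = {
+			/* example hwdep node; card/device numbers vary */
+			.fd = open("/dev/snd/hwC0D0", O_RDWR | O_NONBLOCK),
+			.events = POLLIN | POLLOUT,
+		};
+
+		if (pfd.fd < 0)
+			return 1;
+
+		while (poll(&pfd, 1, -1) > 0) {
+			if (pfd.revents & POLLOUT)
+				/* with the bug, poll() returns immediately every time */
+				fprintf(stderr, "spurious POLLOUT wakeup\n");
+			if (pfd.revents & POLLIN)
+				break;	/* a real event is ready to be read */
+		}
+		return 0;
+	}
+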
+This issue dates back to the driver's initial code added by commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by commit a9a08845e9ac ("vfs: do bulk
+POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 0764a477052a2..5e1254f106bf8 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -73,7 +73,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From 0599b73a17f857476da6ba4b645562a7245dbda9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during LTP cgroup testing [1] when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
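+The underlying workqueue pitfall can be shown with a minimal module sketch
+(illustrative names, not taken from the cgroup code): on a workqueue with
+max_active = 1, a work item that waits for a later item queued on the same
+workqueue can never make progress.
+
+	#include <linux/module.h>
+	#include <linux/workqueue.h>
+	#include <linux/completion.h>
+
+	static struct workqueue_struct *demo_wq;
+	static DECLARE_COMPLETION(offline_done);
+
+	static void offline_fn(struct work_struct *w)
+	{
+		complete(&offline_done);	/* would let the waiter proceed */
+	}
+
+	static void destroy_root_fn(struct work_struct *w)
+	{
+		/* Blocks forever: offline_fn sits behind this item in the same
+		 * single-slot queue and can never start. */
+		wait_for_completion(&offline_done);
+	}
+
+	static DECLARE_WORK(destroy_root_work, destroy_root_fn);
+	static DECLARE_WORK(offline_work, offline_fn);
+
+	static int __init demo_init(void)
+	{
+		demo_wq = alloc_workqueue("demo_destroy", 0, 1);
+		if (!demo_wq)
+			return -ENOMEM;
+		queue_work(demo_wq, &destroy_root_work);	/* "root destruction" */
+		queue_work(demo_wq, &offline_work);		/* "css offline" */
+		return 0;
+	}
+
+	static void __exit demo_exit(void)
+	{
+		/* never completes: the workqueue is intentionally deadlocked */
+		destroy_workqueue(demo_wq);
+	}
+
+	module_init(demo_init);
+	module_exit(demo_exit);
+	MODULE_LICENSE("GPL");
+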
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 37d7a99be8f01..d709375d7509d 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -114,8 +114,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5165,7 +5188,7 @@ static void css_release_work_fn(struct work_struct *work)
+ mutex_unlock(&cgroup_mutex);
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5174,7 +5197,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5305,7 +5328,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5545,7 +5568,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -5922,8 +5945,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 168da6af34e43b78d94e70081b4f826da9c9c9d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, the delayed work
+item is cyclic; the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
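+A minimal sketch of the safe teardown pattern for such a self-rearming
+delayed work (illustrative names, not the cnic code):
+
+	#include <linux/module.h>
+	#include <linux/workqueue.h>
+	#include <linux/jiffies.h>
+
+	static void cyclic_fn(struct work_struct *w);
+	static DECLARE_DELAYED_WORK(cyclic_work, cyclic_fn);
+
+	static void cyclic_fn(struct work_struct *w)
+	{
+		/* ... periodic cleanup ... */
+		schedule_delayed_work(&cyclic_work, HZ);	/* re-arms itself */
+	}
+
+	static int __init demo_init(void)
+	{
+		schedule_delayed_work(&cyclic_work, HZ);
+		return 0;
+	}
+
+	static void __exit demo_exit(void)
+	{
+		/*
+		 * cancel_delayed_work() would leave a concurrently running
+		 * callback free to re-queue itself after any flush; the _sync
+		 * variant waits for the running instance and guarantees the
+		 * work is neither pending nor executing on return, so the
+		 * resources it touches may be freed afterwards.
+		 */
+		cancel_delayed_work_sync(&cyclic_work);
+	}
+
+	module_init(demo_init);
+	module_exit(demo_exit);
+	MODULE_LICENSE("GPL");
+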
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index f7f10cfb3476e..582ca97532868 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4223,8 +4223,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From 2ffd5c7cfe16b41a7c8210f15dd8aa621d383d87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a feature (head write-back) which writes the last successfully
+sent descriptor to a memory location. The memory barrier in
+i40e_clean_tx_irq() was used to avoid forward-reading descriptor fields in
+case the DD bit was not set. With that feature in place, such a situation
+cannot happen, as we know in advance how many descriptors the HW has dealt
+with.
+
+Besides, this barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not before.
+Digging through git history shows that the barrier was indeed before the
+DD bit check; in any case, the commit introducing i40e_get_head() should
+have wiped it out altogether.
+
+Also, one commit did s/read_barrier_depends/smp_rmb/ when the head
+write-back feature was already in place, but that was only theoretical,
+based on ixgbe experience; ixgbe differs in this respect, as that driver
+has to read the DD bit from the HW descriptor.
+
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 57667ccc28f54..0678705cb1b4a 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -795,9 +795,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From cb4598d2883884ccb61afdc7f0f137ceb722b07b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
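+A small userspace sketch of the arithmetic (illustrative values only)
+shows why the cast matters: without it, the shift is performed in plain
+int width, which is undefined once iq_no reaches 32 or more.
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		int instr_type = 64;
+		unsigned int iq_no = 40;	/* legal queue number > 31 */
+		uint64_t bad = 0, good = 0;
+
+		/* (instr_type == 64) is an int; shifting it by 40 overflows
+		 * (undefined behaviour) before the result is widened to 64 bits */
+		bad |= ((instr_type == 64) << iq_no);
+
+		/* the fix: widen first, then shift in 64-bit arithmetic */
+		good |= ((uint64_t)(instr_type == 64) << iq_no);
+
+		printf("without cast: 0x%016llx\n", (unsigned long long)bad);
+		printf("with cast:    0x%016llx\n", (unsigned long long)good);
+		return 0;
+	}
+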
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index 8e59c2825533a..2a066f193bca1 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -135,7 +135,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
--- /dev/null
+From 61b27525b8b46d9b264b2746e58de4f9295cdde9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments the `rx_dropped` core stat when it fails.
+The driver was also updating `ndev->stats.rx_dropped` in the same path.
+Since both are reported together via the `ip -s -s` command, this resulted
+in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 72794d1588711..09dbc975fcee9 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From ceb2bb97a8f6497ba834fb49bfb977293133793a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index d3ef5534991e6..a98841db02d5d 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -327,7 +327,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From 5f98044dea61740b299a6353f324a81004e9324c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:29:16 +1000
+Subject: qed: Don't collect too many protection override GRC elements
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 56c0a2a9ddc2f5b5078c5fb0f81ab76bbc3d4c37 ]
+
+In the protection override dump path, the firmware can return far too
+many GRC elements, resulting in attempting to write past the end of the
+previously-kmalloc'ed dump buffer.
+
+This will result in a kernel panic with reason:
+
+ BUG: unable to handle kernel paging request at ADDRESS
+
+where "ADDRESS" is just past the end of the protection override dump
+buffer. The start address of the buffer is:
+ p_hwfn->cdev->dbg_features[DBG_FEATURE_PROTECTION_OVERRIDE].dump_buf
+and the size of the buffer is buf_size in the same data structure.
+
+The panic can be arrived at from either the qede Ethernet driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc02662ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc0267792 [qed]
+ qed_dbg_feature at ffffffffc026aa8f [qed]
+ qed_dbg_all_data at ffffffffc026b211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc027298a [qed]
+ devlink_health_do_dump at ffffffff82497f61
+ devlink_health_report at ffffffff8249cf29
+ qed_report_fatal_error at ffffffffc0272baf [qed]
+ qede_sp_task at ffffffffc045ed32 [qede]
+ process_one_work at ffffffff81d19783
+
+or the qedf storage driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc068b2ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc068c792 [qed]
+ qed_dbg_feature at ffffffffc068fa8f [qed]
+ qed_dbg_all_data at ffffffffc0690211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc069798a [qed]
+ devlink_health_do_dump at ffffffff8aa95e51
+ devlink_health_report at ffffffff8aa9ae19
+ qed_report_fatal_error at ffffffffc0697baf [qed]
+ qed_hw_err_notify at ffffffffc06d32d7 [qed]
+ qed_spq_post at ffffffffc06b1011 [qed]
+ qed_fcoe_destroy_conn at ffffffffc06b2e91 [qed]
+ qedf_cleanup_fcport at ffffffffc05e7597 [qedf]
+ qedf_rport_event_handler at ffffffffc05e7bf7 [qedf]
+ fc_rport_work at ffffffffc02da715 [libfc]
+ process_one_work at ffffffff8a319663
+
+Resolve this by clamping the firmware's return value to the maximum
+number of legal elements the firmware should return.
+
+Fixes: d52c89f120de8 ("qed*: Utilize FW 8.37.2.0")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/f8e1182934aa274c18d0682a12dbaf347595469c.1757485536.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 4b4077cf2d266..b4e108d3ec945 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4374,10 +4374,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+--
+2.51.0
+
--- /dev/null
+From 4cf0dc0a6e33bde933fa0268d9766424a360ce1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index cc93c503984a1..cef60bc2589cc 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -141,8 +141,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
soc-qcom-mdt_loader-deal-with-zero-e_shentsize.patch
drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch
mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+um-virtio_uml-fix-use-after-free-after-put_device-in.patch
+qed-don-t-collect-too-many-protection-override-grc-e.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
--- /dev/null
+From afb1ed8853b34fd26fba541d20dfb88cce22f654 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completed the 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
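+The reuse sequence, sketched as userspace calls (a simplified outline that
+assumes a TFO client sends SYN plus data to this listener; addresses and
+ports are arbitrary):
+
+	#include <netinet/in.h>
+	#include <netinet/tcp.h>
+	#include <sys/socket.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		int qlen = 5;
+		struct sockaddr_in addr = {
+			.sin_family = AF_INET,
+			.sin_port = htons(8080),
+			.sin_addr.s_addr = htonl(INADDR_LOOPBACK),
+		};
+		int lsk = socket(AF_INET, SOCK_STREAM, 0);
+
+		setsockopt(lsk, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
+		bind(lsk, (struct sockaddr *)&addr, sizeof(addr));
+		listen(lsk, 1);
+
+		/* 1. accept() a TFO child that has not yet completed the 3WHS */
+		int csk = accept(lsk, NULL, NULL);
+
+		/* 2. connect(AF_UNSPEC) -> tcp_disconnect() moves it to TCP_CLOSE */
+		struct sockaddr unspec = { .sa_family = AF_UNSPEC };
+		connect(csk, &unspec, sizeof(unspec));
+
+		/* 3. reuse the child as a client toward another destination;
+		 *    without the fix, the stale fastopen_rsk survives into the
+		 *    new connection and trips the retransmit-timer warning */
+		addr.sin_port = htons(9090);
+		connect(csk, (struct sockaddr *)&addr, sizeof(addr));
+
+		close(csk);
+		close(lsk);
+		return 0;
+	}
+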
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 2d870d5e31cfb..afc31f1def760 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2770,6 +2770,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ /* Deny disconnect if other threads are blocked in sk_wait_event()
+@@ -2890,6 +2891,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet->defer_connect = 0;
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
--- /dev/null
+From 5c6277b6493217eac1f5e3331ac6d360eee37d08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 15:00:51 +0800
+Subject: um: virtio_uml: Fix use-after-free after put_device in probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0 ]
+
+When register_virtio_device() fails in virtio_uml_probe(),
+the code sets vu_dev->registered = 1 even though
+the device was not successfully registered.
+This can lead to use-after-free or other issues.
+
+Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virtio_uml.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index d5d768188b3ba..0178d33e59469 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1129,10 +1129,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ platform_set_drvdata(pdev, vu_dev);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+--
+2.51.0
+
--- /dev/null
+From 0852a59c7d14ef6ebba648ed6ba31d43bb9ed96c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index a172f69c71123..d860c905fa733 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1236,7 +1236,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+--
+2.51.0
+
--- /dev/null
+From 2142dd1a0373bd180addffb84164c91c8fa57188 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications into believing write()
+is allowed, potentially resulting in unnecessary wakeups.
+
+This issue dates back to the driver's initial code added by commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by commit a9a08845e9ac ("vfs: do bulk
+POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index b5ced5d27758b..364fecf8d2867 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -73,7 +73,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From bbecf6e15fb8ebb0216867d5092df2f450a5b447 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during LTP cgroup testing [1] when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 1a3b2e1436db0..e5fe4ffff7cd1 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -122,8 +122,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5263,7 +5286,7 @@ static void css_release_work_fn(struct work_struct *work)
+ mutex_unlock(&cgroup_mutex);
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5272,7 +5295,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5394,7 +5417,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5631,7 +5654,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6008,8 +6031,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 82e77876bbbd99a5e6ca944cfcdb5f7051603fd2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, the delayed work
+item is cyclic; the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index f7f10cfb3476e..582ca97532868 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4223,8 +4223,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From 12d9181b3b4ceecb9e1e7dd5a2b9dd7c2f40753f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 17:48:25 +0300
+Subject: dpaa2-switch: fix buffer pool seeding for control traffic
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 2690cb089502b80b905f2abdafd1bf2d54e1abef ]
+
+Starting with commit c50e7475961c ("dpaa2-switch: Fix error checking in
+dpaa2_switch_seed_bp()"), the probing of a second DPSW object errors out
+like below.
+
+fsl_dpaa2_switch dpsw.1: fsl_mc_driver_probe failed: -12
+fsl_dpaa2_switch dpsw.1: probe with driver fsl_dpaa2_switch failed with error -12
+
+The aforementioned commit brought to the surface the fact that seeding
+buffers into the buffer pool destined for control traffic is not
+successful and an access violation recoverable error can be seen in the
+MC firmware log:
+
+[E, qbman_rec_isr:391, QBMAN] QBMAN recoverable event 0x1000000
+
+This happens because the driver incorrectly used the ID of the DPBP
+object instead of the hardware buffer pool ID when trying to release
+buffers into it.
+
+This is because any DPSW object uses two buffer pools, one managed by
+the Linux driver and destined for control traffic packet buffers and the
+other one managed by the MC firmware and destined only for offloaded
+traffic. And since the buffer pool managed by the MC firmware does not
+have an external facing DPBP equivalent, any subsequent DPBP objects
+created after the first DPSW will have a DPBP id different from the
+underlying hardware buffer ID.
+
+The issue was not caught earlier because these two numbers can be
+identical when all DPBP objects are created before the DPSW objects are.
+This is the case when the DPL file is used to describe the entire DPAA2
+object layout and objects are created at boot time, and it is also true
+for the first DPSW being created dynamically using ls-addsw.
+
+Fix this by using the buffer pool ID instead of the DPBP id when
+releasing buffers into the pool.
+
+Fixes: 2877e4f7e189 ("staging: dpaa2-switch: setup buffer pool and RX path rings")
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20250910144825.2416019-1-ioana.ciornei@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 6448e06dcf826..1e6b29c047710 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2682,7 +2682,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+- ethsw->bpid = dpbp_attrs.id;
++ ethsw->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+--
+2.51.0
+
--- /dev/null
+From e77b4236991903ded15887c1929a184aea273f79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a feature (head write-back) which writes the last successfully
+sent descriptor to a memory location. The memory barrier in
+i40e_clean_tx_irq() was used to avoid forward-reading descriptor fields in
+case the DD bit was not set. With that feature in place, such a situation
+cannot happen, as we know in advance how many descriptors the HW has dealt
+with.
+
+Besides, this barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not before.
+Digging through git history shows that the barrier was indeed before the
+DD bit check; in any case, the commit introducing i40e_get_head() should
+have wiped it out altogether.
+
+Also, one commit did s/read_barrier_depends/smp_rmb/ when the head
+write-back feature was already in place, but that was only theoretical,
+based on ixgbe experience; ixgbe differs in this respect, as that driver
+has to read the DD bit from the HW descriptor.
+
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index e2737875e3795..b94d67729283c 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -949,9 +949,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From 53b69dca4c3ac8293763b0e9fd7cd6e5c4309861 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index 8e59c2825533a..2a066f193bca1 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -135,7 +135,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
--- /dev/null
+From 516bb16c57a764750f95a646c1d56e7824fa1e69 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments the `rx_dropped` core stat when it fails.
+The driver was also updating `ndev->stats.rx_dropped` in the same path.
+Since both are reported together via the `ip -s -s` command, this resulted
+in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 72794d1588711..09dbc975fcee9 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From cd26382f70c229d03d12ea58f3b51e42ced4282f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index f0b2c2d034695..ca88c75f04277 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -331,7 +331,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From 75fa28627689c3e97808b477a99b953010bbfe34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:29:16 +1000
+Subject: qed: Don't collect too many protection override GRC elements
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 56c0a2a9ddc2f5b5078c5fb0f81ab76bbc3d4c37 ]
+
+In the protection override dump path, the firmware can return far too
+many GRC elements, resulting in attempting to write past the end of the
+previously-kmalloc'ed dump buffer.
+
+This will result in a kernel panic with reason:
+
+ BUG: unable to handle kernel paging request at ADDRESS
+
+where "ADDRESS" is just past the end of the protection override dump
+buffer. The start address of the buffer is:
+ p_hwfn->cdev->dbg_features[DBG_FEATURE_PROTECTION_OVERRIDE].dump_buf
+and the size of the buffer is buf_size in the same data structure.
+
+The panic can be arrived at from either the qede Ethernet driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc02662ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc0267792 [qed]
+ qed_dbg_feature at ffffffffc026aa8f [qed]
+ qed_dbg_all_data at ffffffffc026b211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc027298a [qed]
+ devlink_health_do_dump at ffffffff82497f61
+ devlink_health_report at ffffffff8249cf29
+ qed_report_fatal_error at ffffffffc0272baf [qed]
+ qede_sp_task at ffffffffc045ed32 [qede]
+ process_one_work at ffffffff81d19783
+
+or the qedf storage driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc068b2ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc068c792 [qed]
+ qed_dbg_feature at ffffffffc068fa8f [qed]
+ qed_dbg_all_data at ffffffffc0690211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc069798a [qed]
+ devlink_health_do_dump at ffffffff8aa95e51
+ devlink_health_report at ffffffff8aa9ae19
+ qed_report_fatal_error at ffffffffc0697baf [qed]
+ qed_hw_err_notify at ffffffffc06d32d7 [qed]
+ qed_spq_post at ffffffffc06b1011 [qed]
+ qed_fcoe_destroy_conn at ffffffffc06b2e91 [qed]
+ qedf_cleanup_fcport at ffffffffc05e7597 [qedf]
+ qedf_rport_event_handler at ffffffffc05e7bf7 [qedf]
+ fc_rport_work at ffffffffc02da715 [libfc]
+ process_one_work at ffffffff8a319663
+
+Resolve this by clamping the firmware's return value to the maximum
+number of legal elements the firmware should return.
+
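+For illustration only (a user-space sketch, not part of the patch: the element
+and depth values are made up, and a local MIN() macro stands in for the
+kernel's min()), the clamp simply caps the firmware-reported size at the
+largest legal dump size:
+
+  #include <stdio.h>
+
+  #define ELEMENT_DWORDS 2        /* illustrative stand-in values */
+  #define DEPTH_DWORDS   512
+  #define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+  int main(void)
+  {
+          /* Pretend the firmware register reported far too many windows. */
+          unsigned int reported_windows = 100000;
+          unsigned int dwords = reported_windows * ELEMENT_DWORDS;
+
+          /* Cap at the maximum so the dump never overruns its buffer. */
+          dwords = MIN(dwords, DEPTH_DWORDS);
+          printf("dwords to copy: %u\n", dwords);
+          return 0;
+  }
+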
+Fixes: d52c89f120de8 ("qed*: Utilize FW 8.37.2.0")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/f8e1182934aa274c18d0682a12dbaf347595469c.1757485536.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 4b4077cf2d266..b4e108d3ec945 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4374,10 +4374,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+--
+2.51.0
+
--- /dev/null
+From 95f6cf476446621fcdd96898cb8786efb1814791 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index cfbc569edfb5f..bb7e3c80ad74e 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -104,8 +104,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (up) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
drm-i915-power-fix-size-for-for_each_set_bit-in-abox-iteration.patch
mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
net-hsr-hsr_slave-fix-the-promiscuous-mode-in-offload-mode.patch
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+um-virtio_uml-fix-use-after-free-after-put_device-in.patch
+dpaa2-switch-fix-buffer-pool-seeding-for-control-tra.patch
+qed-don-t-collect-too-many-protection-override-grc-e.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
--- /dev/null
+From f4ade75127f1a36ad3273be46375b9c223822287 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completes 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index c1e624ca6a250..9508e2c90b840 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2981,6 +2981,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ /* Deny disconnect if other threads are blocked in sk_wait_event()
+@@ -3101,6 +3102,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet->defer_connect = 0;
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
--- /dev/null
+From a132f13367ee9eb394b0cbf9f3443e00d019e483 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 15:00:51 +0800
+Subject: um: virtio_uml: Fix use-after-free after put_device in probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0 ]
+
+When register_virtio_device() fails in virtio_uml_probe(),
+the code sets vu_dev->registered = 1 even though
+the device was not successfully registered.
+This can lead to use-after-free or other issues.
+
+Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virtio_uml.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 204e9dfbff1a0..8edc218ce21fd 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1225,10 +1225,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+--
+2.51.0
+
--- /dev/null
+From 0d0635494ee1ed5bf5e7aaf0dbed13b7f5145f9a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
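+For illustration only (a small user-space sketch; uint32_t stands in for the
+kernel's u32), storing a negative errno in an unsigned variable wraps it to a
+large positive number, so any later "< 0" error check silently stops working:
+
+  #include <stdio.h>
+  #include <stdint.h>
+  #include <errno.h>
+
+  int main(void)
+  {
+          uint32_t bad = -EOPNOTSUPP;   /* wraps to a huge positive value */
+          int good = -EOPNOTSUPP;
+
+          /* The first test is always false (the compiler may even warn). */
+          printf("bad  < 0? %s (value %u)\n",
+                 bad < 0 ? "yes" : "no", (unsigned int)bad);
+          printf("good < 0? %s (value %d)\n",
+                 good < 0 ? "yes" : "no", good);
+          return 0;
+  }
+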
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index c336267f4599c..d415a031bfa02 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1254,7 +1254,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+--
+2.51.0
+
--- /dev/null
+From bddc886e1e9e2f108f49c59919df3aedf749a144 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications into believing write() is
+allowed, potentially resulting in unnecessary wakeups.
+
+This issue dates back to the driver's initial code added by commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by commit a9a08845e9ac ("vfs: do
+bulk POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 0764a477052a2..5e1254f106bf8 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -73,7 +73,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From 5140d7b34dbb5c71c6d2da7082909732dbe46aed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during [1] LTP cgroup testing when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
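+For illustration only, a minimal kernel-module sketch (hypothetical names, not
+the cgroup code) of the pattern the patch adopts: a work item that has to wait
+for another work item is queued on a different max_active=1 workqueue, so it
+can never be stuck behind the very item it waits for:
+
+  #include <linux/module.h>
+  #include <linux/workqueue.h>
+
+  static struct workqueue_struct *demo_offline_wq;
+  static struct workqueue_struct *demo_free_wq;
+
+  static void offline_fn(struct work_struct *w) { /* offline-style work */ }
+  static DECLARE_WORK(offline_work, offline_fn);
+
+  static void free_fn(struct work_struct *w)
+  {
+          /* Safe: offline_work runs on another queue, so waiting here
+           * cannot deadlock even though both queues have max_active == 1. */
+          flush_work(&offline_work);
+  }
+  static DECLARE_WORK(free_work, free_fn);
+
+  static int __init demo_init(void)
+  {
+          demo_offline_wq = alloc_workqueue("demo_offline", 0, 1);
+          demo_free_wq = alloc_workqueue("demo_free", 0, 1);
+          if (!demo_offline_wq || !demo_free_wq)
+                  goto err;
+
+          queue_work(demo_free_wq, &free_work);
+          queue_work(demo_offline_wq, &offline_work);
+          flush_work(&free_work);
+          return 0;
+  err:
+          if (demo_offline_wq)
+                  destroy_workqueue(demo_offline_wq);
+          if (demo_free_wq)
+                  destroy_workqueue(demo_free_wq);
+          return -ENOMEM;
+  }
+
+  static void __exit demo_exit(void)
+  {
+          destroy_workqueue(demo_offline_wq);
+          destroy_workqueue(demo_free_wq);
+  }
+
+  module_init(demo_init);
+  module_exit(demo_exit);
+  MODULE_LICENSE("GPL");
+
+With a single max_active=1 queue, free_fn() flushing an offline_work item
+queued behind it would hang, which is exactly the shape of the hang above.
+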
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 801022a8899b5..b761d70eccbf4 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -114,8 +114,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5233,7 +5256,7 @@ static void css_release_work_fn(struct work_struct *work)
+ mutex_unlock(&cgroup_mutex);
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5242,7 +5265,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5373,7 +5396,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5626,7 +5649,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6002,8 +6025,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 7272710c39a402db4a4e9263d45b737585fd45dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, because the delayed
+work item is cyclic, the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
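+For illustration only, a minimal kernel-module sketch (hypothetical names, not
+the cnic code) of shutting down a self-rearming delayed work item: the _sync
+variant both cancels a pending instance and waits for a running one, which is
+what makes the extra flush_workqueue() unnecessary:
+
+  #include <linux/module.h>
+  #include <linux/workqueue.h>
+  #include <linux/jiffies.h>
+
+  static struct delayed_work poll_work;
+  static bool stopping;
+
+  static void poll_fn(struct work_struct *work)
+  {
+          /* ... periodic cleanup ... */
+          if (!READ_ONCE(stopping))
+                  schedule_delayed_work(&poll_work, HZ);  /* re-arms itself */
+  }
+
+  static int __init demo_init(void)
+  {
+          INIT_DELAYED_WORK(&poll_work, poll_fn);
+          schedule_delayed_work(&poll_work, HZ);
+          return 0;
+  }
+
+  static void __exit demo_exit(void)
+  {
+          WRITE_ONCE(stopping, true);
+          /* Cancels any pending instance and waits for a running one, so no
+           * later instance can touch state freed after this point. A plain
+           * cancel_delayed_work() plus flush_workqueue() cannot catch a
+           * re-queue that happens after the flush has started. */
+          cancel_delayed_work_sync(&poll_work);
+  }
+
+  module_init(demo_init);
+  module_exit(demo_exit);
+  MODULE_LICENSE("GPL");
+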
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index 155599dcee76d..3283f66e02b6e 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4224,8 +4224,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From a54a4736a9eb39f755d4ad369fd7fa0dba5dc651 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a feature which writes the location of the last successfully sent
+descriptor back to memory. The memory barrier in i40e_clean_tx_irq() was used
+to avoid forward-reading descriptor fields in case the DD bit was not set.
+With the head write-back feature in place, that situation cannot happen, as we
+know in advance how many descriptors the HW has dealt with.
+
+Besides, this barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not before.
+Digging through git history shows that the barrier was indeed placed before
+the DD bit check; in any case, the commit introducing i40e_get_head() should
+have wiped it out altogether.
+
+Also, there was one commit doing s/read_barrier_depends/smp_rmb when the get
+head feature was already in place, but it was only theoretical, based on
+ixgbe experience, and ixgbe differs here in that it has to read the DD bit
+from the HW descriptor.
+
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 6067c88668341..ff213cbe84ca8 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -794,9 +794,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From b13e60395dcf8483410bdf798af9defe386d50b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
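+For illustration only (a user-space sketch with a made-up queue number;
+uint64_t stands in for the kernel's u64), widening the operand keeps the whole
+shift in 64-bit arithmetic instead of overflowing a 32-bit int:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  int main(void)
+  {
+          int instr_type = 64;
+          unsigned int iq_no = 40;   /* queue numbers above 31 are the problem */
+          uint64_t mask;
+
+          /* (instr_type == 64) has type int; shifting an int by 40 bits is
+           * undefined and could never set bits above bit 31 anyway. */
+          mask = (uint64_t)(instr_type == 64) << iq_no;
+          printf("iq64B mask: 0x%016llx\n", (unsigned long long)mask);
+          return 0;
+  }
+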
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index 6dd65f9b347cb..b606cb1906644 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -139,7 +139,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
--- /dev/null
+From 2bf9f57894216dec121fd9d76349d6aa716a0d54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments the `rx_dropped` core stat when it fails.
+The driver was also updating `ndev->stats.rx_dropped` in the same path.
+Since both are reported together via the `ip -s -s` command, this resulted
+in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 6af9a7eee1149..ded37141683a9 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -818,7 +818,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -879,8 +879,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -899,15 +901,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From aebb5fd8081d493cc48dd91404b8dc23b3f0c4b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index 0a04eb04f3a28..2e7559b7f103e 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -327,7 +327,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = (char *) driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From 7963320fb8567ee8195f1004b00e61f3c4834b5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index b8d0b68befcb9..41bd16cc9d0f6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -141,8 +141,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (port_state == VPORT_STATE_UP) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
phy-ti-pipe3-fix-device-leak-at-unbind.patch
soc-qcom-mdt_loader-deal-with-zero-e_shentsize.patch
mm-memory-failure-fix-vm_bug_on_page-pagepoisoned-page-when-unpoison-memory.patch
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
--- /dev/null
+From bd6a686db8b43253cb6dbbbd789a615d3dbbd2fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completes 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 55754bf176d99..cc0efcb4a553c 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2593,6 +2593,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ /* Deny disconnect if other threads are blocked in sk_wait_event()
+@@ -2711,6 +2712,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet->defer_connect = 0;
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
--- /dev/null
+From 8b897af7c2bcc52ad5e52339ccaacc19e09091e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index f4c7e0af896b1..ce902f8a08942 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1237,7 +1237,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+--
+2.51.0
+
--- /dev/null
+From 6befdff83f7d192163999d85f99a1aea2655399f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications into believing write() is
+allowed, potentially resulting in unnecessary wakeups.
+
+This issue dates back to the driver's initial code added by commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by commit a9a08845e9ac ("vfs: do
+bulk POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 88d1f4b56e4be..a220ac0c8eb83 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From 6b270dbf3195cb7527296fd89195042a747d2b14 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 08:01:26 +0000
+Subject: bonding: don't set oif to bond dev when getting NS target destination
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit a8ba87f04ca9cdec06776ce92dce1395026dc3bb ]
+
+Unlike IPv4, IPv6 routing strictly requires the source address to be valid
+on the outgoing interface. If the NS target is set to a remote VLAN interface,
+and the source address is also configured on a VLAN over a bond interface,
+setting the oif to the bond device will fail to retrieve the correct
+destination route.
+
+Fix this by not setting the oif to the bond device when retrieving the NS
+target destination. This allows the correct destination device (the VLAN
+interface) to be determined, so that bond_verify_device_path can return the
+proper VLAN tags for sending NS messages.
+
+Reported-by: David Wilder <wilder@us.ibm.com>
+Closes: https://lore.kernel.org/netdev/aGOKggdfjv0cApTO@fedora/
+Suggested-by: Jay Vosburgh <jv@jvosburgh.net>
+Tested-by: David Wilder <wilder@us.ibm.com>
+Acked-by: Jay Vosburgh <jv@jvosburgh.net>
+Fixes: 4e24be018eb9 ("bonding: add new parameter ns_targets")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250916080127.430626-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 11c58b88f9ce7..69ea7db784fd7 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3206,7 +3206,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+- fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+--
+2.51.0
+
--- /dev/null
+From 750a12260c583e527c9b3bcda64dbc48b17296db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 16:53:21 +0100
+Subject: btrfs: fix invalid extref key setup when replaying dentry
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit b62fd63ade7cb573b114972ef8f9fa505be8d74a ]
+
+The offset for an extref item's key is not the object ID of the parent
+dir, otherwise we would not need the extref item and would use plain ref
+items. Instead the offset is the result of a hash computation that uses
+the object ID of the parent dir and the name associated to the entry.
+So fix this by setting the key offset at replay_one_name() to be the
+result of calling btrfs_extref_hash().
+
+Fixes: 725af92a6251 ("btrfs: Open-code name_in_log_ref in replay_one_name")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 6e8e90bce0467..e4cc287eee993 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1934,7 +1934,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+- search_key.offset = key->objectid;
++ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ if (ret < 0) {
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From 252b77d3d717045947a5871d8fdd49d94ee875c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during [1] LTP cgroup testing when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 7997c8021b62f..9742574ec62fd 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5444,7 +5467,7 @@ static void css_release_work_fn(struct work_struct *work)
+ cgroup_unlock();
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5453,7 +5476,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5575,7 +5598,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5811,7 +5834,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6183,8 +6206,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 7c44f8a760b7d0a19369ea03399d1413b78de6c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, because the delayed
+work item is cyclic, the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index 2198e35d9e181..1e3eff91877d0 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4222,8 +4222,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From c822fe89cb255a7678aaa609201729f5c515488c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 17:48:25 +0300
+Subject: dpaa2-switch: fix buffer pool seeding for control traffic
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 2690cb089502b80b905f2abdafd1bf2d54e1abef ]
+
+Starting with commit c50e7475961c ("dpaa2-switch: Fix error checking in
+dpaa2_switch_seed_bp()"), the probing of a second DPSW object errors out
+like below.
+
+fsl_dpaa2_switch dpsw.1: fsl_mc_driver_probe failed: -12
+fsl_dpaa2_switch dpsw.1: probe with driver fsl_dpaa2_switch failed with error -12
+
+The aforementioned commit brought to the surface the fact that seeding
+buffers into the buffer pool destined for control traffic is not
+successful and an access violation recoverable error can be seen in the
+MC firmware log:
+
+[E, qbman_rec_isr:391, QBMAN] QBMAN recoverable event 0x1000000
+
+This happens because the driver incorrectly used the ID of the DPBP
+object instead of the hardware buffer pool ID when trying to release
+buffers into it.
+
+This is because any DPSW object uses two buffer pools, one managed by
+the Linux driver and destined for control traffic packet buffers and the
+other one managed by the MC firmware and destined only for offloaded
+traffic. And since the buffer pool managed by the MC firmware does not
+have an external facing DPBP equivalent, any subsequent DPBP objects
+created after the first DPSW will have a DPBP ID different from the
+underlying hardware buffer pool ID.
+
+The issue was not caught earlier because these two numbers can be
+identical when all DPBP objects are created before the DPSW objects are.
+This is the case when the DPL file is used to describe the entire DPAA2
+object layout and objects are created at boot time and it's also true
+for the first DPSW being created dynamically using ls-addsw.
+
+Fix this by using the buffer pool ID instead of the DPBP id when
+releasing buffers into the pool.
+
+Fixes: 2877e4f7e189 ("staging: dpaa2-switch: setup buffer pool and RX path rings")
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20250910144825.2416019-1-ioana.ciornei@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 732fd2e389c41..e928fea16e841 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2680,7 +2680,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+- ethsw->bpid = dpbp_attrs.id;
++ ethsw->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+--
+2.51.0
+
--- /dev/null
+From 11a0b36a7379aac1087b6611906e7c0916b6d3eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a feature which writes the last successfully sent descriptor to
+a memory location. The memory barrier in i40e_clean_tx_irq() was used to
+avoid forward-reading descriptor fields in case the DD bit was not set.
+Having the mentioned feature in place implies that such a situation will
+not happen, as we know in advance how many descriptors the HW has dealt
+with.
+
+Besides, this barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not before.
+Digging through git history showed me that the barrier was indeed before
+the DD bit check; anyway, the commit introducing i40e_get_head() should
+have wiped it out altogether.
+
+Also, there was one commit doing s/read_barrier_depends/smp_rmb when the
+get-head feature was already in place, but it was only theoretical, based
+on ixgbe experience; that driver differs in this respect, as it has to
+read the DD bit from the HW descriptor.
+
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 3d83fccf742b1..2ede35ba3919b 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -949,9 +949,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From ff468dea284e368b3e945cf34635f156a7ab7f4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:20 +0200
+Subject: mptcp: set remote_deny_join_id0 on SYN recv
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 96939cec994070aa5df852c10fad5fc303a97ea3 ]
+
+When a SYN containing the 'C' flag (deny join id0) was received, this
+piece of information was not propagated to the path-manager.
+
+Even if this flag is mainly set on the server side, a client can also
+tell the server it cannot try to establish new subflows to the client's
+initial IP address and port. The server's PM should then record such
+info when received, and before sending events about the new connection.
+
+Fixes: df377be38725 ("mptcp: add deny_join_id0 in mptcp_options_received")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-1-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/subflow.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index cff2328106928..2ff72b7940fe9 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -773,6 +773,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+ goto fallback;
+
+ owner = mptcp_sk(ctx->conn);
++
++ if (mp_opt.deny_join_id0)
++ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
++
+ mptcp_pm_new_connection(owner, child, 1);
+
+ /* with OoO packets we can reach here without ingress
+--
+2.51.0
+
--- /dev/null
+From a5a64d5c8364e0b1051e3a390d8de8b3ed3deaae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index 8e59c2825533a..2a066f193bca1 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -135,7 +135,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
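A minimal userspace sketch of the overflow class fixed above; the values and
the program structure are made up for illustration and are not part of the
driver.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int instr_type = 64;
	unsigned int iq_no = 40;	/* can legitimately exceed 31 */
	uint64_t mask;

	/* (instr_type == 64) has type int, so without the cast the shift
	 * would be performed in 32-bit arithmetic; for iq_no >= 32 that is
	 * undefined behaviour and the intended bit is lost.  Promoting the
	 * operand to 64 bits first keeps the whole expression in u64. */
	mask = (uint64_t)(instr_type == 64) << iq_no;

	printf("mask = 0x%016llx\n", (unsigned long long)mask);
	return 0;
}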
--- /dev/null
+From 8a339436b517bfca0eebe2591be07fcaa9f92753 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments `rx_dropped` core stat when it fails.
+The driver was also updating `ndev->stats.rx_dropped` in the same path.
+Since both are reported together via the `ip -s -s` command, this resulted
+in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 998586872599b..c692d2e878b2e 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From ab657b1d978396600bafd7466482d756e710bd53 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 14:38:53 +0800
+Subject: octeontx2-pf: Fix use-after-free bugs in otx2_sync_tstamp()
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit f8b4687151021db61841af983f1cb7be6915d4ef ]
+
+The original code relies on cancel_delayed_work() in otx2_ptp_destroy(),
+which does not ensure that the delayed work item synctstamp_work has fully
+completed if it was already running. This leads to use-after-free scenarios
+where otx2_ptp is deallocated by otx2_ptp_destroy(), while synctstamp_work
+remains active and attempts to dereference otx2_ptp in otx2_sync_tstamp().
+Furthermore, since the synctstamp_work is cyclic, the likelihood of
+triggering the bug is non-negligible.
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+otx2_remove() |
+ otx2_ptp_destroy() | otx2_sync_tstamp()
+ cancel_delayed_work() |
+ kfree(ptp) |
+ | ptp = container_of(...); //UAF
+ | ptp-> //UAF
+
+This is confirmed by a KASAN report:
+
+BUG: KASAN: slab-use-after-free in __run_timer_base.part.0+0x7d7/0x8c0
+Write of size 8 at addr ffff88800aa09a18 by task bash/136
+...
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x55/0x70
+ print_report+0xcf/0x610
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ kasan_report+0xb8/0xf0
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ __run_timer_base.part.0+0x7d7/0x8c0
+ ? __pfx___run_timer_base.part.0+0x10/0x10
+ ? __pfx_read_tsc+0x10/0x10
+ ? ktime_get+0x60/0x140
+ ? lapic_next_event+0x11/0x20
+ ? clockevents_program_event+0x1d4/0x2a0
+ run_timer_softirq+0xd1/0x190
+ handle_softirqs+0x16a/0x550
+ irq_exit_rcu+0xaf/0xe0
+ sysvec_apic_timer_interrupt+0x70/0x80
+ </IRQ>
+...
+Allocated by task 1:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ __kasan_kmalloc+0x7f/0x90
+ otx2_ptp_init+0xb1/0x860
+ otx2_probe+0x4eb/0xc30
+ local_pci_probe+0xdc/0x190
+ pci_device_probe+0x2fe/0x470
+ really_probe+0x1ca/0x5c0
+ __driver_probe_device+0x248/0x310
+ driver_probe_device+0x44/0x120
+ __driver_attach+0xd2/0x310
+ bus_for_each_dev+0xed/0x170
+ bus_add_driver+0x208/0x500
+ driver_register+0x132/0x460
+ do_one_initcall+0x89/0x300
+ kernel_init_freeable+0x40d/0x720
+ kernel_init+0x1a/0x150
+ ret_from_fork+0x10c/0x1a0
+ ret_from_fork_asm+0x1a/0x30
+
+Freed by task 136:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ kasan_save_free_info+0x3a/0x60
+ __kasan_slab_free+0x3f/0x50
+ kfree+0x137/0x370
+ otx2_ptp_destroy+0x38/0x80
+ otx2_remove+0x10d/0x4c0
+ pci_device_remove+0xa6/0x1d0
+ device_release_driver_internal+0xf8/0x210
+ pci_stop_bus_device+0x105/0x150
+ pci_stop_and_remove_bus_device_locked+0x15/0x30
+ remove_store+0xcc/0xe0
+ kernfs_fop_write_iter+0x2c3/0x440
+ vfs_write+0x871/0xd70
+ ksys_write+0xee/0x1c0
+ do_syscall_64+0xac/0x280
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the delayed work item is properly canceled before the otx2_ptp is
+deallocated.
+
+This bug was initially identified through static analysis. To reproduce
+and test it, I simulated the OcteonTX2 PCI device in QEMU and introduced
+artificial delays within the otx2_sync_tstamp() function to increase the
+likelihood of triggering the bug.
+
+Fixes: 2958d17a8984 ("octeontx2-pf: Add support for ptp 1-step mode on CN10K silicon")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+index 896b2f9bac344..d2584b450f272 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+@@ -365,7 +365,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
+ if (!ptp)
+ return;
+
+- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
++ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+--
+2.51.0
+
--- /dev/null
+From 12c628d2a715caac0641be1688747e6857a9c5e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index e22a752052f2f..b8260dd12b1a8 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -305,7 +305,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From d7ab85d902abebd97893a3c1492623e63cc4afcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:29:16 +1000
+Subject: qed: Don't collect too many protection override GRC elements
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 56c0a2a9ddc2f5b5078c5fb0f81ab76bbc3d4c37 ]
+
+In the protection override dump path, the firmware can return far too
+many GRC elements, resulting in attempting to write past the end of the
+previously-kmalloc'ed dump buffer.
+
+This will result in a kernel panic with reason:
+
+ BUG: unable to handle kernel paging request at ADDRESS
+
+where "ADDRESS" is just past the end of the protection override dump
+buffer. The start address of the buffer is:
+ p_hwfn->cdev->dbg_features[DBG_FEATURE_PROTECTION_OVERRIDE].dump_buf
+and the size of the buffer is buf_size in the same data structure.
+
+The panic can be arrived at from either the qede Ethernet driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc02662ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc0267792 [qed]
+ qed_dbg_feature at ffffffffc026aa8f [qed]
+ qed_dbg_all_data at ffffffffc026b211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc027298a [qed]
+ devlink_health_do_dump at ffffffff82497f61
+ devlink_health_report at ffffffff8249cf29
+ qed_report_fatal_error at ffffffffc0272baf [qed]
+ qede_sp_task at ffffffffc045ed32 [qede]
+ process_one_work at ffffffff81d19783
+
+or the qedf storage driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc068b2ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc068c792 [qed]
+ qed_dbg_feature at ffffffffc068fa8f [qed]
+ qed_dbg_all_data at ffffffffc0690211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc069798a [qed]
+ devlink_health_do_dump at ffffffff8aa95e51
+ devlink_health_report at ffffffff8aa9ae19
+ qed_report_fatal_error at ffffffffc0697baf [qed]
+ qed_hw_err_notify at ffffffffc06d32d7 [qed]
+ qed_spq_post at ffffffffc06b1011 [qed]
+ qed_fcoe_destroy_conn at ffffffffc06b2e91 [qed]
+ qedf_cleanup_fcport at ffffffffc05e7597 [qedf]
+ qedf_rport_event_handler at ffffffffc05e7bf7 [qedf]
+ fc_rport_work at ffffffffc02da715 [libfc]
+ process_one_work at ffffffff8a319663
+
+Resolve this by clamping the firmware's return value to the maximum
+number of legal elements the firmware should return.
+
+Fixes: d52c89f120de8 ("qed*: Utilize FW 8.37.2.0")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/f8e1182934aa274c18d0682a12dbaf347595469c.1757485536.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index cdcead614e9fa..ae421c2707785 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+--
+2.51.0
+
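The defensive pattern applied above, sketched as standalone C; the buffer
size and the firmware-reported count are hypothetical.

#include <stdio.h>
#include <string.h>

#define DUMP_CAPACITY_DWORDS 16u

/* Stand-in for reading an element count from device registers. */
static unsigned int fw_reported_dwords(void)
{
	return 1000;	/* far more than the buffer can hold */
}

int main(void)
{
	unsigned int dump_buf[DUMP_CAPACITY_DWORDS];
	unsigned int n = fw_reported_dwords();

	/* Never trust a device-reported count: clamp it to the capacity
	 * the dump buffer was allocated with before writing into it. */
	if (n > DUMP_CAPACITY_DWORDS)
		n = DUMP_CAPACITY_DWORDS;

	memset(dump_buf, 0, n * sizeof(dump_buf[0]));
	printf("dumped %u dwords\n", n);
	return 0;
}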
--- /dev/null
+From acbd4eb6dfb1ebc59475896342eb998f80c5842c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index ae3a7b96f7978..7612070b66160 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -107,8 +107,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (up) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
--- /dev/null
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mac80211-increase-scan_ies_len-for-s1g.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+btrfs-fix-invalid-extref-key-setup-when-replaying-de.patch
+um-virtio_uml-fix-use-after-free-after-put_device-in.patch
+dpaa2-switch-fix-buffer-pool-seeding-for-control-tra.patch
+qed-don-t-collect-too-many-protection-override-grc-e.patch
+mptcp-set-remote_deny_join_id0-on-syn-recv.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+bonding-don-t-set-oif-to-bond-dev-when-getting-ns-ta.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+tls-make-sure-to-abort-the-stream-if-headers-are-bog.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
+octeontx2-pf-fix-use-after-free-bugs-in-otx2_sync_ts.patch
--- /dev/null
+From 4717254dde96f0701be4da5c815b9c1a8f73fe79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completes 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index b64d53590f25c..d94daa296d59d 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3130,6 +3130,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ if (old_state != TCP_CLOSE)
+@@ -3239,6 +3240,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet->defer_connect = 0;
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
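A sketch of the socket-reuse sequence from the report, in plain C. It is not
a working reproducer: it omits the TCP Fast Open listener setup and the
timing needed to hit the race, and the helper name is made up. It only shows
how connect(AF_UNSPEC) disconnects an accepted socket so it can be reused as
a client.

#include <sys/socket.h>
#include <netinet/in.h>

void reuse_accepted_socket(int afd, const struct sockaddr_in *new_peer)
{
	struct sockaddr sa = { .sa_family = AF_UNSPEC };

	/* 1. afd came from accept() on a TFO listener (TCP_SYN_RECV). */

	/* 2. connect(AF_UNSPEC) invokes tcp_disconnect(), moving the socket
	 *    to TCP_CLOSE; before the fix, fastopen_rsk was left behind. */
	connect(afd, &sa, sizeof(sa));

	/* 3. Connecting to another destination restarts the timers; the
	 *    stale fastopen_rsk then tripped the retransmit-timer WARN. */
	connect(afd, (const struct sockaddr *)new_peer, sizeof(*new_peer));
}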
--- /dev/null
+From 699c158e3ad7482de4c3eb461b039cbd5aa8f190 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 17:28:13 -0700
+Subject: tls: make sure to abort the stream if headers are bogus
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0aeb54ac4cd5cf8f60131b4d9ec0b6dc9c27b20d ]
+
+Normally we wait for the socket to buffer up the whole record
+before we service it. If the socket has a tiny buffer, however,
+we read out the data sooner, to prevent connection stalls.
+Make sure that we abort the connection when we find out late
+that the record is actually invalid. Retrying the parsing is
+fine in itself but since we copy some more data each time
+before we parse we can overflow the allocated skb space.
+
+Constructing a scenario in which we're under pressure without
+enough data in the socket to parse the length upfront is quite
+hard. syzbot figured out a way to do this by serving us the header
+in small OOB sends, and then filling in the recvbuf with a large
+normal send.
+
+Make sure that tls_rx_msg_size() aborts strp, if we reach
+an invalid record there's really no way to recover.
+
+Reported-by: Lee Jones <lee@kernel.org>
+Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser")
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://patch.msgid.link/20250917002814.1743558-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls.h | 1 +
+ net/tls/tls_strp.c | 14 +++++++++-----
+ net/tls/tls_sw.c | 3 +--
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 4922668fefaa8..f25699517bdf8 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -91,6 +91,7 @@ int tls_sk_query(struct sock *sk, int optname, char __user *optval,
+ int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
+ unsigned int optlen);
+ void tls_err_abort(struct sock *sk, int err);
++void tls_strp_abort_strp(struct tls_strparser *strp, int err);
+
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index b7ed76c0e576e..532230bed13b0 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -12,7 +12,7 @@
+
+ static struct workqueue_struct *tls_strp_wq;
+
+-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
++void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ {
+ if (strp->stopped)
+ return;
+@@ -210,11 +210,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+ {
++ unsigned int nfrag = skb->len / PAGE_SIZE;
+ size_t len, chunk;
+ skb_frag_t *frag;
+ int sz;
+
+- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
++ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ return -EMSGSIZE;
++ }
++
++ frag = &skb_shinfo(skb)->frags[nfrag];
+
+ len = in_len;
+ /* First make sure we got the header */
+@@ -515,10 +521,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ tls_strp_load_anchor_with_queue(strp, inq);
+ if (!strp->stm.full_len) {
+ sz = tls_rx_msg_size(strp, strp->anchor);
+- if (sz < 0) {
+- tls_strp_abort_strp(strp, sz);
++ if (sz < 0)
+ return sz;
+- }
+
+ strp->stm.full_len = sz;
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 96e62e8f1dad2..fe6514e964ba3 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2435,8 +2435,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+ return data_len + TLS_HEADER_SIZE;
+
+ read_failure:
+- tls_err_abort(strp->sk, ret);
+-
++ tls_strp_abort_strp(strp, ret);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1e7ae85e9e3baf538105726ccce5e3bd24290a59 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 15:00:51 +0800
+Subject: um: virtio_uml: Fix use-after-free after put_device in probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0 ]
+
+When register_virtio_device() fails in virtio_uml_probe(),
+the code sets vu_dev->registered = 1 even though
+the device was not successfully registered.
+This can lead to use-after-free or other issues.
+
+Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virtio_uml.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index ddd080f6dd82e..d288dbed5f5bc 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1229,10 +1229,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+--
+2.51.0
+
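The general driver-core rule this fix follows, as a hedged sketch using the
plain device API rather than the virtio_uml code (my_dev, my_register and the
registered flag are placeholders): once the failing registration path has
dropped the reference with put_device(), the structure may already be freed,
so the function must return immediately and never mark the device as
registered.

#include <linux/device.h>

struct my_dev {
	struct device dev;	/* dev.release must free the containing struct */
	bool registered;
};

static int my_register(struct my_dev *d)
{
	int rc;

	rc = device_register(&d->dev);
	if (rc) {
		/* A failed device_register() still requires put_device(),
		 * which may free d through its release callback. */
		put_device(&d->dev);
		return rc;	/* d must not be touched past this point */
	}
	d->registered = true;	/* only after successful registration */
	return 0;
}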
--- /dev/null
+From 0578ca708c1079cd69886cb8b2377cc7685b6c35 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index e685c12757f4b..1f961944ecc98 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1216,7 +1216,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+--
+2.51.0
+
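A small standalone illustration of why a negative errno value does not belong
in an unsigned variable; this is generic C, not the mac80211 code.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t uret = -EOPNOTSUPP;	/* wraps to a large positive value */
	int32_t sret = -EOPNOTSUPP;

	/* The unsigned copy can never compare as less than zero, so the
	 * usual "if (ret < 0)" error check silently stops working. */
	printf("unsigned copy: %u\n", (unsigned)uret);
	printf("signed copy:   %d, (ret < 0) is %s\n",
	       (int)sret, sret < 0 ? "true" : "false");
	return 0;
}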
--- /dev/null
+From 2a25390356fc836a2343ca90632ee35b71329287 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Aug 2025 18:54:37 +1000
+Subject: wifi: mac80211: increase scan_ies_len for S1G
+
+From: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+
+[ Upstream commit 7e2f3213e85eba00acb4cfe6d71647892d63c3a1 ]
+
+Currently the S1G capability element is not taken into account
+for the scan_ies_len, which leads to a buffer length validation
+failure in ieee80211_prep_hw_scan() and subsequent WARN in
+__ieee80211_start_scan(). This prevents hw scanning from functioning.
+To fix this, ensure we accommodate the S1G capability length.
+
+Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+Link: https://patch.msgid.link/20250826085437.3493-1-lachlan.hodges@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 683301d9f5084..7831e412c7b9d 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -945,7 +945,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ int result, i;
+ enum nl80211_band band;
+ int channels, max_bitrates;
+- bool supp_ht, supp_vht, supp_he, supp_eht;
++ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
+ struct cfg80211_chan_def dflt_chandef = {};
+
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
+@@ -1061,6 +1061,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ supp_vht = false;
+ supp_he = false;
+ supp_eht = false;
++ supp_s1g = false;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+
+@@ -1097,6 +1098,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ max_bitrates = sband->n_bitrates;
+ supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
++ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+@@ -1219,6 +1221,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_cap);
+
++ if (supp_s1g)
++ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
++
+ /*
+ * HE cap element is variable in size - set len to allow max size */
+ if (supp_he) {
+--
+2.51.0
+
--- /dev/null
+From b6007a1b9f1084aa7845613809d18af639c61a6a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications to believe write() is
+allowed, potentially resulting in unnecessary wakeups.
+
+This issue dates back to the driver's initial code added by a commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by a commit a9a08845e9ac ("vfs: do
+bulk POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 88d1f4b56e4be..a220ac0c8eb83 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From e49908478147378ec4434b3b376a54f6ae75202e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 08:01:26 +0000
+Subject: bonding: don't set oif to bond dev when getting NS target destination
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit a8ba87f04ca9cdec06776ce92dce1395026dc3bb ]
+
+Unlike IPv4, IPv6 routing strictly requires the source address to be valid
+on the outgoing interface. If the NS target is set to a remote VLAN interface,
+and the source address is also configured on a VLAN over a bond interface,
+setting the oif to the bond device will fail to retrieve the correct
+destination route.
+
+Fix this by not setting the oif to the bond device when retrieving the NS
+target destination. This allows the correct destination device (the VLAN
+interface) to be determined, so that bond_verify_device_path can return the
+proper VLAN tags for sending NS messages.
+
+Reported-by: David Wilder <wilder@us.ibm.com>
+Closes: https://lore.kernel.org/netdev/aGOKggdfjv0cApTO@fedora/
+Suggested-by: Jay Vosburgh <jv@jvosburgh.net>
+Tested-by: David Wilder <wilder@us.ibm.com>
+Acked-by: Jay Vosburgh <jv@jvosburgh.net>
+Fixes: 4e24be018eb9 ("bonding: add new parameter ns_targets")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250916080127.430626-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 952737a98751e..00204e42de2e7 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3338,7 +3338,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+- fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+--
+2.51.0
+
--- /dev/null
+From 32b83eb4c2d1006f68c7962c3353833e9831c0a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 02:43:34 +0000
+Subject: bonding: set random address only when slaves already exist
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 35ae4e86292ef7dfe4edbb9942955c884e984352 ]
+
+After commit 5c3bf6cba791 ("bonding: assign random address if device
+address is same as bond"), bonding will erroneously randomize the MAC
+address of the first interface added to the bond if fail_over_mac =
+follow.
+
+Correct this by additionally testing for the bond being empty before
+randomizing the MAC.
+
+Fixes: 5c3bf6cba791 ("bonding: assign random address if device address is same as bond")
+Reported-by: Qiuling Ren <qren@redhat.com>
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250910024336.400253-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 52ff0f9e04e07..952737a98751e 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2117,6 +2117,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
++ bond_has_slaves(bond) &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+--
+2.51.0
+
--- /dev/null
+From aa74e05ae7a20b9b7b6f548aaaa2c921113aff79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 16:53:21 +0100
+Subject: btrfs: fix invalid extref key setup when replaying dentry
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit b62fd63ade7cb573b114972ef8f9fa505be8d74a ]
+
+The offset for an extref item's key is not the object ID of the parent
+dir, otherwise we would not need the extref item and would use plain ref
+items. Instead the offset is the result of a hash computation that uses
+the object ID of the parent dir and the name associated to the entry.
+So fix this by setting the key offset at replay_one_name() to be the
+result of calling btrfs_extref_hash().
+
+Fixes: 725af92a6251 ("btrfs: Open-code name_in_log_ref in replay_one_name")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index f917fdae7e672..0022ad003791f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1946,7 +1946,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+- search_key.offset = key->objectid;
++ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ if (ret < 0) {
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From 927d59c67c1af7ef8107cce783e2ddea87e8d596 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during [1] LTP cgroup testing when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index 62933468aaf46..5fc2801ee921c 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5541,7 +5564,7 @@ static void css_release_work_fn(struct work_struct *work)
+ cgroup_unlock();
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5550,7 +5573,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5685,7 +5708,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5918,7 +5941,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6296,8 +6319,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 16cd5b99ea5197e9ac8a13f3e4f35e58f0eb2186 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, the delayed work
+item is cyclic, the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index a9040c42d2ff9..6e97a5a7daaf9 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From d3dfa9f4c2caf7321eb94c1c277a4d9a5e5317ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 17:48:25 +0300
+Subject: dpaa2-switch: fix buffer pool seeding for control traffic
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 2690cb089502b80b905f2abdafd1bf2d54e1abef ]
+
+Starting with commit c50e7475961c ("dpaa2-switch: Fix error checking in
+dpaa2_switch_seed_bp()"), the probing of a second DPSW object errors out
+like below.
+
+fsl_dpaa2_switch dpsw.1: fsl_mc_driver_probe failed: -12
+fsl_dpaa2_switch dpsw.1: probe with driver fsl_dpaa2_switch failed with error -12
+
+The aforementioned commit brought to the surface the fact that seeding
+buffers into the buffer pool destined for control traffic is not
+successful and an access violation recoverable error can be seen in the
+MC firmware log:
+
+[E, qbman_rec_isr:391, QBMAN] QBMAN recoverable event 0x1000000
+
+This happens because the driver incorrectly used the ID of the DPBP
+object instead of the hardware buffer pool ID when trying to release
+buffers into it.
+
+This is because any DPSW object uses two buffer pools, one managed by
+the Linux driver and destined for control traffic packet buffers and the
+other one managed by the MC firmware and destined only for offloaded
+traffic. And since the buffer pool managed by the MC firmware does not
+have an external facing DPBP equivalent, any subsequent DPBP objects
+created after the first DPSW will have a DPBP ID different from the
+underlying hardware buffer pool ID.
+
+The issue was not caught earlier because these two numbers can be
+identical when all DPBP objects are created before the DPSW objects are.
+This is the case when the DPL file is used to describe the entire DPAA2
+object layout and objects are created at boot time and it's also true
+for the first DPSW being created dynamically using ls-addsw.
+
+Fix this by using the buffer pool ID instead of the DPBP id when
+releasing buffers into the pool.
+
+Fixes: 2877e4f7e189 ("staging: dpaa2-switch: setup buffer pool and RX path rings")
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20250910144825.2416019-1-ioana.ciornei@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index cbd3859ea475b..980daecab8ea3 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2735,7 +2735,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+- ethsw->bpid = dpbp_attrs.id;
++ ethsw->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+--
+2.51.0
+
--- /dev/null
+From 13f8f045157cbba249009fdcf209c0e44b1111ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a head write-back feature: the HW writes the index of the last
+successfully sent descriptor to a memory location. The memory barrier in
+i40e_clean_tx_irq() was used to avoid reading descriptor fields ahead of
+time in case the DD bit was not set. With this feature in place, that
+situation cannot happen, as we know in advance how many descriptors the
+HW has dealt with.
+
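+For reference, this is roughly how the head write-back value is consumed in
+i40e_clean_tx_irq() (an illustrative sketch of the idea, not a verbatim
+excerpt):
+
+    struct i40e_tx_desc *tx_head;
+
+    /* HW DMA-writes the index of the last completed descriptor into host
+     * memory just past the descriptor ring, so obtaining it is a plain
+     * load that never touches descriptor fields such as the DD bit.
+     */
+    tx_head = I40E_TX_DESC(tx_ring, i40e_get_head(tx_ring));
+
+    /* inside the cleaning loop: stop once we catch up to the HW head */
+    if (tx_head == tx_desc)
+        break;
+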
+Besides, this barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not before.
+Digging through the git history shows that the barrier was indeed placed
+before the DD bit check; in any case, the commit introducing
+i40e_get_head() should have removed it altogether.
+
+Also, one commit replaced read_barrier_depends with smp_rmb when the
+get-head feature was already in place, but that change was purely
+theoretical, based on ixgbe experience; ixgbe differs in this respect, as
+that driver has to read the DD bit from the HW descriptor.
+
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index c006f716a3bdb..ca7517a68a2c3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -947,9 +947,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From 7b8f0200a50dce83f351af259b95fe666d5517e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 16:00:14 -0700
+Subject: ice: fix Rx page leak on multi-buffer frames
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 84bf1ac85af84d354c7a2fdbdc0d4efc8aaec34b ]
+
+The ice_put_rx_mbuf() function handles calling ice_put_rx_buf() for each
+buffer in the current frame. This function was introduced as part of
+handling multi-buffer XDP support in the ice driver.
+
+It works by iterating over the buffers from first_desc up to 1 plus the
+total number of fragments in the frame, cached from before the XDP program
+was executed.
+
+If the hardware posts a descriptor with a size of 0, the logic used in
+ice_put_rx_mbuf() breaks. Such descriptors get skipped and don't get added
+as fragments in ice_add_xdp_frag. Since the buffer isn't counted as a
+fragment, we do not iterate over it in ice_put_rx_mbuf(), and thus we don't
+call ice_put_rx_buf().
+
+Because we don't call ice_put_rx_buf(), we don't attempt to re-use the
+page or free it. This leaves a stale page in the ring, as we don't
+increment next_to_alloc.
+
+The ice_reuse_rx_page() assumes that the next_to_alloc has been incremented
+properly, and that it always points to a buffer with a NULL page. Since
+this function doesn't check, it will happily recycle a page over the top
+of the next_to_alloc buffer, losing track of the old page.
+
+Note that this leak only occurs for multi-buffer frames. The
+ice_put_rx_mbuf() function always handles at least one buffer, so a
+single-buffer frame will always get handled correctly. It is not clear
+precisely why the hardware hands us descriptors with a size of 0 sometimes,
+but it happens somewhat regularly with "jumbo frames" when a 9K MTU is used.
+
+To fix ice_put_rx_mbuf(), we need to make sure to call ice_put_rx_buf() on
+all buffers between first_desc and next_to_clean. Borrow the logic of a
+similar function in i40e used for this same purpose. Use the same logic
+also in ice_get_pgcnts().
+
+Instead of iterating over just the number of fragments, use a loop which
+iterates until the current index reaches the next_to_clean element just
+past the current frame. Unlike i40e, the ice_put_rx_mbuf() function does
+call ice_put_rx_buf() on the last buffer of the frame indicating the end of
+packet.
+
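+In other words, both ice_get_pgcnts() and ice_put_rx_mbuf() now walk the ring
+with the same wrap-around pattern (sketch distilled from the hunks below):
+
+    u32 idx = rx_ring->first_desc;
+    u32 cnt = rx_ring->count;
+
+    while (idx != ntc) {
+        struct ice_rx_buf *buf = &rx_ring->rx_buf[idx];
+
+        /* operate on buf: snapshot page_count() or release the buffer */
+
+        if (++idx == cnt)
+            idx = 0;    /* wrap around the ring */
+    }
+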
+For non-linear (multi-buffer) frames, we need to take care when adjusting
+the pagecnt_bias. An XDP program might release fragments from the tail of
+the frame, in which case that fragment page is already released. Only
+update the pagecnt_bias for the first descriptor and fragments still
+remaining post-XDP program. Take care to only access the shared info for
+fragmented buffers, as this avoids a significant cache miss.
+
+The xdp_xmit value only needs to be updated if an XDP program is run, and
+only once per packet. Drop the xdp_xmit pointer argument from
+ice_put_rx_mbuf(). Instead, set xdp_xmit in the ice_clean_rx_irq() function
+directly. This avoids needing to pass the argument and avoids an extra
+bit-wise OR for each buffer in the frame.
+
+Move the increment of the ntc local variable to ensure it is updated *before*
+all calls to ice_get_pgcnts() or ice_put_rx_mbuf(), as the loop logic
+requires the index of the element just after the current frame.
+
+Now that we use an index pointer in the ring to identify the packet, we no
+longer need to track or cache the number of fragments in the rx_ring.
+
+Cc: Christoph Petrausch <christoph.petrausch@deepl.com>
+Cc: Jesper Dangaard Brouer <hawk@kernel.org>
+Reported-by: Jaroslav Pulchart <jaroslav.pulchart@gooddata.com>
+Closes: https://lore.kernel.org/netdev/CAK8fFZ4hY6GUJNENz3wY9jaYLZXGfpr7dnZxzGMYoE44caRbgw@mail.gmail.com/
+Fixes: 743bbd93cf29 ("ice: put Rx buffers after being done with current frame")
+Tested-by: Michal Kubiak <michal.kubiak@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_txrx.c | 80 ++++++++++-------------
+ drivers/net/ethernet/intel/ice/ice_txrx.h | 1 -
+ 2 files changed, 34 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index cde69f5686656..431a6ed498a4e 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -865,10 +865,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
+- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+- * can pop off frags but driver has to handle it on its own
+- */
+- rx_ring->nr_frags = sinfo->nr_frags;
+
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+@@ -939,20 +935,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ /**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
++ * @ntc: the next to clean element (not included in this frame!)
+ *
+ * This function is intended to be called right before running XDP
+ * program so that the page recycling mechanism will be able to take
+ * a correct decision regarding underlying pages; this is done in such
+ * way as XDP program can change the refcount of page
+ */
+-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
++static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
+ {
+- u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+- for (int i = 0; i < nr_frags; i++) {
++ while (idx != ntc) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+@@ -1125,62 +1121,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ }
+
+ /**
+- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
++ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+- * @ntc: a current next_to_clean value to be stored at rx_ring
++ * @ntc: the next to clean element (not included in this frame!)
+ * @verdict: return code from XDP program execution
+ *
+- * Walk through gathered fragments and satisfy internal page
+- * recycle mechanism; we take here an action related to verdict
+- * returned by XDP program;
++ * Called after XDP program is completed, or on error with verdict set to
++ * ICE_XDP_CONSUMED.
++ *
++ * Walk through buffers from first_desc to the end of the frame, releasing
++ * buffers and satisfying internal page recycle mechanism. The action depends
++ * on verdict from XDP program.
+ */
+ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+- u32 *xdp_xmit, u32 ntc, u32 verdict)
++ u32 ntc, u32 verdict)
+ {
+- u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+- u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+- int i;
++ u32 xdp_frags = 0;
++ int i = 0;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+- post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
++ xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+- for (i = 0; i < post_xdp_frags; i++) {
++ while (idx != ntc) {
+ buf = &rx_ring->rx_buf[idx];
++ if (++idx == cnt)
++ idx = 0;
+
+- if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
++ /* An XDP program could release fragments from the end of the
++ * buffer. For these, we need to keep the pagecnt_bias as-is.
++ * To do this, only adjust pagecnt_bias for fragments up to
++ * the total remaining after the XDP program has run.
++ */
++ if (verdict != ICE_XDP_CONSUMED)
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+- *xdp_xmit |= verdict;
+- } else if (verdict & ICE_XDP_CONSUMED) {
++ else if (i++ <= xdp_frags)
+ buf->pagecnt_bias++;
+- } else if (verdict == ICE_XDP_PASS) {
+- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+- }
+
+ ice_put_rx_buf(rx_ring, buf);
+-
+- if (++idx == cnt)
+- idx = 0;
+- }
+- /* handle buffers that represented frags released by XDP prog;
+- * for these we keep pagecnt_bias as-is; refcount from struct page
+- * has been decremented within XDP prog and we do not have to increase
+- * the biased refcnt
+- */
+- for (; i < nr_frags; i++) {
+- buf = &rx_ring->rx_buf[idx];
+- ice_put_rx_buf(rx_ring, buf);
+- if (++idx == cnt)
+- idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+- rx_ring->nr_frags = 0;
+ }
+
+ /**
+@@ -1260,6 +1245,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ /* retrieve a buffer from the ring */
+ rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+
++ /* Increment ntc before calls to ice_put_rx_mbuf() */
++ if (++ntc == cnt)
++ ntc = 0;
++
+ if (!xdp->data) {
+ void *hard_start;
+
+@@ -1268,24 +1257,23 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+- ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
++ ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
+ break;
+ }
+- if (++ntc == cnt)
+- ntc = 0;
+
+ /* skip if it is NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+- ice_get_pgcnts(rx_ring);
++ ice_get_pgcnts(rx_ring, ntc);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
+ goto construct_skb;
+ total_rx_bytes += xdp_get_buff_len(xdp);
+ total_rx_pkts++;
+
+- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
++ xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
+
+ continue;
+ construct_skb:
+@@ -1298,7 +1286,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ xdp_verdict = ICE_XDP_CONSUMED;
+ }
+- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index 7eef66f5964a3..a13531c21d4ad 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -357,7 +357,6 @@ struct ice_rx_ring {
+ struct ice_tx_ring *xdp_ring;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
+ struct xsk_buff_pool *xsk_pool;
+- u32 nr_frags;
+ u16 max_frame;
+ u16 rx_buf_len;
+ dma_addr_t dma; /* physical address of ring */
+--
+2.51.0
+
--- /dev/null
+From 2aa39010e622492c0f45b1c53a5266dbae6d5ad4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Sep 2024 16:07:45 -0700
+Subject: ice: store max_frame and rx_buf_len only in ice_rx_ring
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 7e61c89c6065731dfc11ac7a2c0dd27a910f2afb ]
+
+The max_frame and rx_buf_len fields of the VSI set the maximum frame size
+for packets on the wire, and configure the size of the Rx buffer. In the
+hardware, these are per-queue settings. Most VSI types use a simple
+method to determine the size of the buffers for all queues.
+
+However, VFs may potentially configure different values for each queue.
+While the Linux iAVF driver does not do this, it is allowed by the virtchnl
+interface.
+
+The current virtchnl code simply sets the per-VSI fields in between calls to
+ice_vsi_cfg_single_rxq(). This technically works, as these fields are only
+ever used when programming the Rx ring, and otherwise not checked again.
+However, it is confusing to maintain.
+
+The Rx ring also already has an rx_buf_len field in order to access the
+buffer length in the hot path. It also has extra unused bytes in the ring
+structure which we can make use of to store the maximum frame size.
+
+Drop the VSI max_frame and rx_buf_len fields. Add max_frame to the Rx ring,
+and slightly re-order rx_buf_len to better fit into the gaps in the
+structure layout.
+
+Change the ice_vsi_cfg_frame_size function so that it writes to the ring
+fields. Call this function once per ring in ice_vsi_cfg_rxqs(). This is
+done instead of calling it inside ice_vsi_cfg_rxq(), because
+ice_vsi_cfg_rxq() is called in the virtchnl flow where the max_frame and
+rx_buf_len have already been configured.
+
+Change the accesses for rx_buf_len and max_frame to all point to the ring
+structure. This has the added benefit that ice_vsi_cfg_rxq() no longer has
+the surprise side effect of updating ring->rx_buf_len based on the VSI
+field.
+
+Update the virtchnl ice_vc_cfg_qs_msg() function to set the ring values
+directly, and drop references to the removed VSI fields.
+
+This now makes the VF logic clear, as the ring fields are obviously
+per-queue. This reduces the required cognitive load when reasoning about
+this logic.
+
+Note that removing the VSI fields does leave a 4 byte gap, but the ice_vsi
+structure has many gaps, and its layout is not as critical in the hot path.
+The structure may benefit from a more thorough repacking, but no attempt
+was made in this change.
+
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Rafal Romanowski <rafal.romanowski@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Stable-dep-of: 84bf1ac85af8 ("ice: fix Rx page leak on multi-buffer frames")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice.h | 3 --
+ drivers/net/ethernet/intel/ice/ice_base.c | 34 ++++++++++---------
+ drivers/net/ethernet/intel/ice/ice_txrx.h | 3 +-
+ drivers/net/ethernet/intel/ice/ice_virtchnl.c | 7 ++--
+ 4 files changed, 23 insertions(+), 24 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
+index 2960709f6b62c..0e699a0432c5b 100644
+--- a/drivers/net/ethernet/intel/ice/ice.h
++++ b/drivers/net/ethernet/intel/ice/ice.h
+@@ -372,9 +372,6 @@ struct ice_vsi {
+ spinlock_t arfs_lock; /* protects aRFS hash table and filter state */
+ atomic_t *arfs_last_fltr_id;
+
+- u16 max_frame;
+- u16 rx_buf_len;
+-
+ struct ice_aqc_vsi_props info; /* VSI properties */
+ struct ice_vsi_vlan_info vlan_info; /* vlan config to be restored */
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
+index 4a9a6899fc453..98c3764fed396 100644
+--- a/drivers/net/ethernet/intel/ice/ice_base.c
++++ b/drivers/net/ethernet/intel/ice/ice_base.c
+@@ -445,7 +445,7 @@ static int ice_setup_rx_ctx(struct ice_rx_ring *ring)
+ /* Max packet size for this queue - must not be set to a larger value
+ * than 5 x DBUF
+ */
+- rlan_ctx.rxmax = min_t(u32, vsi->max_frame,
++ rlan_ctx.rxmax = min_t(u32, ring->max_frame,
+ ICE_MAX_CHAINED_RX_BUFS * ring->rx_buf_len);
+
+ /* Rx queue threshold in units of 64 */
+@@ -541,8 +541,6 @@ static int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
+ u32 num_bufs = ICE_RX_DESC_UNUSED(ring);
+ int err;
+
+- ring->rx_buf_len = ring->vsi->rx_buf_len;
+-
+ if (ring->vsi->type == ICE_VSI_PF || ring->vsi->type == ICE_VSI_SF) {
+ if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+ err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+@@ -641,21 +639,25 @@ int ice_vsi_cfg_single_rxq(struct ice_vsi *vsi, u16 q_idx)
+ /**
+ * ice_vsi_cfg_frame_size - setup max frame size and Rx buffer length
+ * @vsi: VSI
++ * @ring: Rx ring to configure
++ *
++ * Determine the maximum frame size and Rx buffer length to use for a PF VSI.
++ * Set these in the associated Rx ring structure.
+ */
+-static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi)
++static void ice_vsi_cfg_frame_size(struct ice_vsi *vsi, struct ice_rx_ring *ring)
+ {
+ if (!vsi->netdev || test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags)) {
+- vsi->max_frame = ICE_MAX_FRAME_LEGACY_RX;
+- vsi->rx_buf_len = ICE_RXBUF_1664;
++ ring->max_frame = ICE_MAX_FRAME_LEGACY_RX;
++ ring->rx_buf_len = ICE_RXBUF_1664;
+ #if (PAGE_SIZE < 8192)
+ } else if (!ICE_2K_TOO_SMALL_WITH_PADDING &&
+ (vsi->netdev->mtu <= ETH_DATA_LEN)) {
+- vsi->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
+- vsi->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
++ ring->max_frame = ICE_RXBUF_1536 - NET_IP_ALIGN;
++ ring->rx_buf_len = ICE_RXBUF_1536 - NET_IP_ALIGN;
+ #endif
+ } else {
+- vsi->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
+- vsi->rx_buf_len = ICE_RXBUF_3072;
++ ring->max_frame = ICE_AQ_SET_MAC_FRAME_SIZE_MAX;
++ ring->rx_buf_len = ICE_RXBUF_3072;
+ }
+ }
+
+@@ -670,15 +672,15 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
+ {
+ u16 i;
+
+- if (vsi->type == ICE_VSI_VF)
+- goto setup_rings;
+-
+- ice_vsi_cfg_frame_size(vsi);
+-setup_rings:
+ /* set up individual rings */
+ ice_for_each_rxq(vsi, i) {
+- int err = ice_vsi_cfg_rxq(vsi->rx_rings[i]);
++ struct ice_rx_ring *ring = vsi->rx_rings[i];
++ int err;
++
++ if (vsi->type != ICE_VSI_VF)
++ ice_vsi_cfg_frame_size(vsi, ring);
+
++ err = ice_vsi_cfg_rxq(ring);
+ if (err)
+ return err;
+ }
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index 7130992d41779..7eef66f5964a3 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -358,8 +358,9 @@ struct ice_rx_ring {
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
+ struct xsk_buff_pool *xsk_pool;
+ u32 nr_frags;
+- dma_addr_t dma; /* physical address of ring */
++ u16 max_frame;
+ u16 rx_buf_len;
++ dma_addr_t dma; /* physical address of ring */
+ u8 dcb_tc; /* Traffic class of ring */
+ u8 ptp_rx;
+ #define ICE_RX_FLAGS_RING_BUILD_SKB BIT(1)
+diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+index 87ffd25b268a2..471d64d202b76 100644
+--- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c
++++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c
+@@ -1748,19 +1748,18 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
+ (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+ qpi->rxq.databuffer_size < 1024))
+ goto error_param;
+- vsi->rx_buf_len = qpi->rxq.databuffer_size;
+- ring->rx_buf_len = vsi->rx_buf_len;
++ ring->rx_buf_len = qpi->rxq.databuffer_size;
+ if (qpi->rxq.max_pkt_size > max_frame_size ||
+ qpi->rxq.max_pkt_size < 64)
+ goto error_param;
+
+- vsi->max_frame = qpi->rxq.max_pkt_size;
++ ring->max_frame = qpi->rxq.max_pkt_size;
+ /* add space for the port VLAN since the VF driver is
+ * not expected to account for it in the MTU
+ * calculation
+ */
+ if (ice_vf_is_port_vlan_ena(vf))
+- vsi->max_frame += VLAN_HLEN;
++ ring->max_frame += VLAN_HLEN;
+
+ if (ice_vsi_cfg_single_rxq(vsi, q_idx)) {
+ dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n",
+--
+2.51.0
+
--- /dev/null
+From bd63bed57c71644099b052637bea1e8adee59308 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 22:47:21 +0900
+Subject: igc: don't fail igc_probe() on LED setup error
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit 528eb4e19ec0df30d0c9ae4074ce945667dde919 ]
+
+When igc_led_setup() fails, igc_probe() fails and triggers a kernel panic
+in free_netdev() since unregister_netdev() is not called. [1]
+This behavior can be tested using the fault-injection framework, especially
+the failslab feature. [2]
+
+Since LED support is not mandatory, treat LED setup failures as
+non-fatal and continue probe with a warning message, consequently
+avoiding the kernel panic.
+
+[1]
+ kernel BUG at net/core/dev.c:12047!
+ Oops: invalid opcode: 0000 [#1] SMP NOPTI
+ CPU: 0 UID: 0 PID: 937 Comm: repro-igc-led-e Not tainted 6.17.0-rc4-enjuk-tnguy-00865-gc4940196ab02 #64 PREEMPT(voluntary)
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+ RIP: 0010:free_netdev+0x278/0x2b0
+ [...]
+ Call Trace:
+ <TASK>
+ igc_probe+0x370/0x910
+ local_pci_probe+0x3a/0x80
+ pci_device_probe+0xd1/0x200
+ [...]
+
+[2]
+ #!/bin/bash -ex
+
+ FAILSLAB_PATH=/sys/kernel/debug/failslab/
+ DEVICE=0000:00:05.0
+ START_ADDR=$(grep " igc_led_setup" /proc/kallsyms \
+ | awk '{printf("0x%s", $1)}')
+ END_ADDR=$(printf "0x%x" $((START_ADDR + 0x100)))
+
+ echo $START_ADDR > $FAILSLAB_PATH/require-start
+ echo $END_ADDR > $FAILSLAB_PATH/require-end
+ echo 1 > $FAILSLAB_PATH/times
+ echo 100 > $FAILSLAB_PATH/probability
+ echo N > $FAILSLAB_PATH/ignore-gfp-wait
+
+ echo $DEVICE > /sys/bus/pci/drivers/igc/bind
+
+Fixes: ea578703b03d ("igc: Add support for LEDs on i225/i226")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
+Tested-by: Mor Bar-Gabay <morx.bar.gabay@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc.h | 1 +
+ drivers/net/ethernet/intel/igc/igc_main.c | 12 +++++++++---
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 323db1e2be388..79d5fc5ac4fce 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -336,6 +336,7 @@ struct igc_adapter {
+ /* LEDs */
+ struct mutex led_mutex;
+ struct igc_led_classdev *leds;
++ bool leds_available;
+ };
+
+ void igc_up(struct igc_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index aadc0667fa04a..9ba41a427e141 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7169,8 +7169,14 @@ static int igc_probe(struct pci_dev *pdev,
+
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+- if (err)
+- goto err_register;
++ if (err) {
++ netdev_warn_once(netdev,
++ "LED init failed (%d); continuing without LED support\n",
++ err);
++ adapter->leds_available = false;
++ } else {
++ adapter->leds_available = true;
++ }
+ }
+
+ return 0;
+@@ -7226,7 +7232,7 @@ static void igc_remove(struct pci_dev *pdev)
+ cancel_work_sync(&adapter->watchdog_task);
+ hrtimer_cancel(&adapter->hrtimer);
+
+- if (IS_ENABLED(CONFIG_IGC_LEDS))
++ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
+ igc_led_free(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+--
+2.51.0
+
--- /dev/null
+From d8825196666154e4ecd79e576b938db6216f04c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:20 +0200
+Subject: mptcp: set remote_deny_join_id0 on SYN recv
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 96939cec994070aa5df852c10fad5fc303a97ea3 ]
+
+When a SYN containing the 'C' flag (deny join id0) was received, this
+piece of information was not propagated to the path-manager.
+
+Even if this flag is mainly set on the server side, a client can also
+tell the server it cannot try to establish new subflows to the client's
+initial IP address and port. The server's PM should then record such
+info when received, and before sending events about the new connection.
+
+Fixes: df377be38725 ("mptcp: add deny_join_id0 in mptcp_options_received")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-1-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/subflow.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index a05f201d194c5..17d1a9d8b0e98 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -888,6 +888,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+
+ ctx->subflow_id = 1;
+ owner = mptcp_sk(ctx->conn);
++
++ if (mp_opt.deny_join_id0)
++ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
++
+ mptcp_pm_new_connection(owner, child, 1);
+
+ /* with OoO packets we can reach here without ingress
+--
+2.51.0
+
--- /dev/null
+From bb855294d9be986f55e069c1689c699a76c4bdb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:23 +0200
+Subject: mptcp: tfo: record 'deny join id0' info
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 92da495cb65719583aa06bc946aeb18a10e1e6e2 ]
+
+When TFO is used, the check to see if the 'C' flag (deny join id0) was
+set was bypassed.
+
+This flag can be set when TFO is used, so the check should be done in
+that case as well.
+
+Note that the set_fully_established label is also used when a 4th ACK is
+received. In this case, deny_join_id0 will not be set.
+
+Fixes: dfc8d0603033 ("mptcp: implement delayed seq generation for passive fastopen")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-4-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/options.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 7d4718a57bdcc..479a3bfa87aa2 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -985,13 +985,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ return false;
+ }
+
+- if (mp_opt->deny_join_id0)
+- WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+-
+ if (unlikely(!READ_ONCE(msk->pm.server_side)))
+ pr_warn_once("bogus mpc option on established client sk");
+
+ set_fully_established:
++ if (mp_opt->deny_join_id0)
++ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
++
+ mptcp_data_lock((struct sock *)msk);
+ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ mptcp_data_unlock((struct sock *)msk);
+--
+2.51.0
+
--- /dev/null
+From 774e166ea15e2b2075420b2933782e739ceff458 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
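+A standalone demonstration of the arithmetic (userspace sketch, not driver
+code): the comparison yields an int, so the shift is performed in 32-bit
+arithmetic and is undefined once iq_no reaches 32, whereas widening to a
+64-bit type first keeps the whole expression well defined.
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        int instr_type = 64;
+        unsigned int iq_no = 40;  /* any value >= 32 shows the problem */
+        uint64_t bad, good;
+
+        /* (instr_type == 64) is an int; shifting an int by >= 32 bits
+         * is undefined behaviour, so the intended bit is lost.
+         */
+        bad = (instr_type == 64) << iq_no;
+
+        /* widen first, then shift: the full 64-bit mask is computed */
+        good = (uint64_t)(instr_type == 64) << iq_no;
+
+        printf("bad=%#llx good=%#llx\n",
+               (unsigned long long)bad, (unsigned long long)good);
+        return 0;
+    }
+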
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index de8a6ce86ad7e..12105ffb5dac6 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
--- /dev/null
+From cf00a9167b79ba1fc0e06bda875e14067fb1c144 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 15:24:32 +0300
+Subject: net/mlx5e: Harden uplink netdev access against device unbind
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+[ Upstream commit 6b4be64fd9fec16418f365c2d8e47a7566e9eba5 ]
+
+The function mlx5_uplink_netdev_get() gets the uplink netdevice
+pointer from mdev->mlx5e_res.uplink_netdev. However, the netdevice can
+be removed and its pointer cleared when unbound from the mlx5_core.eth
+driver. This results in a NULL pointer, causing a kernel panic.
+
+ BUG: unable to handle page fault for address: 0000000000001300
+ at RIP: 0010:mlx5e_vport_rep_load+0x22a/0x270 [mlx5_core]
+ Call Trace:
+ <TASK>
+ mlx5_esw_offloads_rep_load+0x68/0xe0 [mlx5_core]
+ esw_offloads_enable+0x593/0x910 [mlx5_core]
+ mlx5_eswitch_enable_locked+0x341/0x420 [mlx5_core]
+ mlx5_devlink_eswitch_mode_set+0x17e/0x3a0 [mlx5_core]
+ devlink_nl_eswitch_set_doit+0x60/0xd0
+ genl_family_rcv_msg_doit+0xe0/0x130
+ genl_rcv_msg+0x183/0x290
+ netlink_rcv_skb+0x4b/0xf0
+ genl_rcv+0x24/0x40
+ netlink_unicast+0x255/0x380
+ netlink_sendmsg+0x1f3/0x420
+ __sock_sendmsg+0x38/0x60
+ __sys_sendto+0x119/0x180
+ do_syscall_64+0x53/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Ensure the pointer is valid before use by checking it for NULL. If it
+is valid, immediately call netdev_hold() to take a reference,
+preventing the netdevice from being freed while it is in use.
+
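+The caller-side pattern then becomes (minimal sketch, mirroring what the
+hunks below do in mlx5e_vport_uplink_rep_load()):
+
+    struct net_device *netdev;
+
+    netdev = mlx5_uplink_netdev_get(mdev);
+    if (!netdev)
+        return 0;  /* uplink already unbound; nothing to do */
+
+    /* use netdev here; the tracker reference keeps it alive */
+
+    mlx5_uplink_netdev_put(mdev, netdev);
+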
+Fixes: 7a9fb35e8c3a ("net/mlx5e: Do not reload ethernet ports when changing eswitch mode")
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/1757939074-617281-2-git-send-email-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en_rep.c | 27 +++++++++++++++----
+ .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 1 +
+ .../ethernet/mellanox/mlx5/core/lib/mlx5.h | 15 ++++++++++-
+ include/linux/mlx5/driver.h | 1 +
+ 4 files changed, 38 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 18ec392d17404..b561358474c4f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1497,12 +1497,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
+ static int
+ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ {
+- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
++ struct net_device *netdev;
++ struct mlx5e_priv *priv;
++ int err;
++
++ netdev = mlx5_uplink_netdev_get(dev);
++ if (!netdev)
++ return 0;
+
++ priv = netdev_priv(netdev);
+ rpriv->netdev = priv->netdev;
+- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+- rpriv);
++ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
++ rpriv);
++ mlx5_uplink_netdev_put(dev, netdev);
++ return err;
+ }
+
+ static void
+@@ -1629,8 +1638,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+ {
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
+- struct mlx5e_priv *priv = netdev_priv(netdev);
+- void *ppriv = priv->ppriv;
++ struct mlx5e_priv *priv;
++ void *ppriv;
++
++ if (!netdev) {
++ ppriv = rpriv;
++ goto free_ppriv;
++ }
++
++ priv = netdev_priv(netdev);
++ ppriv = priv->ppriv;
+
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ mlx5e_vport_uplink_rep_unload(rpriv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 02a3563f51ad2..d8c304427e2ab 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -733,6 +733,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+ speed = lksettings.base.speed;
+
+ out:
++ mlx5_uplink_netdev_put(mdev, slave);
+ return speed;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+index 37d5f445598c7..a7486e6d0d5ef 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+@@ -52,7 +52,20 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+
+ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+ {
+- return mdev->mlx5e_res.uplink_netdev;
++ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
++ struct net_device *netdev;
++
++ mutex_lock(&mlx5e_res->uplink_netdev_lock);
++ netdev = mlx5e_res->uplink_netdev;
++ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
++ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
++ return netdev;
++}
++
++static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
++ struct net_device *netdev)
++{
++ netdev_put(netdev, &mdev->mlx5e_res.tracker);
+ }
+
+ struct mlx5_sd;
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index da9749739abde..9a8eb644f6707 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -689,6 +689,7 @@ struct mlx5e_resources {
+ bool tisn_valid;
+ } hw_objs;
+ struct net_device *uplink_netdev;
++ netdevice_tracker tracker;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
+ };
+--
+2.51.0
+
--- /dev/null
+From a8b0f43e4877c088575ad220fe1c1925d71b124f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments `rx_dropped` core stat when it fails.
+The driver was also updating `ndev->stats.rx_dropped` in the same path.
+Since both are reported together via the `ip -s -s` command, this resulted
+in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 998586872599b..c692d2e878b2e 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From f829f20e8a6a6fe4ab178be7e99a3e32022b7382 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Sep 2025 20:07:44 -0300
+Subject: net/tcp: Fix a NULL pointer dereference when using TCP-AO with
+ TCP_REPAIR
+
+From: Anderson Nascimento <anderson@allelesecurity.com>
+
+[ Upstream commit 2e7bba08923ebc675b1f0e0e0959e68e53047838 ]
+
+A NULL pointer dereference can occur in tcp_ao_finish_connect() during a
+connect() system call on a socket with a TCP-AO key added and TCP_REPAIR
+enabled.
+
+The function is called with skb being NULL and attempts to dereference it
+via tcp_hdr(skb)->seq without validating skb first.
+
+Fix this by checking if skb is NULL before dereferencing it.
+
+The comment is taken from bpf_skops_established(), which is also called
+in the same flow. Unlike the function being patched,
+bpf_skops_established() validates the skb before dereferencing it.
+
+int main(void){
+ struct sockaddr_in sockaddr;
+ struct tcp_ao_add tcp_ao;
+ int sk;
+ int one = 1;
+
+ memset(&sockaddr,'\0',sizeof(sockaddr));
+ memset(&tcp_ao,'\0',sizeof(tcp_ao));
+
+ sk = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+
+ sockaddr.sin_family = AF_INET;
+
+ memcpy(tcp_ao.alg_name,"cmac(aes128)",12);
+ memcpy(tcp_ao.key,"ABCDEFGHABCDEFGH",16);
+ tcp_ao.keylen = 16;
+
+ memcpy(&tcp_ao.addr,&sockaddr,sizeof(sockaddr));
+
+ setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &tcp_ao,
+ sizeof(tcp_ao));
+ setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one));
+
+ sockaddr.sin_family = AF_INET;
+ sockaddr.sin_port = htobe16(123);
+
+ inet_aton("127.0.0.1", &sockaddr.sin_addr);
+
+ connect(sk,(struct sockaddr *)&sockaddr,sizeof(sockaddr));
+
+return 0;
+}
+
+$ gcc tcp-ao-nullptr.c -o tcp-ao-nullptr -Wall
+$ unshare -Urn
+
+BUG: kernel NULL pointer dereference, address: 00000000000000b6
+PGD 1f648d067 P4D 1f648d067 PUD 1982e8067 PMD 0
+Oops: Oops: 0000 [#1] SMP NOPTI
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop
+Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:tcp_ao_finish_connect (net/ipv4/tcp_ao.c:1182)
+
+Fixes: 7c2ffaf21bd6 ("net/tcp: Calculate TCP-AO traffic keys")
+Signed-off-by: Anderson Nascimento <anderson@allelesecurity.com>
+Reviewed-by: Dmitry Safonov <0x7f454c46@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250911230743.2551-3-anderson@allelesecurity.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_ao.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
+index bbb8d5f0eae7d..3338b6cc85c48 100644
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -1178,7 +1178,9 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
+ if (!ao)
+ return;
+
+- WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
++ /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
++ if (skb)
++ WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
+ ao->rcv_sne = 0;
+
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+--
+2.51.0
+
--- /dev/null
+From e80b6c133925e67c75e0ad1a2cd8fa7193574265 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 15:32:49 +0200
+Subject: nvme: fix PI insert on write
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 7ac3c2889bc060c3f67cf44df0dbb093a835c176 ]
+
+I recently ran into an issue where the PI generated using the block layer
+integrity code differs from the PI generated by a kernel that uses the
+PRACT fallback when the block layer integrity code is disabled, and I
+tracked this down to us using PRACT incorrectly.
+
+The NVM Command Set Specification (section 5.33 in 1.2, similar in older
+versions) specifies the PRACT insert behavior as:
+
+ Inserted protection information consists of the computed CRC for the
+ protection information format (refer to section 5.3.1) in the Guard
+ field, the LBAT field value in the Application Tag field, the LBST
+ field value in the Storage Tag field, if defined, and the computed
+ reference tag in the Logical Block Reference Tag.
+
+Where the computed reference tag is defined as following for type 1 and
+type 2 using the text below that is duplicated in the respective bullet
+points:
+
+ the value of the computed reference tag for the first logical block of
+ the command is the value contained in the Initial Logical Block
+ Reference Tag (ILBRT) or Expected Initial Logical Block Reference Tag
+ (EILBRT) field in the command, and the computed reference tag is
+ incremented for each subsequent logical block.
+
+So we need to set the ILBRT field, but we currently don't. Interestingly,
+this works fine on my older type 1 formatted SSD, but QEMU trips up on
+this. We already set ILBRT for Write Zeroes since commit aeb7bb061be5
+("nvme: set the PRACT bit when using Write Zeroes with T10 PI").
+
+To ease this, move the PI type check into nvme_set_ref_tag.
+
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 9e223574db7f7..24d82d35041b5 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -890,6 +890,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
+ u32 upper, lower;
+ u64 ref48;
+
++ /* only type1 and type 2 PI formats have a reftag */
++ switch (ns->head->pi_type) {
++ case NVME_NS_DPS_PI_TYPE1:
++ case NVME_NS_DPS_PI_TYPE2:
++ break;
++ default:
++ return;
++ }
++
+ /* both rw and write zeroes share the same reftag format */
+ switch (ns->head->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+@@ -929,13 +938,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
+
+ if (nvme_ns_has_pi(ns->head)) {
+ cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
+-
+- switch (ns->head->pi_type) {
+- case NVME_NS_DPS_PI_TYPE1:
+- case NVME_NS_DPS_PI_TYPE2:
+- nvme_set_ref_tag(ns, cmnd, req);
+- break;
+- }
++ nvme_set_ref_tag(ns, cmnd, req);
+ }
+
+ return BLK_STS_OK;
+@@ -1014,6 +1017,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
+ return BLK_STS_NOTSUPP;
+ control |= NVME_RW_PRINFO_PRACT;
++ nvme_set_ref_tag(ns, cmnd, req);
+ }
+
+ switch (ns->head->pi_type) {
+--
+2.51.0
+
--- /dev/null
+From 82a82949027011bbc7300f6b8fb63c81a37292f8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 06:32:07 -0700
+Subject: octeon_ep: fix VF MAC address lifecycle handling
+
+From: Sathesh B Edara <sedara@marvell.com>
+
+[ Upstream commit a72175c985132885573593222a7b088cf49b07ae ]
+
+Currently, VF MAC address info is not updated when the MAC address is
+configured from the VF, and it is not cleared when the VF is removed. This
+leads to stale or missing MAC information in the PF, which may cause
+incorrect state tracking or inconsistencies when VFs are hot-plugged
+or reassigned.
+
+Fix this by:
+ - storing the VF MAC address in the PF when it is set from VF
+ - clearing the stored VF MAC address when the VF is removed
+
+This ensures that the PF always has correct VF MAC state.
+
+Fixes: cde29af9e68e ("octeon_ep: add PF-VF mailbox communication")
+Signed-off-by: Sathesh B Edara <sedara@marvell.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250916133207.21737-1-sedara@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+index e6eb98d70f3c4..6334b68f28d79 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+@@ -177,6 +177,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
+ dev_err(&oct->pdev->dev, "Get VF MAC address failed via host control Mbox\n");
+ return;
+ }
++ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ }
+
+@@ -186,6 +187,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
+ {
+ int err;
+
++ /* Reset VF-specific information maintained by the PF */
++ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
+ err = octep_ctrl_net_dev_remove(oct, vf_id);
+ if (err) {
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+--
+2.51.0
+
--- /dev/null
+From 58015432fe36cba7c5e650e8e672da75c02792e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 14:38:53 +0800
+Subject: octeontx2-pf: Fix use-after-free bugs in otx2_sync_tstamp()
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit f8b4687151021db61841af983f1cb7be6915d4ef ]
+
+The original code relies on cancel_delayed_work() in otx2_ptp_destroy(),
+which does not ensure that the delayed work item synctstamp_work has fully
+completed if it was already running. This leads to use-after-free scenarios
+where otx2_ptp is deallocated by otx2_ptp_destroy(), while synctstamp_work
+remains active and attempts to dereference otx2_ptp in otx2_sync_tstamp().
+Furthermore, since synctstamp_work is cyclic, the likelihood of triggering
+the bug is non-negligible.
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+otx2_remove() |
+ otx2_ptp_destroy() | otx2_sync_tstamp()
+ cancel_delayed_work() |
+ kfree(ptp) |
+ | ptp = container_of(...); //UAF
+ | ptp-> //UAF
+
+This is confirmed by a KASAN report:
+
+BUG: KASAN: slab-use-after-free in __run_timer_base.part.0+0x7d7/0x8c0
+Write of size 8 at addr ffff88800aa09a18 by task bash/136
+...
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x55/0x70
+ print_report+0xcf/0x610
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ kasan_report+0xb8/0xf0
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ __run_timer_base.part.0+0x7d7/0x8c0
+ ? __pfx___run_timer_base.part.0+0x10/0x10
+ ? __pfx_read_tsc+0x10/0x10
+ ? ktime_get+0x60/0x140
+ ? lapic_next_event+0x11/0x20
+ ? clockevents_program_event+0x1d4/0x2a0
+ run_timer_softirq+0xd1/0x190
+ handle_softirqs+0x16a/0x550
+ irq_exit_rcu+0xaf/0xe0
+ sysvec_apic_timer_interrupt+0x70/0x80
+ </IRQ>
+...
+Allocated by task 1:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ __kasan_kmalloc+0x7f/0x90
+ otx2_ptp_init+0xb1/0x860
+ otx2_probe+0x4eb/0xc30
+ local_pci_probe+0xdc/0x190
+ pci_device_probe+0x2fe/0x470
+ really_probe+0x1ca/0x5c0
+ __driver_probe_device+0x248/0x310
+ driver_probe_device+0x44/0x120
+ __driver_attach+0xd2/0x310
+ bus_for_each_dev+0xed/0x170
+ bus_add_driver+0x208/0x500
+ driver_register+0x132/0x460
+ do_one_initcall+0x89/0x300
+ kernel_init_freeable+0x40d/0x720
+ kernel_init+0x1a/0x150
+ ret_from_fork+0x10c/0x1a0
+ ret_from_fork_asm+0x1a/0x30
+
+Freed by task 136:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ kasan_save_free_info+0x3a/0x60
+ __kasan_slab_free+0x3f/0x50
+ kfree+0x137/0x370
+ otx2_ptp_destroy+0x38/0x80
+ otx2_remove+0x10d/0x4c0
+ pci_device_remove+0xa6/0x1d0
+ device_release_driver_internal+0xf8/0x210
+ pci_stop_bus_device+0x105/0x150
+ pci_stop_and_remove_bus_device_locked+0x15/0x30
+ remove_store+0xcc/0xe0
+ kernfs_fop_write_iter+0x2c3/0x440
+ vfs_write+0x871/0xd70
+ ksys_write+0xee/0x1c0
+ do_syscall_64+0xac/0x280
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the delayed work item is properly canceled before the otx2_ptp is
+deallocated.
+
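+A minimal sketch of the resulting teardown ordering (mirroring the hunk
+below, with the rest of otx2_ptp_destroy() omitted):
+
+    /* wait for a possibly running otx2_sync_tstamp() to finish */
+    cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+
+    /* only now is it safe to tear down and free the ptp state */
+    ptp_clock_unregister(ptp->ptp_clock);
+    kfree(ptp);
+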
+This bug was initially identified through static analysis. To reproduce
+and test it, I simulated the OcteonTX2 PCI device in QEMU and introduced
+artificial delays within the otx2_sync_tstamp() function to increase the
+likelihood of triggering the bug.
+
+Fixes: 2958d17a8984 ("octeontx2-pf: Add support for ptp 1-step mode on CN10K silicon")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+index 63130ba37e9df..69b435ed8fbbe 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
+ if (!ptp)
+ return;
+
+- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
++ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+--
+2.51.0
+
--- /dev/null
+From 48c0c903f896fb7af5ad032e839de0b5b239e592 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index 5b639c942f17a..a2f50db2b8bab 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -304,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
+ kfree(cf);
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From 38857443e2dd3807d79760cfe37117d5131663d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:29:16 +1000
+Subject: qed: Don't collect too many protection override GRC elements
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 56c0a2a9ddc2f5b5078c5fb0f81ab76bbc3d4c37 ]
+
+In the protection override dump path, the firmware can return far too
+many GRC elements, resulting in an attempt to write past the end of the
+previously-kmalloc'ed dump buffer.
+
+This will result in a kernel panic with reason:
+
+ BUG: unable to handle kernel paging request at ADDRESS
+
+where "ADDRESS" is just past the end of the protection override dump
+buffer. The start address of the buffer is:
+ p_hwfn->cdev->dbg_features[DBG_FEATURE_PROTECTION_OVERRIDE].dump_buf
+and the size of the buffer is buf_size in the same data structure.
+
+The panic can be arrived at from either the qede Ethernet driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc02662ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc0267792 [qed]
+ qed_dbg_feature at ffffffffc026aa8f [qed]
+ qed_dbg_all_data at ffffffffc026b211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc027298a [qed]
+ devlink_health_do_dump at ffffffff82497f61
+ devlink_health_report at ffffffff8249cf29
+ qed_report_fatal_error at ffffffffc0272baf [qed]
+ qede_sp_task at ffffffffc045ed32 [qede]
+ process_one_work at ffffffff81d19783
+
+or the qedf storage driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc068b2ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc068c792 [qed]
+ qed_dbg_feature at ffffffffc068fa8f [qed]
+ qed_dbg_all_data at ffffffffc0690211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc069798a [qed]
+ devlink_health_do_dump at ffffffff8aa95e51
+ devlink_health_report at ffffffff8aa9ae19
+ qed_report_fatal_error at ffffffffc0697baf [qed]
+ qed_hw_err_notify at ffffffffc06d32d7 [qed]
+ qed_spq_post at ffffffffc06b1011 [qed]
+ qed_fcoe_destroy_conn at ffffffffc06b2e91 [qed]
+ qedf_cleanup_fcport at ffffffffc05e7597 [qedf]
+ qedf_rport_event_handler at ffffffffc05e7bf7 [qedf]
+ fc_rport_work at ffffffffc02da715 [libfc]
+ process_one_work at ffffffff8a319663
+
+Resolve this by clamping the firmware's return value to the maximum
+number of legal elements the firmware should return.
+
+Fixes: d52c89f120de8 ("qed*: Utilize FW 8.37.2.0")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/f8e1182934aa274c18d0682a12dbaf347595469c.1757485536.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index f67be4b8ad435..523cbd91baf49 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+--
+2.51.0
+
--- /dev/null
+From f572355bcd817072f1ca6ddeb8483d7f803a0813 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 6176457b846bc..de2327ffb0f78 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -135,8 +135,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (up) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
--- /dev/null
+From 762f1158cf3804b6033a4a0ea121febc3751256d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:24 +0200
+Subject: selftests: mptcp: sockopt: fix error messages
+
+From: Geliang Tang <tanggeliang@kylinos.cn>
+
+[ Upstream commit b86418beade11d45540a2d20c4ec1128849b6c27 ]
+
+This patch fixes several issues in the error reporting of the MPTCP sockopt
+selftest:
+
+1. Fix diff not printed: The error messages for counter mismatches had
+   the actual difference ('diff') as an argument, but it was missing from
+   the format string. Displaying it makes debugging easier.
+
+2. Fix variable usage: The error check for 'mptcpi_bytes_acked' incorrectly
+ used 'ret2' (sent bytes) for both the expected value and the difference
+ calculation. It now correctly uses 'ret' (received bytes), which is the
+ expected value for bytes_acked.
+
+3. Fix off-by-one in diff: The calculation for the 'mptcpi_rcv_delta' diff
+ was 's.mptcpi_rcv_delta - ret', which is off-by-one. It has been
+ corrected to 's.mptcpi_rcv_delta - (ret + 1)' to match the expected
+ value in the condition above it.
+
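+A minimal userspace sketch of the format-string mismatch fixed in point 1,
+with made-up values (not taken from the selftest):
+
+	#include <stdio.h>
+	#include <inttypes.h>
+
+	int main(void)
+	{
+		uint64_t got = 5, want = 7;
+
+		/* before: a third argument is passed but has no matching
+		 * conversion, so the difference is silently never printed */
+		printf("got %" PRIu64 ", expect %" PRIu64 "\n", got, want);
+
+		/* after: every argument has a conversion specifier */
+		printf("got %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64 "\n",
+		       got, want, (int64_t)(got - want));
+		return 0;
+	}
+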
+Fixes: 5dcff89e1455 ("selftests: mptcp: explicitly tests aggregate counters")
+Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-5-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/net/mptcp/mptcp_sockopt.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+index 926b0be87c990..1dc2bd6ee4a50 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+@@ -658,22 +658,26 @@ static void process_one_client(int fd, int pipefd)
+
+ do_getsockopts(&s, fd, ret, ret2);
+ if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
+- xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
++ xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
++ s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
+
+ /* be nice when running on top of older kernel */
+ if (s.pkt_stats_avail) {
+ if (s.last_sample.mptcpi_bytes_sent != ret2)
+- xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
++ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_sent, ret2,
+ s.last_sample.mptcpi_bytes_sent - ret2);
+ if (s.last_sample.mptcpi_bytes_received != ret)
+- xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
++ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_received, ret,
+ s.last_sample.mptcpi_bytes_received - ret);
+ if (s.last_sample.mptcpi_bytes_acked != ret)
+- xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
+- s.last_sample.mptcpi_bytes_acked, ret2,
+- s.last_sample.mptcpi_bytes_acked - ret2);
++ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
++ s.last_sample.mptcpi_bytes_acked, ret,
++ s.last_sample.mptcpi_bytes_acked - ret);
+ }
+
+ close(fd);
+--
+2.51.0
+
--- /dev/null
+From 5ce3378e212531dd84e788ff1ed2514a5a0663e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:22 +0200
+Subject: selftests: mptcp: userspace pm: validate deny-join-id0 flag
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 24733e193a0d68f20d220e86da0362460c9aa812 ]
+
+The previous commit adds the MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 flag. Make
+sure it is correctly announced by the other peer when it has been
+received.
+
+pm_nl_ctl will now display 'deny_join_id0:1' when monitoring the events,
+and when this flag was set by the other peer.
+
+The 'Fixes' tag below is the same as the one from the previous commit:
+this patch does not fix anything wrong in the selftests, but it validates
+the previous fix for an issue introduced by that commit ID.
+
+Fixes: 702c2f646d42 ("mptcp: netlink: allow userspace-driven subflow establishment")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-3-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 7 +++++++
+ tools/testing/selftests/net/mptcp/userspace_pm.sh | 14 +++++++++++---
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 994a556f46c15..93fea3442216c 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -188,6 +188,13 @@ static int capture_events(int fd, int event_group)
+ fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
+ fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
++ else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
++ __u16 flags = *(__u16 *)RTA_DATA(attrs);
++
++ /* only print when present, easier */
++ if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
++ fprintf(stderr, ",deny_join_id0:1");
++ }
+
+ attrs = RTA_NEXT(attrs, msg_len);
+ }
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 3651f73451cf8..cc682bf675b2b 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -173,6 +173,9 @@ make_connection()
+ is_v6="v4"
+ fi
+
++ # set this on the client side only: will not affect the rest
++ ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
++
+ :>"$client_evts"
+ :>"$server_evts"
+
+@@ -195,23 +198,28 @@ make_connection()
+ local client_token
+ local client_port
+ local client_serverside
++ local client_nojoin
+ local server_token
+ local server_serverside
++ local server_nojoin
+
+ client_token=$(mptcp_lib_evts_get_info token "$client_evts")
+ client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
+ client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
++ client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts")
+ server_token=$(mptcp_lib_evts_get_info token "$server_evts")
+ server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
++ server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts")
+
+ print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
+- if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+- [ "$server_serverside" = 1 ]
++ if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
++ [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] &&
++ [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
+ then
+ test_pass
+ print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
+ else
+- test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
++ test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})"
+ mptcp_lib_result_print_all_tap
+ exit ${KSFT_FAIL}
+ fi
+--
+2.51.0
+
--- /dev/null
+wifi-wilc1000-avoid-buffer-overflow-in-wid-string-co.patch
+nvme-fix-pi-insert-on-write.patch
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mac80211-increase-scan_ies_len-for-s1g.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+btrfs-fix-invalid-extref-key-setup-when-replaying-de.patch
+um-virtio_uml-fix-use-after-free-after-put_device-in.patch
+um-fix-fd-copy-size-in-os_rcv_fd_msg.patch
+dpaa2-switch-fix-buffer-pool-seeding-for-control-tra.patch
+net-tcp-fix-a-null-pointer-dereference-when-using-tc.patch
+qed-don-t-collect-too-many-protection-override-grc-e.patch
+bonding-set-random-address-only-when-slaves-already-.patch
+mptcp-set-remote_deny_join_id0-on-syn-recv.patch
+selftests-mptcp-userspace-pm-validate-deny-join-id0-.patch
+mptcp-tfo-record-deny-join-id0-info.patch
+selftests-mptcp-sockopt-fix-error-messages.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+ice-store-max_frame-and-rx_buf_len-only-in-ice_rx_ri.patch
+ice-fix-rx-page-leak-on-multi-buffer-frames.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+igc-don-t-fail-igc_probe-on-led-setup-error.patch
+net-mlx5e-harden-uplink-netdev-access-against-device.patch
+bonding-don-t-set-oif-to-bond-dev-when-getting-ns-ta.patch
+octeon_ep-fix-vf-mac-address-lifecycle-handling.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+tls-make-sure-to-abort-the-stream-if-headers-are-bog.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
+octeontx2-pf-fix-use-after-free-bugs-in-otx2_sync_ts.patch
--- /dev/null
+From 7c45dab0e4a5f5b75f7939937c451f42da7bd913 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completes 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
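+A compilable userspace sketch of that reuse sequence (listener setup,
+addresses and error handling omitted; 'listener' and 'dst' are placeholders):
+
+	#include <sys/socket.h>
+	#include <netinet/in.h>
+	#include <unistd.h>
+
+	static void reuse_tfo_child(int listener, struct sockaddr_in *dst)
+	{
+		/* 1. accept(): TFO child socket, still in TCP_SYN_RECV */
+		int fd = accept(listener, NULL, NULL);
+
+		/* 2. connect(AF_UNSPEC): tcp_disconnect() moves it to TCP_CLOSE */
+		struct sockaddr unspec = { .sa_family = AF_UNSPEC };
+		connect(fd, &unspec, sizeof(unspec));
+
+		/* 3. connect() to another destination restarts the timers */
+		connect(fd, (struct sockaddr *)dst, sizeof(*dst));
+		close(fd);
+	}
+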
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 156da81bce068..988992ff898b3 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3286,6 +3286,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ if (old_state != TCP_CLOSE)
+@@ -3400,6 +3401,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet_clear_bit(DEFER_CONNECT, sk);
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
--- /dev/null
+From 322f730e1f4112d39702b23affba4f2e3dd65c2b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 17:28:13 -0700
+Subject: tls: make sure to abort the stream if headers are bogus
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0aeb54ac4cd5cf8f60131b4d9ec0b6dc9c27b20d ]
+
+Normally we wait for the socket to buffer up the whole record
+before we service it. If the socket has a tiny buffer, however,
+we read out the data sooner, to prevent connection stalls.
+Make sure that we abort the connection when we find out late
+that the record is actually invalid. Retrying the parsing is
+fine in itself, but since we copy some more data each time
+before we parse, we can overflow the allocated skb space.
+
+Constructing a scenario in which we're under pressure without
+enough data in the socket to parse the length upfront is quite
+hard. syzbot figured out a way to do this by serving us the header
+in small OOB sends, and then filling in the recvbuf with a large
+normal send.
+
+Make sure that tls_rx_msg_size() aborts strp; if we reach
+an invalid record, there's really no way to recover.
+
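+A minimal sketch of the defensive bound check added in tls_strp_copyin_frag()
+(generic names, not the TLS code itself): refuse to index past a fixed
+fragment array instead of silently writing out of bounds.
+
+	#include <errno.h>
+	#include <stddef.h>
+
+	#define NR_FRAGS 4	/* hypothetical preallocated capacity */
+
+	struct rxbuf {
+		void *frag[NR_FRAGS];
+		size_t next;
+	};
+
+	static int pick_frag(struct rxbuf *b, void **out)
+	{
+		if (b->next >= NR_FRAGS)
+			return -EMSGSIZE;	/* abort instead of overflowing */
+		*out = b->frag[b->next];
+		return 0;
+	}
+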
+Reported-by: Lee Jones <lee@kernel.org>
+Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser")
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://patch.msgid.link/20250917002814.1743558-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls.h | 1 +
+ net/tls/tls_strp.c | 14 +++++++++-----
+ net/tls/tls_sw.c | 3 +--
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index e1eaf12b37426..fca0c0e170047 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
+ int wait_on_pending_writer(struct sock *sk, long *timeo);
+ void tls_err_abort(struct sock *sk, int err);
++void tls_strp_abort_strp(struct tls_strparser *strp, int err);
+
+ int init_prot_info(struct tls_prot_info *prot,
+ const struct tls_crypto_info *crypto_info,
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index d71643b494a1a..98e12f0ff57e5 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -13,7 +13,7 @@
+
+ static struct workqueue_struct *tls_strp_wq;
+
+-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
++void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ {
+ if (strp->stopped)
+ return;
+@@ -211,11 +211,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+ {
++ unsigned int nfrag = skb->len / PAGE_SIZE;
+ size_t len, chunk;
+ skb_frag_t *frag;
+ int sz;
+
+- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
++ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ return -EMSGSIZE;
++ }
++
++ frag = &skb_shinfo(skb)->frags[nfrag];
+
+ len = in_len;
+ /* First make sure we got the header */
+@@ -520,10 +526,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ tls_strp_load_anchor_with_queue(strp, inq);
+ if (!strp->stm.full_len) {
+ sz = tls_rx_msg_size(strp, strp->anchor);
+- if (sz < 0) {
+- tls_strp_abort_strp(strp, sz);
++ if (sz < 0)
+ return sz;
+- }
+
+ strp->stm.full_len = sz;
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index ee92ce3255f93..f46550b96061e 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2440,8 +2440,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+ return data_len + TLS_HEADER_SIZE;
+
+ read_failure:
+- tls_err_abort(strp->sk, ret);
+-
++ tls_strp_abort_strp(strp, ret);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 8eead82becf7e25b1e320ee39d08046f6412d259 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Sep 2025 08:27:15 +0800
+Subject: um: Fix FD copy size in os_rcv_fd_msg()
+
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+
+[ Upstream commit df447a3b4a4b961c9979b4b3ffb74317394b9b40 ]
+
+When copying FDs, the copy size should not include the control
+message header (cmsghdr). Fix it.
+
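+A userspace sketch of the sizing rule (not the um code): the number of
+received descriptors comes from the cmsg payload, i.e. cmsg_len minus the
+header size CMSG_LEN(0).
+
+	#include <sys/socket.h>
+	#include <sys/uio.h>
+	#include <string.h>
+
+	static int recv_fds(int sock, int *fds, unsigned int max_fds)
+	{
+		char data, control[CMSG_SPACE(sizeof(int) * 8)];
+		struct iovec iov = { .iov_base = &data, .iov_len = 1 };
+		struct msghdr msg = {
+			.msg_iov = &iov, .msg_iovlen = 1,
+			.msg_control = control, .msg_controllen = sizeof(control),
+		};
+		struct cmsghdr *cmsg;
+		unsigned int n;
+
+		if (recvmsg(sock, &msg, 0) < 0)
+			return -1;
+
+		cmsg = CMSG_FIRSTHDR(&msg);
+		if (!cmsg || cmsg->cmsg_level != SOL_SOCKET ||
+		    cmsg->cmsg_type != SCM_RIGHTS)
+			return 0;
+
+		/* payload bytes only, the cmsghdr itself is not fd data */
+		n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
+		if (n > max_fds)
+			n = max_fds;
+		memcpy(fds, CMSG_DATA(cmsg), n * sizeof(int));
+		return n;
+	}
+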
+Fixes: 5cde6096a4dd ("um: generalize os_rcv_fd")
+Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/os-Linux/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
+index f1d03cf3957fe..62c176a2c1ac4 100644
+--- a/arch/um/os-Linux/file.c
++++ b/arch/um/os-Linux/file.c
+@@ -556,7 +556,7 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
+ cmsg->cmsg_type != SCM_RIGHTS)
+ return n;
+
+- memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len);
++ memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0));
+ return n;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From ab8389f71a24ddb4d63b899c0fb0fd0d7c0c9624 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 15:00:51 +0800
+Subject: um: virtio_uml: Fix use-after-free after put_device in probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0 ]
+
+When register_virtio_device() fails in virtio_uml_probe(),
+the code sets vu_dev->registered = 1 even though
+the device was not successfully registered.
+This can lead to use-after-free or other issues.
+
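+A small userspace analogue of the ordering rule (names are made up): the
+"registered" flag may only be set once registration has actually succeeded,
+otherwise later teardown acts on an object that was already released.
+
+	#include <errno.h>
+	#include <stdbool.h>
+	#include <stdlib.h>
+
+	struct fake_dev { bool registered; };
+
+	static int register_fake(struct fake_dev *d) { (void)d; return -ENOMEM; }
+
+	static int probe_fake(struct fake_dev *d)
+	{
+		int rc = register_fake(d);
+
+		if (rc) {
+			free(d);	/* drop the reference taken at probe */
+			return rc;	/* and never mark it as registered */
+		}
+		d->registered = true;
+		return 0;
+	}
+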
+Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virtio_uml.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 2b6e701776b6b..e2cba5117fd25 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1231,10 +1231,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+--
+2.51.0
+
--- /dev/null
+From 85f562c264f2ecef4b0bb7bdedd6031717e0d136 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
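+A minimal userspace demonstration of why a u32 cannot carry a negative errno
+(standalone example, not mac80211 code):
+
+	#include <errno.h>
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		uint32_t uret = -EOPNOTSUPP;	/* wraps to a huge positive value */
+		int ret = -EOPNOTSUPP;
+
+		/* the usual "ret < 0" error check silently stops working */
+		printf("u32: %u (negative? %d)\n", uret, uret < 0);
+		printf("int: %d (negative? %d)\n", ret, ret < 0);
+		return 0;
+	}
+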
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index d1c10f5f95160..69de9fdd779f2 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1388,7 +1388,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+ lockdep_assert_wiphy(local->hw.wiphy);
+--
+2.51.0
+
--- /dev/null
+From 3829de569726fc1c624e36a0e688700e6560c9f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Aug 2025 18:54:37 +1000
+Subject: wifi: mac80211: increase scan_ies_len for S1G
+
+From: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+
+[ Upstream commit 7e2f3213e85eba00acb4cfe6d71647892d63c3a1 ]
+
+Currently the S1G capability element is not taken into account
+for the scan_ies_len, which leads to a buffer length validation
+failure in ieee80211_prep_hw_scan() and subsequent WARN in
+__ieee80211_start_scan(). This prevents hw scanning from functioning.
+To fix this, ensure we accommodate the S1G capability length.
+
+Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+Link: https://patch.msgid.link/20250826085437.3493-1-lachlan.hodges@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index caedc939eea19..c745de0aae776 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1120,7 +1120,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ int result, i;
+ enum nl80211_band band;
+ int channels, max_bitrates;
+- bool supp_ht, supp_vht, supp_he, supp_eht;
++ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
+ struct cfg80211_chan_def dflt_chandef = {};
+
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
+@@ -1236,6 +1236,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ supp_vht = false;
+ supp_he = false;
+ supp_eht = false;
++ supp_s1g = false;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+ struct ieee80211_supported_band *sband;
+@@ -1283,6 +1284,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ max_bitrates = sband->n_bitrates;
+ supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
++ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
+
+ for_each_sband_iftype_data(sband, i, iftd) {
+ u8 he_40_mhz_cap;
+@@ -1411,6 +1413,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_cap);
+
++ if (supp_s1g)
++ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
++
+ /*
+ * HE cap element is variable in size - set len to allow max size */
+ if (supp_he) {
+--
+2.51.0
+
--- /dev/null
+From a60a47c24337ee0c1cb4ba0e7d382f25fcabd359 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Aug 2025 22:58:43 +0000
+Subject: wifi: wilc1000: avoid buffer overflow in WID string configuration
+
+From: Ajay.Kathat@microchip.com <Ajay.Kathat@microchip.com>
+
+[ Upstream commit fe9e4d0c39311d0f97b024147a0d155333f388b5 ]
+
+Fix the following copy overflow warning identified by Smatch checker.
+
+ drivers/net/wireless/microchip/wilc1000/wlan_cfg.c:184 wilc_wlan_parse_response_frame()
+ error: '__memcpy()' 'cfg->s[i]->str' copy overflow (512 vs 65537)
+
+This patch introduces size checks before accessing the memory buffer.
+The checks are based on the WID type of the data received from the firmware.
+For WID string configuration, the size limit is determined by the individual
+element size in 'struct wilc_cfg_str_vals', which is maintained in the 'len'
+field of 'struct wilc_cfg_str'.
+
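+A minimal sketch of the bounds check (illustrative names only): the length
+claimed by the firmware must fit both the destination buffer and what is
+left of the received frame before any copy happens.
+
+	#include <stdint.h>
+	#include <stddef.h>
+	#include <string.h>
+
+	static int copy_wid_str(uint8_t *dst, size_t dst_len,
+				const uint8_t *frame, size_t frame_len,
+				size_t claimed_len)
+	{
+		if (claimed_len > dst_len || claimed_len > frame_len)
+			return -1;	/* drop the response instead of overflowing */
+
+		memcpy(dst, frame, claimed_len);
+		return 0;
+	}
+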
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/linux-wireless/aLFbr9Yu9j_TQTey@stanley.mountain
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Ajay Singh <ajay.kathat@microchip.com>
+Link: https://patch.msgid.link/20250829225829.5423-1-ajay.kathat@microchip.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../wireless/microchip/wilc1000/wlan_cfg.c | 37 ++++++++++++++-----
+ .../wireless/microchip/wilc1000/wlan_cfg.h | 5 ++-
+ 2 files changed, 30 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+index 131388886acbf..cfabd5aebb540 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
+ };
+
+ static const struct wilc_cfg_str g_cfg_str[] = {
+- {WID_FIRMWARE_VERSION, NULL},
+- {WID_MAC_ADDR, NULL},
+- {WID_ASSOC_RES_INFO, NULL},
+- {WID_NIL, NULL}
++ {WID_FIRMWARE_VERSION, 0, NULL},
++ {WID_MAC_ADDR, 0, NULL},
++ {WID_ASSOC_RES_INFO, 0, NULL},
++ {WID_NIL, 0, NULL}
+ };
+
+ #define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
+@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
+
+ switch (FIELD_GET(WILC_WID_TYPE, wid)) {
+ case WID_CHAR:
++ len = 3;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
+ i++;
+
+ if (cfg->b[i].id == wid)
+ cfg->b[i].val = info[4];
+
+- len = 3;
+ break;
+
+ case WID_SHORT:
++ len = 4;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
+ i++;
+
+ if (cfg->hw[i].id == wid)
+ cfg->hw[i].val = get_unaligned_le16(&info[4]);
+
+- len = 4;
+ break;
+
+ case WID_INT:
++ len = 6;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
+ i++;
+
+ if (cfg->w[i].id == wid)
+ cfg->w[i].val = get_unaligned_le32(&info[4]);
+
+- len = 6;
+ break;
+
+ case WID_STR:
++ len = 2 + get_unaligned_le16(&info[2]);
++
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
+ i++;
+
+- if (cfg->s[i].id == wid)
++ if (cfg->s[i].id == wid) {
++ if (len > cfg->s[i].len || (len + 2 > size))
++ return;
++
+ memcpy(cfg->s[i].str, &info[2],
+- get_unaligned_le16(&info[2]) + 2);
++ len);
++ }
+
+- len = 2 + get_unaligned_le16(&info[2]);
+ break;
+
+ default:
+@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
+ /* store the string cfg parameters */
+ wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
+ wl->cfg.s[i].str = str_vals->firmware_version;
++ wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
+ i++;
+ wl->cfg.s[i].id = WID_MAC_ADDR;
+ wl->cfg.s[i].str = str_vals->mac_address;
++ wl->cfg.s[i].len = sizeof(str_vals->mac_address);
+ i++;
+ wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
+ wl->cfg.s[i].str = str_vals->assoc_rsp;
++ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
+ i++;
+ wl->cfg.s[i].id = WID_NIL;
+ wl->cfg.s[i].str = NULL;
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+index 7038b74f8e8ff..5ae74bced7d74 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+@@ -24,12 +24,13 @@ struct wilc_cfg_word {
+
+ struct wilc_cfg_str {
+ u16 id;
++ u16 len;
+ u8 *str;
+ };
+
+ struct wilc_cfg_str_vals {
+- u8 mac_address[7];
+- u8 firmware_version[129];
++ u8 mac_address[8];
++ u8 firmware_version[130];
+ u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
+ };
+
+--
+2.51.0
+
--- /dev/null
+From c4d95feb9fe64d8801ec4aa3a61fec618d05e02c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications to believe write() is
+allowed, potentially resulting in unnecessary wakeups.
+
+This issue dates back to the driver's initial code added by a commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by a commit a9a08845e9ac ("vfs: do
+bulk POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
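+From userspace, the effect can be seen with a plain poll() loop on the hwdep
+device: with EPOLLOUT always set, the loop below never sleeps even though the
+device accepts no writes (sketch only, error handling trimmed):
+
+	#include <poll.h>
+	#include <stdio.h>
+
+	static void wait_for_events(int fd)
+	{
+		struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
+
+		for (;;) {
+			if (poll(&pfd, 1, -1) < 0)
+				break;
+			if (pfd.revents & POLLIN)
+				printf("data ready\n");
+			/* a spurious POLLOUT would make this loop spin */
+		}
+	}
+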
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 88d1f4b56e4be..a220ac0c8eb83 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From 1a8d25a84d7dfb35640a6c1f803693c5b33cc832 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 08:01:26 +0000
+Subject: bonding: don't set oif to bond dev when getting NS target destination
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit a8ba87f04ca9cdec06776ce92dce1395026dc3bb ]
+
+Unlike IPv4, IPv6 routing strictly requires the source address to be valid
+on the outgoing interface. If the NS target is set to a remote VLAN interface,
+and the source address is also configured on a VLAN over a bond interface,
+setting the oif to the bond device will fail to retrieve the correct
+destination route.
+
+Fix this by not setting the oif to the bond device when retrieving the NS
+target destination. This allows the correct destination device (the VLAN
+interface) to be determined, so that bond_verify_device_path can return the
+proper VLAN tags for sending NS messages.
+
+Reported-by: David Wilder <wilder@us.ibm.com>
+Closes: https://lore.kernel.org/netdev/aGOKggdfjv0cApTO@fedora/
+Suggested-by: Jay Vosburgh <jv@jvosburgh.net>
+Tested-by: David Wilder <wilder@us.ibm.com>
+Acked-by: Jay Vosburgh <jv@jvosburgh.net>
+Fixes: 4e24be018eb9 ("bonding: add new parameter ns_targets")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250916080127.430626-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index e413340be2bff..e23195dd74776 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3339,7 +3339,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+- fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+--
+2.51.0
+
--- /dev/null
+From ee4dd737079778582981c10216e4b3c1945325be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 02:43:34 +0000
+Subject: bonding: set random address only when slaves already exist
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 35ae4e86292ef7dfe4edbb9942955c884e984352 ]
+
+After commit 5c3bf6cba791 ("bonding: assign random address if device
+address is same as bond"), bonding will erroneously randomize the MAC
+address of the first interface added to the bond if fail_over_mac =
+follow.
+
+Correct this by additionally testing for the bond being empty before
+randomizing the MAC.
+
+Fixes: 5c3bf6cba791 ("bonding: assign random address if device address is same as bond")
+Reported-by: Qiuling Ren <qren@redhat.com>
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250910024336.400253-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index c4d53e8e7c152..e413340be2bff 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2115,6 +2115,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
++ bond_has_slaves(bond) &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+--
+2.51.0
+
--- /dev/null
+From 03f81a74805f76d9c702e12f90d8dd531f09eef4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 16:53:21 +0100
+Subject: btrfs: fix invalid extref key setup when replaying dentry
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit b62fd63ade7cb573b114972ef8f9fa505be8d74a ]
+
+The offset for an extref item's key is not the object ID of the parent
+dir, otherwise we would not need the extref item and would use plain ref
+items. Instead the offset is the result of a hash computation that uses
+the object ID of the parent dir and the name associated to the entry.
+So fix this by setting the key offset at replay_one_name() to be the
+result of calling btrfs_extref_hash().
+
+Fixes: 725af92a6251 ("btrfs: Open-code name_in_log_ref in replay_one_name")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index 56d30ec0f52fc..5466a93a28f58 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1933,7 +1933,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+- search_key.offset = key->objectid;
++ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ if (ret < 0) {
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From f234ea32324ca215fda2357001defece2e0a0513 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Sep 2025 15:54:43 +0200
+Subject: btrfs: zoned: fix incorrect ASSERT in
+ btrfs_zoned_reserve_data_reloc_bg()
+
+From: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+
+[ Upstream commit 5b8d2964754102323ca24495ba94892426284e3a ]
+
+When moving a block-group to the dedicated data relocation space-info in
+btrfs_zoned_reserve_data_reloc_bg() it is asserted that the newly
+created block group for data relocation does not contain any
+zone_unusable bytes.
+
+But on disks with zone_capacity < zone_size, the difference between
+zone_size and zone_capacity is accounted as zone_unusable.
+
+Instead of asserting that the block-group does not contain any
+zone_unusable bytes, remove them from the block-group's total size.
+
+Reported-by: Yi Zhang <yi.zhang@redhat.com>
+Link: https://lore.kernel.org/linux-block/CAHj4cs8-cS2E+-xQ-d2Bj6vMJZ+CwT_cbdWBTju4BV35LsvEYw@mail.gmail.com/
+Fixes: daa0fde322350 ("btrfs: zoned: fix data relocation block group reservation")
+Reviewed-by: Naohiro Aota <naohiro.aota@wdc.com>
+Tested-by: Yi Zhang <yi.zhang@redhat.com>
+Signed-off-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/zoned.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index d7a1193332d94..60937127a0bc6 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -2577,9 +2577,9 @@ void btrfs_zoned_reserve_data_reloc_bg(struct btrfs_fs_info *fs_info)
+ spin_lock(&space_info->lock);
+ space_info->total_bytes -= bg->length;
+ space_info->disk_total -= bg->length * factor;
++ space_info->disk_total -= bg->zone_unusable;
+ /* There is no allocation ever happened. */
+ ASSERT(bg->used == 0);
+- ASSERT(bg->zone_unusable == 0);
+ /* No super block in a block group on the zoned setup. */
+ ASSERT(bg->bytes_super == 0);
+ spin_unlock(&space_info->lock);
+--
+2.51.0
+
--- /dev/null
+From 6c82021c81e99a1dffc6639af885139d6755f27e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during [1] LTP cgroup testing when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Teju Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index a723b7dc6e4e2..20f76b2176501 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -126,8 +126,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5553,7 +5576,7 @@ static void css_release_work_fn(struct work_struct *work)
+ cgroup_unlock();
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5562,7 +5585,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5696,7 +5719,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ list_del_rcu(&css->sibling);
+ err_free_css:
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5934,7 +5957,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6320,8 +6343,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 8c922a9d34f27de93a392cc53dbac2e203f0a4c8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, the delayed work
+item is cyclic, the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
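+A kernel-style sketch of the teardown rule for a self-rearming delayed work
+item (placeholder names, not the cnic code): cancel_delayed_work_sync() both
+removes a pending instance and waits for a running one, so no re-queue can
+slip in after the cancel returns.
+
+	#include <linux/workqueue.h>
+	#include <linux/slab.h>
+
+	struct ctx {
+		struct delayed_work task;
+	};
+
+	static void task_fn(struct work_struct *work)
+	{
+		struct ctx *c = container_of(work, struct ctx, task.work);
+
+		/* ... periodic cleanup on c ... */
+		schedule_delayed_work(&c->task, HZ);	/* re-arms itself */
+	}
+
+	static void ctx_teardown(struct ctx *c)
+	{
+		cancel_delayed_work_sync(&c->task);	/* no instance left running */
+		kfree(c);				/* now safe to free */
+	}
+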
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index a9040c42d2ff9..6e97a5a7daaf9 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4230,8 +4230,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From e7c988463efeb036299c0edb6ba9a125f1357ba5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 14:05:28 +0000
+Subject: doc/netlink: Fix typos in operation attributes
+
+From: Remy D. Farley <one-d-wide@protonmail.com>
+
+[ Upstream commit 109f8b51543d106aee50dfe911f439e43fb30c7a ]
+
+I'm trying to generate Rust bindings for netlink using the yaml spec.
+
+It looks like there's a typo in conntrack spec: attribute set conntrack-attrs
+defines attributes "counters-{orig,reply}" (plural), while get operation
+references "counter-{orig,reply}" (singular). The latter should be fixed, as it
+denotes multiple counters (packet and byte). The corresponding C define is
+CTA_COUNTERS_ORIG.
+
+Also, the dump request references the "nfgen-family" attribute, which exists
+neither in the conntrack-attrs attrset nor in the ctattr_type enum. There's a
+member of the nfgenmsg struct with the same name, which is where the family
+value is actually taken from.
+
+> static int ctnetlink_dump_exp_ct(struct net *net, struct sock *ctnl,
+> struct sk_buff *skb,
+> const struct nlmsghdr *nlh,
+> const struct nlattr * const cda[],
+> struct netlink_ext_ack *extack)
+> {
+> int err;
+> struct nfgenmsg *nfmsg = nlmsg_data(nlh);
+> u_int8_t u3 = nfmsg->nfgen_family;
+ ^^^^^^^^^^^^
+
+Signed-off-by: Remy D. Farley <one-d-wide@protonmail.com>
+Fixes: 23fc9311a526 ("netlink: specs: add conntrack dump and stats dump support")
+Link: https://patch.msgid.link/20250913140515.1132886-1-one-d-wide@protonmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/netlink/specs/conntrack.yaml | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/Documentation/netlink/specs/conntrack.yaml b/Documentation/netlink/specs/conntrack.yaml
+index 840dc4504216b..1865ddf01fb0f 100644
+--- a/Documentation/netlink/specs/conntrack.yaml
++++ b/Documentation/netlink/specs/conntrack.yaml
+@@ -575,8 +575,8 @@ operations:
+ - nat-dst
+ - timeout
+ - mark
+- - counter-orig
+- - counter-reply
++ - counters-orig
++ - counters-reply
+ - use
+ - id
+ - nat-dst
+@@ -591,7 +591,6 @@ operations:
+ request:
+ value: 0x101
+ attributes:
+- - nfgen-family
+ - mark
+ - filter
+ - status
+@@ -608,8 +607,8 @@ operations:
+ - nat-dst
+ - timeout
+ - mark
+- - counter-orig
+- - counter-reply
++ - counters-orig
++ - counters-reply
+ - use
+ - id
+ - nat-dst
+--
+2.51.0
+
--- /dev/null
+From 61af5c852378b936a7ccfefdb6a657fa0c8cea73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 17:48:25 +0300
+Subject: dpaa2-switch: fix buffer pool seeding for control traffic
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 2690cb089502b80b905f2abdafd1bf2d54e1abef ]
+
+Starting with commit c50e7475961c ("dpaa2-switch: Fix error checking in
+dpaa2_switch_seed_bp()"), the probing of a second DPSW object errors out
+like below.
+
+fsl_dpaa2_switch dpsw.1: fsl_mc_driver_probe failed: -12
+fsl_dpaa2_switch dpsw.1: probe with driver fsl_dpaa2_switch failed with error -12
+
+The aforementioned commit brought to the surface the fact that seeding
+buffers into the buffer pool destined for control traffic is not
+successful and an access violation recoverable error can be seen in the
+MC firmware log:
+
+[E, qbman_rec_isr:391, QBMAN] QBMAN recoverable event 0x1000000
+
+This happens because the driver incorrectly used the ID of the DPBP
+object instead of the hardware buffer pool ID when trying to release
+buffers into it.
+
+This is because any DPSW object uses two buffer pools, one managed by
+the Linux driver and destined for control traffic packet buffers and the
+other one managed by the MC firmware and destined only for offloaded
+traffic. And since the buffer pool managed by the MC firmware does not
+have an external facing DPBP equivalent, any subsequent DPBP objects
+created after the first DPSW will have a DPBP id different from the
+underlying hardware buffer ID.
+
+The issue was not caught earlier because these two numbers can be
+identical when all DPBP objects are created before the DPSW objects are.
+This is the case when the DPL file is used to describe the entire DPAA2
+object layout and objects are created at boot time and it's also true
+for the first DPSW being created dynamically using ls-addsw.
+
+Fix this by using the buffer pool ID instead of the DPBP id when
+releasing buffers into the pool.
+
+Fixes: 2877e4f7e189 ("staging: dpaa2-switch: setup buffer pool and RX path rings")
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20250910144825.2416019-1-ioana.ciornei@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 4643a33806182..b1e1ad9e4b48e 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2736,7 +2736,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+- ethsw->bpid = dpbp_attrs.id;
++ ethsw->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+--
+2.51.0
+
--- /dev/null
+From 14b11467ec3cb433082c100824da6dc88531f664 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 11:33:31 +0200
+Subject: dpll: fix clock quality level reporting
+
+From: Ivan Vecera <ivecera@redhat.com>
+
+[ Upstream commit 70d99623d5c11e1a9bcc564b8fbad6fa916913d8 ]
+
+The DPLL_CLOCK_QUALITY_LEVEL_ITU_OPT1_EPRC is not reported via netlink
+due to a bug in dpll_msg_add_clock_quality_level(). The usage of
+DPLL_CLOCK_QUALITY_LEVEL_MAX for both DECLARE_BITMAP() and
+for_each_set_bit() is not correct because these macros require the bitmap
+size and not the highest valid bit in the bitmap.
+
+Use the correct bitmap size to fix this issue.
+
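+A standalone illustration of the off-by-one (simplified, not the dpll code):
+sizing the iteration by the highest valid value instead of by the number of
+bits silently drops the last entry.
+
+	#include <stdio.h>
+
+	#define QL_MAX 5	/* highest valid value, like DPLL_CLOCK_QUALITY_LEVEL_MAX */
+
+	int main(void)
+	{
+		/* the bitmap must hold QL_MAX + 1 bits (values 0..QL_MAX) */
+		unsigned long qls = 0;
+
+		qls |= 1UL << QL_MAX;	/* report the highest quality level */
+
+		/* iterating up to QL_MAX (exclusive) misses it ... */
+		for (int ql = 0; ql < QL_MAX; ql++)
+			if (qls & (1UL << ql))
+				printf("never printed: level %d\n", ql);
+
+		/* ... iterating over QL_MAX + 1 bits reports it */
+		for (int ql = 0; ql < QL_MAX + 1; ql++)
+			if (qls & (1UL << ql))
+				printf("level %d reported\n", ql);
+		return 0;
+	}
+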
+Fixes: a1afb959add1 ("dpll: add clock quality level attribute and op")
+Signed-off-by: Ivan Vecera <ivecera@redhat.com>
+Reviewed-by: Arkadiusz Kubalewski <arkadiusz.kubalewski@intel.com>
+Link: https://patch.msgid.link/20250912093331.862333-1-ivecera@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dpll/dpll_netlink.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dpll/dpll_netlink.c b/drivers/dpll/dpll_netlink.c
+index c130f87147fa3..815de752bcd38 100644
+--- a/drivers/dpll/dpll_netlink.c
++++ b/drivers/dpll/dpll_netlink.c
+@@ -173,8 +173,8 @@ static int
+ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
+ struct netlink_ext_ack *extack)
+ {
++ DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1) = { 0 };
+ const struct dpll_device_ops *ops = dpll_device_ops(dpll);
+- DECLARE_BITMAP(qls, DPLL_CLOCK_QUALITY_LEVEL_MAX) = { 0 };
+ enum dpll_clock_quality_level ql;
+ int ret;
+
+@@ -183,7 +183,7 @@ dpll_msg_add_clock_quality_level(struct sk_buff *msg, struct dpll_device *dpll,
+ ret = ops->clock_quality_level_get(dpll, dpll_priv(dpll), qls, extack);
+ if (ret)
+ return ret;
+- for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX)
++ for_each_set_bit(ql, qls, DPLL_CLOCK_QUALITY_LEVEL_MAX + 1)
+ if (nla_put_u32(msg, DPLL_A_CLOCK_QUALITY_LEVEL, ql))
+ return -EMSGSIZE;
+
+--
+2.51.0
+
--- /dev/null
+From 5fcb8c7c335a1169c6f9d3678e792bcf3ae7dccb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a head write-back feature: the hardware reports the last
+successfully sent descriptor by writing its position to a memory
+location. The memory barrier in i40e_clean_tx_irq() was used to avoid
+forward-reading descriptor fields in case the DD bit was not set. With
+this feature in place, that situation cannot happen, as we know in
+advance how many descriptors the HW has dealt with.
+
+Besides, the barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not
+before. Digging through git history shows that the barrier was indeed
+placed before the DD bit check; in any case, the commit introducing
+i40e_get_head() should have wiped it out altogether.
+
+Also, one commit did s/read_barrier_depends/smp_rmb/ when the get-head
+feature was already in place, but that change was only theoretical and
+based on ixgbe experience, which differs here in that ixgbe has to read
+the DD bit from the HW descriptor.
+
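+A simplified sketch of head write-back based cleaning (illustrative
+names and assumed helpers, not the exact driver code): completion is
+derived from the index the hardware wrote back, so no descriptor field
+is read speculatively and no read barrier is needed:
+
+    /* 'head' is the descriptor index the HW wrote back to memory */
+    static void clean_tx_ring(struct my_tx_ring *ring, unsigned int head)
+    {
+            unsigned int ntc = ring->next_to_clean;
+
+            while (ntc != head) {
+                    free_tx_buffer(ring, ntc);      /* assumed helper */
+                    if (++ntc == ring->count)       /* handle ring wrap */
+                            ntc = 0;
+            }
+            ring->next_to_clean = ntc;
+    }
+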
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index c006f716a3bdb..ca7517a68a2c3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -947,9 +947,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From 3a90a47e3f83917ff8c27fb1eb2d025ff046ad7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 16:00:14 -0700
+Subject: ice: fix Rx page leak on multi-buffer frames
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 84bf1ac85af84d354c7a2fdbdc0d4efc8aaec34b ]
+
+The ice_put_rx_mbuf() function handles calling ice_put_rx_buf() for each
+buffer in the current frame. This function was introduced as part of
+handling multi-buffer XDP support in the ice driver.
+
+It works by iterating over the buffers from first_desc up to 1 plus the
+total number of fragments in the frame, cached from before the XDP program
+was executed.
+
+If the hardware posts a descriptor with a size of 0, the logic used in
+ice_put_rx_mbuf() breaks. Such descriptors get skipped and don't get added
+as fragments in ice_add_xdp_frag. Since the buffer isn't counted as a
+fragment, we do not iterate over it in ice_put_rx_mbuf(), and thus we don't
+call ice_put_rx_buf().
+
+Because we don't call ice_put_rx_buf(), we don't attempt to re-use the
+page or free it. This leaves a stale page in the ring, as we don't
+increment next_to_alloc.
+
+The ice_reuse_rx_page() assumes that the next_to_alloc has been incremented
+properly, and that it always points to a buffer with a NULL page. Since
+this function doesn't check, it will happily recycle a page over the top
+of the next_to_alloc buffer, losing track of the old page.
+
+Note that this leak only occurs for multi-buffer frames. The
+ice_put_rx_mbuf() function always handles at least one buffer, so a
+single-buffer frame will always get handled correctly. It is not clear
+precisely why the hardware sometimes hands us descriptors with a size of
+0, but it happens somewhat regularly with "jumbo frames" at a 9K MTU.
+
+To fix ice_put_rx_mbuf(), we need to make sure to call ice_put_rx_buf() on
+all buffers between first_desc and next_to_clean. Borrow the logic of a
+similar function in i40e used for this same purpose. Use the same logic
+also in ice_get_pgcnts().
+
+Instead of iterating over just the number of fragments, use a loop which
+iterates until the current index reaches the next_to_clean element just
+past the current frame. Unlike i40e, the ice_put_rx_mbuf() function does
+call ice_put_rx_buf() on the last buffer of the frame, the one
+indicating the end of the packet.
+
+For non-linear (multi-buffer) frames, we need to take care when adjusting
+the pagecnt_bias. An XDP program might release fragments from the tail of
+the frame, in which case that fragment page is already released. Only
+update the pagecnt_bias for the first descriptor and fragments still
+remaining post-XDP program. Take care to only access the shared info for
+fragmented buffers, as this avoids a significant cache miss.
+
+The xdp_xmit value only needs to be updated if an XDP program is run, and
+only once per packet. Drop the xdp_xmit pointer argument from
+ice_put_rx_mbuf(). Instead, set xdp_xmit in the ice_clean_rx_irq() function
+directly. This avoids needing to pass the argument and avoids an extra
+bit-wise OR for each buffer in the frame.
+
+Move the increment of the ntc local variable to ensure it is updated
+*before* all calls to ice_get_pgcnts() or ice_put_rx_mbuf(), as the loop
+logic requires the index of the element just past the current frame.
+
+Now that we use an index pointer in the ring to identify the packet, we no
+longer need to track or cache the number of fragments in the rx_ring.
+
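+The resulting walk can be sketched as follows (simplified; the actual
+change is in the hunks below):
+
+    idx = rx_ring->first_desc;
+    while (idx != ntc) {                 /* ntc = element just past the frame */
+            buf = &rx_ring->rx_buf[idx];
+            if (++idx == cnt)            /* cnt = rx_ring->count, wrap around */
+                    idx = 0;
+            ice_put_rx_buf(rx_ring, buf);
+    }
+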
+Cc: Christoph Petrausch <christoph.petrausch@deepl.com>
+Cc: Jesper Dangaard Brouer <hawk@kernel.org>
+Reported-by: Jaroslav Pulchart <jaroslav.pulchart@gooddata.com>
+Closes: https://lore.kernel.org/netdev/CAK8fFZ4hY6GUJNENz3wY9jaYLZXGfpr7dnZxzGMYoE44caRbgw@mail.gmail.com/
+Fixes: 743bbd93cf29 ("ice: put Rx buffers after being done with current frame")
+Tested-by: Michal Kubiak <michal.kubiak@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Tested-by: Priya Singh <priyax.singh@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_txrx.c | 80 ++++++++++-------------
+ drivers/net/ethernet/intel/ice/ice_txrx.h | 1 -
+ 2 files changed, 34 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
+index c50cf3ad190e9..4766597ac5550 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
+@@ -865,10 +865,6 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+ __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
+ rx_buf->page_offset, size);
+ sinfo->xdp_frags_size += size;
+- /* remember frag count before XDP prog execution; bpf_xdp_adjust_tail()
+- * can pop off frags but driver has to handle it on its own
+- */
+- rx_ring->nr_frags = sinfo->nr_frags;
+
+ if (page_is_pfmemalloc(rx_buf->page))
+ xdp_buff_set_frag_pfmemalloc(xdp);
+@@ -939,20 +935,20 @@ ice_get_rx_buf(struct ice_rx_ring *rx_ring, const unsigned int size,
+ /**
+ * ice_get_pgcnts - grab page_count() for gathered fragments
+ * @rx_ring: Rx descriptor ring to store the page counts on
++ * @ntc: the next to clean element (not included in this frame!)
+ *
+ * This function is intended to be called right before running XDP
+ * program so that the page recycling mechanism will be able to take
+ * a correct decision regarding underlying pages; this is done in such
+ * way as XDP program can change the refcount of page
+ */
+-static void ice_get_pgcnts(struct ice_rx_ring *rx_ring)
++static void ice_get_pgcnts(struct ice_rx_ring *rx_ring, unsigned int ntc)
+ {
+- u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ struct ice_rx_buf *rx_buf;
+ u32 cnt = rx_ring->count;
+
+- for (int i = 0; i < nr_frags; i++) {
++ while (idx != ntc) {
+ rx_buf = &rx_ring->rx_buf[idx];
+ rx_buf->pgcnt = page_count(rx_buf->page);
+
+@@ -1125,62 +1121,51 @@ ice_put_rx_buf(struct ice_rx_ring *rx_ring, struct ice_rx_buf *rx_buf)
+ }
+
+ /**
+- * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all frame frags
++ * ice_put_rx_mbuf - ice_put_rx_buf() caller, for all buffers in frame
+ * @rx_ring: Rx ring with all the auxiliary data
+ * @xdp: XDP buffer carrying linear + frags part
+- * @xdp_xmit: XDP_TX/XDP_REDIRECT verdict storage
+- * @ntc: a current next_to_clean value to be stored at rx_ring
++ * @ntc: the next to clean element (not included in this frame!)
+ * @verdict: return code from XDP program execution
+ *
+- * Walk through gathered fragments and satisfy internal page
+- * recycle mechanism; we take here an action related to verdict
+- * returned by XDP program;
++ * Called after XDP program is completed, or on error with verdict set to
++ * ICE_XDP_CONSUMED.
++ *
++ * Walk through buffers from first_desc to the end of the frame, releasing
++ * buffers and satisfying internal page recycle mechanism. The action depends
++ * on verdict from XDP program.
+ */
+ static void ice_put_rx_mbuf(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
+- u32 *xdp_xmit, u32 ntc, u32 verdict)
++ u32 ntc, u32 verdict)
+ {
+- u32 nr_frags = rx_ring->nr_frags + 1;
+ u32 idx = rx_ring->first_desc;
+ u32 cnt = rx_ring->count;
+- u32 post_xdp_frags = 1;
+ struct ice_rx_buf *buf;
+- int i;
++ u32 xdp_frags = 0;
++ int i = 0;
+
+ if (unlikely(xdp_buff_has_frags(xdp)))
+- post_xdp_frags += xdp_get_shared_info_from_buff(xdp)->nr_frags;
++ xdp_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+
+- for (i = 0; i < post_xdp_frags; i++) {
++ while (idx != ntc) {
+ buf = &rx_ring->rx_buf[idx];
++ if (++idx == cnt)
++ idx = 0;
+
+- if (verdict & (ICE_XDP_TX | ICE_XDP_REDIR)) {
++ /* An XDP program could release fragments from the end of the
++ * buffer. For these, we need to keep the pagecnt_bias as-is.
++ * To do this, only adjust pagecnt_bias for fragments up to
++ * the total remaining after the XDP program has run.
++ */
++ if (verdict != ICE_XDP_CONSUMED)
+ ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+- *xdp_xmit |= verdict;
+- } else if (verdict & ICE_XDP_CONSUMED) {
++ else if (i++ <= xdp_frags)
+ buf->pagecnt_bias++;
+- } else if (verdict == ICE_XDP_PASS) {
+- ice_rx_buf_adjust_pg_offset(buf, xdp->frame_sz);
+- }
+
+ ice_put_rx_buf(rx_ring, buf);
+-
+- if (++idx == cnt)
+- idx = 0;
+- }
+- /* handle buffers that represented frags released by XDP prog;
+- * for these we keep pagecnt_bias as-is; refcount from struct page
+- * has been decremented within XDP prog and we do not have to increase
+- * the biased refcnt
+- */
+- for (; i < nr_frags; i++) {
+- buf = &rx_ring->rx_buf[idx];
+- ice_put_rx_buf(rx_ring, buf);
+- if (++idx == cnt)
+- idx = 0;
+ }
+
+ xdp->data = NULL;
+ rx_ring->first_desc = ntc;
+- rx_ring->nr_frags = 0;
+ }
+
+ /**
+@@ -1260,6 +1245,10 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ /* retrieve a buffer from the ring */
+ rx_buf = ice_get_rx_buf(rx_ring, size, ntc);
+
++ /* Increment ntc before calls to ice_put_rx_mbuf() */
++ if (++ntc == cnt)
++ ntc = 0;
++
+ if (!xdp->data) {
+ void *hard_start;
+
+@@ -1268,24 +1257,23 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ xdp_prepare_buff(xdp, hard_start, offset, size, !!offset);
+ xdp_buff_clear_frags_flag(xdp);
+ } else if (ice_add_xdp_frag(rx_ring, xdp, rx_buf, size)) {
+- ice_put_rx_mbuf(rx_ring, xdp, NULL, ntc, ICE_XDP_CONSUMED);
++ ice_put_rx_mbuf(rx_ring, xdp, ntc, ICE_XDP_CONSUMED);
+ break;
+ }
+- if (++ntc == cnt)
+- ntc = 0;
+
+ /* skip if it is NOP desc */
+ if (ice_is_non_eop(rx_ring, rx_desc))
+ continue;
+
+- ice_get_pgcnts(rx_ring);
++ ice_get_pgcnts(rx_ring, ntc);
+ xdp_verdict = ice_run_xdp(rx_ring, xdp, xdp_prog, xdp_ring, rx_desc);
+ if (xdp_verdict == ICE_XDP_PASS)
+ goto construct_skb;
+ total_rx_bytes += xdp_get_buff_len(xdp);
+ total_rx_pkts++;
+
+- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
++ xdp_xmit |= xdp_verdict & (ICE_XDP_TX | ICE_XDP_REDIR);
+
+ continue;
+ construct_skb:
+@@ -1298,7 +1286,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
+ rx_ring->ring_stats->rx_stats.alloc_buf_failed++;
+ xdp_verdict = ICE_XDP_CONSUMED;
+ }
+- ice_put_rx_mbuf(rx_ring, xdp, &xdp_xmit, ntc, xdp_verdict);
++ ice_put_rx_mbuf(rx_ring, xdp, ntc, xdp_verdict);
+
+ if (!skb)
+ break;
+diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
+index a4b1e95146327..07155e615f75a 100644
+--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
++++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
+@@ -358,7 +358,6 @@ struct ice_rx_ring {
+ struct ice_tx_ring *xdp_ring;
+ struct ice_rx_ring *next; /* pointer to next ring in q_vector */
+ struct xsk_buff_pool *xsk_pool;
+- u32 nr_frags;
+ u16 max_frame;
+ u16 rx_buf_len;
+ dma_addr_t dma; /* physical address of ring */
+--
+2.51.0
+
--- /dev/null
+From 1da67a9827e9c41105611b9d2ec16f6dcb3f1ecf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 22:47:21 +0900
+Subject: igc: don't fail igc_probe() on LED setup error
+
+From: Kohei Enju <enjuk@amazon.com>
+
+[ Upstream commit 528eb4e19ec0df30d0c9ae4074ce945667dde919 ]
+
+When igc_led_setup() fails, igc_probe() fails and triggers a kernel
+panic in free_netdev() since unregister_netdev() is not called. [1]
+This behavior can be tested using the fault-injection framework, in
+particular the failslab feature. [2]
+
+Since LED support is not mandatory, treat LED setup failures as
+non-fatal and continue the probe with a warning message, thereby
+avoiding the kernel panic.
+
+[1]
+ kernel BUG at net/core/dev.c:12047!
+ Oops: invalid opcode: 0000 [#1] SMP NOPTI
+ CPU: 0 UID: 0 PID: 937 Comm: repro-igc-led-e Not tainted 6.17.0-rc4-enjuk-tnguy-00865-gc4940196ab02 #64 PREEMPT(voluntary)
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+ RIP: 0010:free_netdev+0x278/0x2b0
+ [...]
+ Call Trace:
+ <TASK>
+ igc_probe+0x370/0x910
+ local_pci_probe+0x3a/0x80
+ pci_device_probe+0xd1/0x200
+ [...]
+
+[2]
+ #!/bin/bash -ex
+
+ FAILSLAB_PATH=/sys/kernel/debug/failslab/
+ DEVICE=0000:00:05.0
+ START_ADDR=$(grep " igc_led_setup" /proc/kallsyms \
+ | awk '{printf("0x%s", $1)}')
+ END_ADDR=$(printf "0x%x" $((START_ADDR + 0x100)))
+
+ echo $START_ADDR > $FAILSLAB_PATH/require-start
+ echo $END_ADDR > $FAILSLAB_PATH/require-end
+ echo 1 > $FAILSLAB_PATH/times
+ echo 100 > $FAILSLAB_PATH/probability
+ echo N > $FAILSLAB_PATH/ignore-gfp-wait
+
+ echo $DEVICE > /sys/bus/pci/drivers/igc/bind
+
+Fixes: ea578703b03d ("igc: Add support for LEDs on i225/i226")
+Signed-off-by: Kohei Enju <enjuk@amazon.com>
+Reviewed-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Vitaly Lifshits <vitaly.lifshits@intel.com>
+Reviewed-by: Kurt Kanzenbach <kurt@linutronix.de>
+Tested-by: Mor Bar-Gabay <morx.bar.gabay@intel.com>
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/igc/igc.h | 1 +
+ drivers/net/ethernet/intel/igc/igc_main.c | 12 +++++++++---
+ 2 files changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h
+index 859a15e4ccbab..1bbe7f72757c0 100644
+--- a/drivers/net/ethernet/intel/igc/igc.h
++++ b/drivers/net/ethernet/intel/igc/igc.h
+@@ -343,6 +343,7 @@ struct igc_adapter {
+ /* LEDs */
+ struct mutex led_mutex;
+ struct igc_led_classdev *leds;
++ bool leds_available;
+ };
+
+ void igc_up(struct igc_adapter *adapter);
+diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c
+index 1b4465d6b2b72..5b8f9b5121489 100644
+--- a/drivers/net/ethernet/intel/igc/igc_main.c
++++ b/drivers/net/ethernet/intel/igc/igc_main.c
+@@ -7301,8 +7301,14 @@ static int igc_probe(struct pci_dev *pdev,
+
+ if (IS_ENABLED(CONFIG_IGC_LEDS)) {
+ err = igc_led_setup(adapter);
+- if (err)
+- goto err_register;
++ if (err) {
++ netdev_warn_once(netdev,
++ "LED init failed (%d); continuing without LED support\n",
++ err);
++ adapter->leds_available = false;
++ } else {
++ adapter->leds_available = true;
++ }
+ }
+
+ return 0;
+@@ -7358,7 +7364,7 @@ static void igc_remove(struct pci_dev *pdev)
+ cancel_work_sync(&adapter->watchdog_task);
+ hrtimer_cancel(&adapter->hrtimer);
+
+- if (IS_ENABLED(CONFIG_IGC_LEDS))
++ if (IS_ENABLED(CONFIG_IGC_LEDS) && adapter->leds_available)
+ igc_led_free(adapter);
+
+ /* Release control of h/w to f/w. If f/w is AMT enabled, this
+--
+2.51.0
+
--- /dev/null
+From 0395dfe57a75f5537b6cf89cfd8f14fe37ccd2e1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Sep 2025 13:26:29 +0200
+Subject: ixgbe: destroy aci.lock later within ixgbe_remove path
+
+From: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+
+[ Upstream commit 316ba68175b04a9f6f75295764789ea94e31d48c ]
+
+There's another issue with aci.lock, which the previous patch uncovers.
+aci.lock is destroyed during ixgbe removal while some of the ixgbe
+closing routines are still ongoing. These routines use the Admin Command
+Interface, which requires taking aci.lock that has already been
+destroyed, leading to the call trace below.
+
+[ +0.000004] DEBUG_LOCKS_WARN_ON(lock->magic != lock)
+[ +0.000007] WARNING: CPU: 12 PID: 10277 at kernel/locking/mutex.c:155 mutex_lock+0x5f/0x70
+[ +0.000002] Call Trace:
+[ +0.000003] <TASK>
+[ +0.000006] ixgbe_aci_send_cmd+0xc8/0x220 [ixgbe]
+[ +0.000049] ? try_to_wake_up+0x29d/0x5d0
+[ +0.000009] ixgbe_disable_rx_e610+0xc4/0x110 [ixgbe]
+[ +0.000032] ixgbe_disable_rx+0x3d/0x200 [ixgbe]
+[ +0.000027] ixgbe_down+0x102/0x3b0 [ixgbe]
+[ +0.000031] ixgbe_close_suspend+0x28/0x90 [ixgbe]
+[ +0.000028] ixgbe_close+0xfb/0x100 [ixgbe]
+[ +0.000025] __dev_close_many+0xae/0x220
+[ +0.000005] dev_close_many+0xc2/0x1a0
+[ +0.000004] ? kernfs_should_drain_open_files+0x2a/0x40
+[ +0.000005] unregister_netdevice_many_notify+0x204/0xb00
+[ +0.000006] ? __kernfs_remove.part.0+0x109/0x210
+[ +0.000006] ? kobj_kset_leave+0x4b/0x70
+[ +0.000008] unregister_netdevice_queue+0xf6/0x130
+[ +0.000006] unregister_netdev+0x1c/0x40
+[ +0.000005] ixgbe_remove+0x216/0x290 [ixgbe]
+[ +0.000021] pci_device_remove+0x42/0xb0
+[ +0.000007] device_release_driver_internal+0x19c/0x200
+[ +0.000008] driver_detach+0x48/0x90
+[ +0.000003] bus_remove_driver+0x6d/0xf0
+[ +0.000006] pci_unregister_driver+0x2e/0xb0
+[ +0.000005] ixgbe_exit_module+0x1c/0xc80 [ixgbe]
+
+As with the previous commit, the issue was highlighted by commit
+337369f8ce9e ("locking/mutex: Add MUTEX_WARN_ON() into fast path").
+
+Move the destruction of aci.lock to the end of ixgbe_remove(); this
+simply fixes the issue.
+
+Fixes: 4600cdf9f5ac ("ixgbe: Enable link management in E610 device")
+Signed-off-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index 2a857037dd102..d5c421451f319 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -11892,10 +11892,8 @@ static void ixgbe_remove(struct pci_dev *pdev)
+ set_bit(__IXGBE_REMOVING, &adapter->state);
+ cancel_work_sync(&adapter->service_task);
+
+- if (adapter->hw.mac.type == ixgbe_mac_e610) {
++ if (adapter->hw.mac.type == ixgbe_mac_e610)
+ ixgbe_disable_link_status_events(adapter);
+- mutex_destroy(&adapter->hw.aci.lock);
+- }
+
+ if (adapter->mii_bus)
+ mdiobus_unregister(adapter->mii_bus);
+@@ -11955,6 +11953,9 @@ static void ixgbe_remove(struct pci_dev *pdev)
+ disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
+ free_netdev(netdev);
+
++ if (adapter->hw.mac.type == ixgbe_mac_e610)
++ mutex_destroy(&adapter->hw.aci.lock);
++
+ if (disable_dev)
+ pci_disable_device(pdev);
+ }
+--
+2.51.0
+
--- /dev/null
+From d09d625328e7099d91d8b42ddb081446596fb889 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Sep 2025 13:26:28 +0200
+Subject: ixgbe: initialize aci.lock before it's used
+
+From: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+
+[ Upstream commit b85936e95a4bd2a07e134af71e2c0750a69d2b8b ]
+
+Currently aci.lock is initialized too late: a number of ACI callbacks
+that use the lock are called before it is initialized.
+
+Commit 337369f8ce9e ("locking/mutex: Add MUTEX_WARN_ON() into fast path")
+highlights that issue, which results in the call trace below.
+
+[ 4.092899] DEBUG_LOCKS_WARN_ON(lock->magic != lock)
+[ 4.092910] WARNING: CPU: 0 PID: 578 at kernel/locking/mutex.c:154 mutex_lock+0x6d/0x80
+[ 4.098757] Call Trace:
+[ 4.098847] <TASK>
+[ 4.098922] ixgbe_aci_send_cmd+0x8c/0x1e0 [ixgbe]
+[ 4.099108] ? hrtimer_try_to_cancel+0x18/0x110
+[ 4.099277] ixgbe_aci_get_fw_ver+0x52/0xa0 [ixgbe]
+[ 4.099460] ixgbe_check_fw_error+0x1fc/0x2f0 [ixgbe]
+[ 4.099650] ? usleep_range_state+0x69/0xd0
+[ 4.099811] ? usleep_range_state+0x8c/0xd0
+[ 4.099964] ixgbe_probe+0x3b0/0x12d0 [ixgbe]
+[ 4.100132] local_pci_probe+0x43/0xa0
+[ 4.100267] work_for_cpu_fn+0x13/0x20
+[ 4.101647] </TASK>
+
+Move the aci.lock mutex initialization to ixgbe_sw_init(), before any
+ACI command is sent. Along with that, also move the related SWFW
+semaphore setup in order to reduce the size of ixgbe_probe(); that way
+all locks are initialized in ixgbe_sw_init().
+
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Przemek Kitszel <przemyslaw.kitszel@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Fixes: 4600cdf9f5ac ("ixgbe: Enable link management in E610 device")
+Signed-off-by: Jedrzej Jagielski <jedrzej.jagielski@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+index cba860f0e1f15..2a857037dd102 100644
+--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+@@ -6801,6 +6801,13 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter,
+ break;
+ }
+
++ /* Make sure the SWFW semaphore is in a valid state */
++ if (hw->mac.ops.init_swfw_sync)
++ hw->mac.ops.init_swfw_sync(hw);
++
++ if (hw->mac.type == ixgbe_mac_e610)
++ mutex_init(&hw->aci.lock);
++
+ #ifdef IXGBE_FCOE
+ /* FCoE support exists, always init the FCoE lock */
+ spin_lock_init(&adapter->fcoe.lock);
+@@ -11474,10 +11481,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ if (err)
+ goto err_sw_init;
+
+- /* Make sure the SWFW semaphore is in a valid state */
+- if (hw->mac.ops.init_swfw_sync)
+- hw->mac.ops.init_swfw_sync(hw);
+-
+ if (ixgbe_check_fw_error(adapter))
+ return ixgbe_recovery_probe(adapter);
+
+@@ -11681,8 +11684,6 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr);
+ ixgbe_mac_set_default_filter(adapter);
+
+- if (hw->mac.type == ixgbe_mac_e610)
+- mutex_init(&hw->aci.lock);
+ timer_setup(&adapter->service_timer, ixgbe_service_timer, 0);
+
+ if (ixgbe_removed(hw->hw_addr)) {
+@@ -11838,9 +11839,9 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+ devl_unlock(adapter->devlink);
+ ixgbe_release_hw_control(adapter);
+ ixgbe_clear_interrupt_scheme(adapter);
++err_sw_init:
+ if (hw->mac.type == ixgbe_mac_e610)
+ mutex_destroy(&adapter->hw.aci.lock);
+-err_sw_init:
+ ixgbe_disable_sriov(adapter);
+ adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
+ iounmap(adapter->io_addr);
+--
+2.51.0
+
--- /dev/null
+From 00d2fa438cd22006b1d835ff7cb31050ac8538eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:20 +0200
+Subject: mptcp: set remote_deny_join_id0 on SYN recv
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 96939cec994070aa5df852c10fad5fc303a97ea3 ]
+
+When a SYN containing the 'C' flag (deny join id0) was received, this
+piece of information was not propagated to the path-manager.
+
+Even though this flag is mainly set on the server side, a client can
+also tell the server that it cannot try to establish new subflows to the
+client's initial IP address and port. The server's PM should then record
+such info when it is received, before sending events about the new
+connection.
+
+Fixes: df377be38725 ("mptcp: add deny_join_id0 in mptcp_options_received")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-1-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/subflow.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 1802bc5435a1a..d77a2e374a7ae 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -882,6 +882,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+
+ ctx->subflow_id = 1;
+ owner = mptcp_sk(ctx->conn);
++
++ if (mp_opt.deny_join_id0)
++ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
++
+ mptcp_pm_new_connection(owner, child, 1);
+
+ /* with OoO packets we can reach here without ingress
+--
+2.51.0
+
--- /dev/null
+From ab7c5e91091cbf6f9ba69dccace1250b92fc73e0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:23 +0200
+Subject: mptcp: tfo: record 'deny join id0' info
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 92da495cb65719583aa06bc946aeb18a10e1e6e2 ]
+
+When TFO is used, the check to see if the 'C' flag (deny join id0) was
+set was bypassed.
+
+This flag can be set when TFO is used, so the check should also be done
+when TFO is used.
+
+Note that the set_fully_established label is also used when a 4th ACK is
+received. In this case, deny_join_id0 will not be set.
+
+Fixes: dfc8d0603033 ("mptcp: implement delayed seq generation for passive fastopen")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-4-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/options.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index c6983471dca55..bb4253aa675a6 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -984,13 +984,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ return false;
+ }
+
+- if (mp_opt->deny_join_id0)
+- WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+-
+ if (unlikely(!READ_ONCE(msk->pm.server_side)))
+ pr_warn_once("bogus mpc option on established client sk");
+
+ set_fully_established:
++ if (mp_opt->deny_join_id0)
++ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
++
+ mptcp_data_lock((struct sock *)msk);
+ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ mptcp_data_unlock((struct sock *)msk);
+--
+2.51.0
+
--- /dev/null
+From be2c0d02a2255a0f7aca8361d240846d4bb5c5f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:53:37 +0000
+Subject: net: clear sk->sk_ino in sk_set_socket(sk, NULL)
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 87ebb628a5acb892eba41ef1d8989beb8f036034 ]
+
+Andrei Vagin reported that blamed commit broke CRIU.
+
+Indeed, while we want to keep sk_uid unchanged when a socket
+is cloned, we want to clear sk->sk_ino.
+
+Otherwise, sock_diag might report multiple sockets sharing
+the same inode number.
+
+Move the clearing part from sock_orphan() to sk_set_socket(sk, NULL),
+called both from sock_orphan() and sk_clone_lock().
+
+Fixes: 5d6b58c932ec ("net: lockless sock_i_ino()")
+Closes: https://lore.kernel.org/netdev/aMhX-VnXkYDpKd9V@google.com/
+Closes: https://github.com/checkpoint-restore/criu/issues/2744
+Reported-by: Andrei Vagin <avagin@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Andrei Vagin <avagin@google.com>
+Link: https://patch.msgid.link/20250917135337.1736101-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index a348ae145eda4..6e9f4c126672d 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -2061,6 +2061,9 @@ static inline void sk_set_socket(struct sock *sk, struct socket *sock)
+ if (sock) {
+ WRITE_ONCE(sk->sk_uid, SOCK_INODE(sock)->i_uid);
+ WRITE_ONCE(sk->sk_ino, SOCK_INODE(sock)->i_ino);
++ } else {
++ /* Note: sk_uid is unchanged. */
++ WRITE_ONCE(sk->sk_ino, 0);
+ }
+ }
+
+@@ -2082,8 +2085,6 @@ static inline void sock_orphan(struct sock *sk)
+ sock_set_flag(sk, SOCK_DEAD);
+ sk_set_socket(sk, NULL);
+ sk->sk_wq = NULL;
+- /* Note: sk_uid is unchanged. */
+- WRITE_ONCE(sk->sk_ino, 0);
+ write_unlock_bh(&sk->sk_callback_lock);
+ }
+
+--
+2.51.0
+
--- /dev/null
+From d50e63469735dc6d1fd1a556d3375aaab63fd747 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 9 Sep 2025 18:54:15 +0200
+Subject: net: dst_metadata: fix IP_DF bit not extracted from tunnel headers
+
+From: Ilya Maximets <i.maximets@ovn.org>
+
+[ Upstream commit a9888628cb2c768202a4530e2816da1889cc3165 ]
+
+Both OVS and TC flower allow extracting and matching on the DF bit of
+the outer IP header via OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT in the
+OVS_KEY_ATTR_TUNNEL and TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT in
+the TCA_FLOWER_KEY_ENC_FLAGS respectively. Flow dissector extracts
+this information as FLOW_DIS_F_TUNNEL_DONT_FRAGMENT from the tunnel
+info key.
+
+However, the IP_TUNNEL_DONT_FRAGMENT_BIT in the tunnel key is never
+actually set, because the tunneling code doesn't actually extract it
+from the IP header. OAM and CRIT_OPT are extracted by the tunnel
+implementation code, and the same code also sets the KEY flag, if
+present. The UDP tunnel core takes care of setting the CSUM flag if the
+checksum is present in the UDP header, but DONT_FRAGMENT is not handled
+at any layer.
+
+Fix that by checking the bit and setting the corresponding flag while
+populating the tunnel info in the IP layer where it belongs.
+
+__assign_bit() is not used because there is no need to clear the bit in
+a just-initialized field, and it doesn't seem like __assign_bit() would
+make the code look any better.
+
+Clearly, users didn't rely on this functionality for anything very
+important until now. The reason this doesn't break OVS logic is that
+OVS only matches on what the kernel previously parsed out: if the kernel
+consistently reports this bit as zero, OVS will only ever match it as
+zero, which sort of works. But it is still a bug that the uAPI reports,
+and allows matching on, a field that is not actually checked in the
+packet. It also causes misleading -df reporting in OVS datapath flows,
+while the tunnel traffic actually has the bit set in most cases.
+
+This may also cause issues if a hardware properly implements support
+for tunnel flag matching as it will disagree with the implementation
+in a software path of TC flower.
+
+Fixes: 7d5437c709de ("openvswitch: Add tunneling interface.")
+Fixes: 1d17568e74de ("net/sched: cls_flower: add support for matching tunnel control flags")
+Signed-off-by: Ilya Maximets <i.maximets@ovn.org>
+Reviewed-by: Ido Schimmel <idosch@nvidia.com>
+Link: https://patch.msgid.link/20250909165440.229890-2-i.maximets@ovn.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/dst_metadata.h | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/include/net/dst_metadata.h b/include/net/dst_metadata.h
+index 4160731dcb6e3..1fc2fb03ce3f9 100644
+--- a/include/net/dst_metadata.h
++++ b/include/net/dst_metadata.h
+@@ -3,6 +3,7 @@
+ #define __NET_DST_METADATA_H 1
+
+ #include <linux/skbuff.h>
++#include <net/ip.h>
+ #include <net/ip_tunnels.h>
+ #include <net/macsec.h>
+ #include <net/dst.h>
+@@ -220,9 +221,15 @@ static inline struct metadata_dst *ip_tun_rx_dst(struct sk_buff *skb,
+ int md_size)
+ {
+ const struct iphdr *iph = ip_hdr(skb);
++ struct metadata_dst *tun_dst;
++
++ tun_dst = __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
++ 0, flags, tunnel_id, md_size);
+
+- return __ip_tun_set_dst(iph->saddr, iph->daddr, iph->tos, iph->ttl,
+- 0, flags, tunnel_id, md_size);
++ if (tun_dst && (iph->frag_off & htons(IP_DF)))
++ __set_bit(IP_TUNNEL_DONT_FRAGMENT_BIT,
++ tun_dst->u.tun_info.key.tun_flags);
++ return tun_dst;
+ }
+
+ static inline struct metadata_dst *__ipv6_tun_set_dst(const struct in6_addr *saddr,
+--
+2.51.0
+
--- /dev/null
+From 5d21fe028f39756d219c1e243ab276c6d87f362d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
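+A standalone illustration of the arithmetic (plain C, not driver code):
+the comparison result is a plain 32-bit int, so without the cast the
+shift is performed in 32-bit arithmetic and is undefined for shift
+counts of 32 or more:
+
+    #include <stdint.h>
+
+    uint64_t iq64b_bit(int instr_type, unsigned int iq_no)
+    {
+            /* buggy: (instr_type == 64) << iq_no           (int shift)    */
+            /* fixed: (uint64_t)(instr_type == 64) << iq_no (64-bit shift) */
+            return (uint64_t)(instr_type == 64) << iq_no;
+    }
+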
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index de8a6ce86ad7e..12105ffb5dac6 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
--- /dev/null
+From 8619859b04a2a0300e4dc9b8a96676e398e5542c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 08:37:32 +0800
+Subject: net/mlx5: Not returning mlx5_link_info table when speed is unknown
+
+From: Li Tian <litian@redhat.com>
+
+[ Upstream commit 5577352b55833d0f4350eb5d62eda2df09e84922 ]
+
+The mlx5e_link_info and mlx5e_ext_link_info tables have holes; e.g.
+Azure mlx5 reports PTYS 19. Do not return a table entry unless its
+speed is retrieved successfully.
+
+Fixes: 65a5d35571849 ("net/mlx5: Refactor link speed handling with mlx5_link_info struct")
+Suggested-by: Vitaly Kuznetsov <vkuznets@redhat.com>
+Signed-off-by: Li Tian <litian@redhat.com>
+Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/20250910003732.5973-1-litian@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/port.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+index 2d7adf7444ba2..aa9f2b0a77d36 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
+@@ -1170,7 +1170,11 @@ const struct mlx5_link_info *mlx5_port_ptys2info(struct mlx5_core_dev *mdev,
+ mlx5e_port_get_link_mode_info_arr(mdev, &table, &max_size,
+ force_legacy);
+ i = find_first_bit(&temp, max_size);
+- if (i < max_size)
++
++ /* mlx5e_link_info has holes. Check speed
++ * is not zero as indication of one.
++ */
++ if (i < max_size && table[i].speed)
+ return &table[i];
+
+ return NULL;
+--
+2.51.0
+
--- /dev/null
+From 233183e2eb1c3fb9696b3c66f9398df8a8478fa6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 15:24:34 +0300
+Subject: net/mlx5e: Add a miss level for ipsec crypto offload
+
+From: Lama Kayal <lkayal@nvidia.com>
+
+[ Upstream commit 7601a0a46216f4ba05adff2de75923b4e8e585c2 ]
+
+The cited commit adds a miss table for switchdev mode, but it uses the
+same level as the policy table. The following error is hit when
+running this command:
+
+ # ip xfrm state add src 192.168.1.22 dst 192.168.1.21 proto \
+ esp spi 1001 reqid 10001 aead 'rfc4106(gcm(aes))' \
+ 0x3a189a7f9374955d3817886c8587f1da3df387ff 128 \
+ mode tunnel offload dev enp8s0f0 dir in
+ Error: mlx5_core: Device failed to offload this state.
+
+The dmesg error is:
+
+ mlx5_core 0000:03:00.0: ipsec_miss_create:578:(pid 311797): fail to create IPsec miss_rule err=-22
+
+Fix it by adding a new miss level to avoid the error.
+
+Fixes: 7d9e292ecd67 ("net/mlx5e: Move IPSec policy check after decryption")
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Signed-off-by: Chris Mi <cmi@nvidia.com>
+Signed-off-by: Lama Kayal <lkayal@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/1757939074-617281-4-git-send-email-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en/fs.h | 1 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h | 1 +
+ drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c | 3 ++-
+ drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 4 ++--
+ 4 files changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+index 9560fcba643f5..ac65e31914802 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h
+@@ -92,6 +92,7 @@ enum {
+ MLX5E_ACCEL_FS_ESP_FT_LEVEL = MLX5E_INNER_TTC_FT_LEVEL + 1,
+ MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL,
+ MLX5E_ACCEL_FS_POL_FT_LEVEL,
++ MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL,
+ MLX5E_ACCEL_FS_ESP_FT_ROCE_LEVEL,
+ #endif
+ };
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+index ffcd0cdeb7754..23703f28386ad 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.h
+@@ -185,6 +185,7 @@ struct mlx5e_ipsec_rx_create_attr {
+ u32 family;
+ int prio;
+ int pol_level;
++ int pol_miss_level;
+ int sa_level;
+ int status_level;
+ enum mlx5_flow_namespace_type chains_ns;
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+index 98b6a3a623f99..65dc3529283b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
+@@ -747,6 +747,7 @@ static void ipsec_rx_create_attr_set(struct mlx5e_ipsec *ipsec,
+ attr->family = family;
+ attr->prio = MLX5E_NIC_PRIO;
+ attr->pol_level = MLX5E_ACCEL_FS_POL_FT_LEVEL;
++ attr->pol_miss_level = MLX5E_ACCEL_FS_POL_MISS_FT_LEVEL;
+ attr->sa_level = MLX5E_ACCEL_FS_ESP_FT_LEVEL;
+ attr->status_level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL;
+ attr->chains_ns = MLX5_FLOW_NAMESPACE_KERNEL;
+@@ -833,7 +834,7 @@ static int ipsec_rx_chains_create_miss(struct mlx5e_ipsec *ipsec,
+
+ ft_attr.max_fte = 1;
+ ft_attr.autogroup.max_num_groups = 1;
+- ft_attr.level = attr->pol_level;
++ ft_attr.level = attr->pol_miss_level;
+ ft_attr.prio = attr->prio;
+
+ ft = mlx5_create_auto_grouped_flow_table(attr->ns, &ft_attr);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 29ce09af59aef..3b57ef6b3de38 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -114,9 +114,9 @@
+ #define ETHTOOL_NUM_PRIOS 11
+ #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
+ /* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy,
+- * {IPsec RoCE MPV,Alias table},IPsec RoCE policy
++ * IPsec policy miss, {IPsec RoCE MPV,Alias table},IPsec RoCE policy
+ */
+-#define KERNEL_NIC_PRIO_NUM_LEVELS 10
++#define KERNEL_NIC_PRIO_NUM_LEVELS 11
+ #define KERNEL_NIC_NUM_PRIOS 1
+ /* One more level for tc, and one more for promisc */
+ #define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2)
+--
+2.51.0
+
--- /dev/null
+From 7621439d884fd1ed80f21e0f5108c4084ca71dd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 15:24:32 +0300
+Subject: net/mlx5e: Harden uplink netdev access against device unbind
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+[ Upstream commit 6b4be64fd9fec16418f365c2d8e47a7566e9eba5 ]
+
+The function mlx5_uplink_netdev_get() gets the uplink netdevice
+pointer from mdev->mlx5e_res.uplink_netdev. However, the netdevice can
+be removed and its pointer cleared when unbound from the mlx5_core.eth
+driver. This results in a NULL pointer, causing a kernel panic.
+
+ BUG: unable to handle page fault for address: 0000000000001300
+ at RIP: 0010:mlx5e_vport_rep_load+0x22a/0x270 [mlx5_core]
+ Call Trace:
+ <TASK>
+ mlx5_esw_offloads_rep_load+0x68/0xe0 [mlx5_core]
+ esw_offloads_enable+0x593/0x910 [mlx5_core]
+ mlx5_eswitch_enable_locked+0x341/0x420 [mlx5_core]
+ mlx5_devlink_eswitch_mode_set+0x17e/0x3a0 [mlx5_core]
+ devlink_nl_eswitch_set_doit+0x60/0xd0
+ genl_family_rcv_msg_doit+0xe0/0x130
+ genl_rcv_msg+0x183/0x290
+ netlink_rcv_skb+0x4b/0xf0
+ genl_rcv+0x24/0x40
+ netlink_unicast+0x255/0x380
+ netlink_sendmsg+0x1f3/0x420
+ __sock_sendmsg+0x38/0x60
+ __sys_sendto+0x119/0x180
+ do_syscall_64+0x53/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Ensure the pointer is valid before use by checking it for NULL. If it
+is valid, immediately call netdev_hold() to take a reference,
+preventing the netdevice from being freed while it is in use.
+
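+The expected calling pattern then becomes (sketch based on the hunks
+below):
+
+    netdev = mlx5_uplink_netdev_get(mdev);
+    if (!netdev)
+            return 0;       /* uplink netdev already unbound */
+    /* ... use netdev ... */
+    mlx5_uplink_netdev_put(mdev, netdev);
+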
+Fixes: 7a9fb35e8c3a ("net/mlx5e: Do not reload ethernet ports when changing eswitch mode")
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/1757939074-617281-2-git-send-email-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en_rep.c | 27 +++++++++++++++----
+ .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 1 +
+ .../ethernet/mellanox/mlx5/core/lib/mlx5.h | 15 ++++++++++-
+ include/linux/mlx5/driver.h | 1 +
+ 4 files changed, 38 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 63a7a788fb0db..cd0242eb008c2 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1506,12 +1506,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
+ static int
+ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ {
+- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
++ struct net_device *netdev;
++ struct mlx5e_priv *priv;
++ int err;
++
++ netdev = mlx5_uplink_netdev_get(dev);
++ if (!netdev)
++ return 0;
+
++ priv = netdev_priv(netdev);
+ rpriv->netdev = priv->netdev;
+- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+- rpriv);
++ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
++ rpriv);
++ mlx5_uplink_netdev_put(dev, netdev);
++ return err;
+ }
+
+ static void
+@@ -1638,8 +1647,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+ {
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
+- struct mlx5e_priv *priv = netdev_priv(netdev);
+- void *ppriv = priv->ppriv;
++ struct mlx5e_priv *priv;
++ void *ppriv;
++
++ if (!netdev) {
++ ppriv = rpriv;
++ goto free_ppriv;
++ }
++
++ priv = netdev_priv(netdev);
++ ppriv = priv->ppriv;
+
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ mlx5e_vport_uplink_rep_unload(rpriv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index ad9f6fca9b6a2..c6476e943e98d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -743,6 +743,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+ speed = lksettings.base.speed;
+
+ out:
++ mlx5_uplink_netdev_put(mdev, slave);
+ return speed;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+index 37d5f445598c7..a7486e6d0d5ef 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+@@ -52,7 +52,20 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+
+ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+ {
+- return mdev->mlx5e_res.uplink_netdev;
++ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
++ struct net_device *netdev;
++
++ mutex_lock(&mlx5e_res->uplink_netdev_lock);
++ netdev = mlx5e_res->uplink_netdev;
++ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
++ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
++ return netdev;
++}
++
++static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
++ struct net_device *netdev)
++{
++ netdev_put(netdev, &mdev->mlx5e_res.tracker);
+ }
+
+ struct mlx5_sd;
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index e6ba8f4f4bd1f..27850ebb651b3 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -662,6 +662,7 @@ struct mlx5e_resources {
+ bool tisn_valid;
+ } hw_objs;
+ struct net_device *uplink_netdev;
++ netdevice_tracker tracker;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
+ };
+--
+2.51.0
+
--- /dev/null
+From e31f1d88de5a75b3a6a4a554620acaa82d549c4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments the `rx_dropped` core stat when it
+fails. The driver was also updating `ndev->stats.rx_dropped` in the same
+path. Since both are reported together via the `ip -s -s` command, this
+resulted in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 56d5464222d97..cdbf82affa7be 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From 5512d85152e6210832f487e8c236327d6eab796c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Sep 2025 20:07:44 -0300
+Subject: net/tcp: Fix a NULL pointer dereference when using TCP-AO with
+ TCP_REPAIR
+
+From: Anderson Nascimento <anderson@allelesecurity.com>
+
+[ Upstream commit 2e7bba08923ebc675b1f0e0e0959e68e53047838 ]
+
+A NULL pointer dereference can occur in tcp_ao_finish_connect() during a
+connect() system call on a socket with a TCP-AO key added and TCP_REPAIR
+enabled.
+
+The function is called with a NULL skb and attempts to dereference it
+via tcp_hdr(skb)->seq without any prior skb validation.
+
+Fix this by checking if skb is NULL before dereferencing it.
+
+The comment added here is taken from bpf_skops_established(), which is
+also called in the same flow. Unlike the function being patched,
+bpf_skops_established() validates the skb before dereferencing it.
+
+int main(void){
+ struct sockaddr_in sockaddr;
+ struct tcp_ao_add tcp_ao;
+ int sk;
+ int one = 1;
+
+ memset(&sockaddr,'\0',sizeof(sockaddr));
+ memset(&tcp_ao,'\0',sizeof(tcp_ao));
+
+ sk = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+
+ sockaddr.sin_family = AF_INET;
+
+ memcpy(tcp_ao.alg_name,"cmac(aes128)",12);
+ memcpy(tcp_ao.key,"ABCDEFGHABCDEFGH",16);
+ tcp_ao.keylen = 16;
+
+ memcpy(&tcp_ao.addr,&sockaddr,sizeof(sockaddr));
+
+ setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &tcp_ao,
+ sizeof(tcp_ao));
+ setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one));
+
+ sockaddr.sin_family = AF_INET;
+ sockaddr.sin_port = htobe16(123);
+
+ inet_aton("127.0.0.1", &sockaddr.sin_addr);
+
+ connect(sk,(struct sockaddr *)&sockaddr,sizeof(sockaddr));
+
+return 0;
+}
+
+$ gcc tcp-ao-nullptr.c -o tcp-ao-nullptr -Wall
+$ unshare -Urn
+
+BUG: kernel NULL pointer dereference, address: 00000000000000b6
+PGD 1f648d067 P4D 1f648d067 PUD 1982e8067 PMD 0
+Oops: Oops: 0000 [#1] SMP NOPTI
+Hardware name: VMware, Inc. VMware Virtual Platform/440BX Desktop
+Reference Platform, BIOS 6.00 11/12/2020
+RIP: 0010:tcp_ao_finish_connect (net/ipv4/tcp_ao.c:1182)
+
+Fixes: 7c2ffaf21bd6 ("net/tcp: Calculate TCP-AO traffic keys")
+Signed-off-by: Anderson Nascimento <anderson@allelesecurity.com>
+Reviewed-by: Dmitry Safonov <0x7f454c46@gmail.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250911230743.2551-3-anderson@allelesecurity.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp_ao.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c
+index bbb8d5f0eae7d..3338b6cc85c48 100644
+--- a/net/ipv4/tcp_ao.c
++++ b/net/ipv4/tcp_ao.c
+@@ -1178,7 +1178,9 @@ void tcp_ao_finish_connect(struct sock *sk, struct sk_buff *skb)
+ if (!ao)
+ return;
+
+- WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
++ /* sk with TCP_REPAIR_ON does not have skb in tcp_finish_connect */
++ if (skb)
++ WRITE_ONCE(ao->risn, tcp_hdr(skb)->seq);
+ ao->rcv_sne = 0;
+
+ hlist_for_each_entry_rcu(key, &ao->head, node, lockdep_sock_is_held(sk))
+--
+2.51.0
+
--- /dev/null
+From 1119883f44c9a79f5821422c720d2cf39e743269 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 15:32:49 +0200
+Subject: nvme: fix PI insert on write
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 7ac3c2889bc060c3f67cf44df0dbb093a835c176 ]
+
+I recently ran into an issue where the PI generated using the block
+layer integrity code differs from that of a kernel using the PRACT
+fallback when the block layer integrity code is disabled, and I tracked
+this down to us using PRACT incorrectly.
+
+The NVM Command Set Specification (section 5.33 in 1.2, similar in older
+versions) specifies the PRACT insert behavior as:
+
+ Inserted protection information consists of the computed CRC for the
+ protection information format (refer to section 5.3.1) in the Guard
+ field, the LBAT field value in the Application Tag field, the LBST
+ field value in the Storage Tag field, if defined, and the computed
+ reference tag in the Logical Block Reference Tag.
+
+The computed reference tag is defined as follows for type 1 and type 2,
+using the text below, which is duplicated in the respective bullet
+points:
+
+ the value of the computed reference tag for the first logical block of
+ the command is the value contained in the Initial Logical Block
+ Reference Tag (ILBRT) or Expected Initial Logical Block Reference Tag
+ (EILBRT) field in the command, and the computed reference tag is
+ incremented for each subsequent logical block.
+
+So we need to set the ILBRT field, but we currently don't. Interestingly
+this works fine on my older type 1 formatted SSD, but Qemu trips up on
+this. We already set ILBRT for Write Same since commit aeb7bb061be5
+("nvme: set the PRACT bit when using Write Zeroes with T10 PI").
+
+To ease this, move the PI type check into nvme_set_ref_tag.
+
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 895fb163d48e6..5395623d2ba6a 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -903,6 +903,15 @@ static void nvme_set_ref_tag(struct nvme_ns *ns, struct nvme_command *cmnd,
+ u32 upper, lower;
+ u64 ref48;
+
++ /* only type1 and type 2 PI formats have a reftag */
++ switch (ns->head->pi_type) {
++ case NVME_NS_DPS_PI_TYPE1:
++ case NVME_NS_DPS_PI_TYPE2:
++ break;
++ default:
++ return;
++ }
++
+ /* both rw and write zeroes share the same reftag format */
+ switch (ns->head->guard_type) {
+ case NVME_NVM_NS_16B_GUARD:
+@@ -942,13 +951,7 @@ static inline blk_status_t nvme_setup_write_zeroes(struct nvme_ns *ns,
+
+ if (nvme_ns_has_pi(ns->head)) {
+ cmnd->write_zeroes.control |= cpu_to_le16(NVME_RW_PRINFO_PRACT);
+-
+- switch (ns->head->pi_type) {
+- case NVME_NS_DPS_PI_TYPE1:
+- case NVME_NS_DPS_PI_TYPE2:
+- nvme_set_ref_tag(ns, cmnd, req);
+- break;
+- }
++ nvme_set_ref_tag(ns, cmnd, req);
+ }
+
+ return BLK_STS_OK;
+@@ -1039,6 +1042,7 @@ static inline blk_status_t nvme_setup_rw(struct nvme_ns *ns,
+ if (WARN_ON_ONCE(!nvme_ns_has_pi(ns->head)))
+ return BLK_STS_NOTSUPP;
+ control |= NVME_RW_PRINFO_PRACT;
++ nvme_set_ref_tag(ns, cmnd, req);
+ }
+
+ if (bio_integrity_flagged(req->bio, BIP_CHECK_GUARD))
+--
+2.51.0
+
--- /dev/null
+From 202e62f9bbca84fb0f9aeae7debd9dbb2bdf9333 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 06:32:07 -0700
+Subject: octeon_ep: fix VF MAC address lifecycle handling
+
+From: Sathesh B Edara <sedara@marvell.com>
+
+[ Upstream commit a72175c985132885573593222a7b088cf49b07ae ]
+
+Currently, the VF MAC address info is not updated when the MAC address is
+configured from the VF, and it is not cleared when the VF is removed. This
+leads to stale or missing MAC information in the PF, which may cause
+incorrect state tracking or inconsistencies when VFs are hot-plugged
+or reassigned.
+
+Fix this by:
+ - storing the VF MAC address in the PF when it is set from VF
+ - clearing the stored VF MAC address when the VF is removed
+
+This ensures that the PF always has correct VF MAC state.
+
+Fixes: cde29af9e68e ("octeon_ep: add PF-VF mailbox communication")
+Signed-off-by: Sathesh B Edara <sedara@marvell.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250916133207.21737-1-sedara@marvell.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+index ebecdd29f3bd0..0867fab61b190 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_pfvf_mbox.c
+@@ -196,6 +196,7 @@ static void octep_pfvf_get_mac_addr(struct octep_device *oct, u32 vf_id,
+ vf_id);
+ return;
+ }
++ ether_addr_copy(oct->vf_info[vf_id].mac_addr, rsp->s_set_mac.mac_addr);
+ rsp->s_set_mac.type = OCTEP_PFVF_MBOX_TYPE_RSP_ACK;
+ }
+
+@@ -205,6 +206,8 @@ static void octep_pfvf_dev_remove(struct octep_device *oct, u32 vf_id,
+ {
+ int err;
+
++ /* Reset VF-specific information maintained by the PF */
++ memset(&oct->vf_info[vf_id], 0, sizeof(struct octep_pfvf_info));
+ err = octep_ctrl_net_dev_remove(oct, vf_id);
+ if (err) {
+ rsp->s.type = OCTEP_PFVF_MBOX_TYPE_RSP_NACK;
+--
+2.51.0
+
--- /dev/null
+From 6c2e1ec973720aea0c68465333b554892f682539 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Sep 2025 18:36:10 -0400
+Subject: octeon_ep: Validate the VF ID
+
+From: Kamal Heib <kheib@redhat.com>
+
+[ Upstream commit af82e857df5dd883a4867bcaf5dde041e57a4e33 ]
+
+Add a helper to validate the VF ID and use it in the VF ndo ops to
+prevent accessing out-of-range entries.
+
+Without this check, users can run commands such as:
+
+ # ip link show dev enp135s0
+ 2: enp135s0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP mode DEFAULT group default qlen 1000
+ link/ether 00:00:00:01:01:00 brd ff:ff:ff:ff:ff:ff
+ vf 0 link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state enable, trust off
+ vf 1 link/ether 00:00:00:00:00:00 brd ff:ff:ff:ff:ff:ff, spoof checking on, link-state enable, trust off
+ # ip link set dev enp135s0 vf 4 mac 00:00:00:00:00:14
+ # echo $?
+ 0
+
+even though VF 4 does not exist, which results in silent success instead
+of returning an error.
+
+Fixes: 8a241ef9b9b8 ("octeon_ep: add ndo ops for VFs in PF driver")
+Signed-off-by: Kamal Heib <kheib@redhat.com>
+Reviewed-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250911223610.1803144-1-kheib@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/marvell/octeon_ep/octep_main.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+index 24499bb36c005..bcea3fc26a8c7 100644
+--- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
++++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c
+@@ -1124,11 +1124,24 @@ static int octep_set_features(struct net_device *dev, netdev_features_t features
+ return err;
+ }
+
++static bool octep_is_vf_valid(struct octep_device *oct, int vf)
++{
++ if (vf >= CFG_GET_ACTIVE_VFS(oct->conf)) {
++ netdev_err(oct->netdev, "Invalid VF ID %d\n", vf);
++ return false;
++ }
++
++ return true;
++}
++
+ static int octep_get_vf_config(struct net_device *dev, int vf,
+ struct ifla_vf_info *ivi)
+ {
+ struct octep_device *oct = netdev_priv(dev);
+
++ if (!octep_is_vf_valid(oct, vf))
++ return -EINVAL;
++
+ ivi->vf = vf;
+ ether_addr_copy(ivi->mac, oct->vf_info[vf].mac_addr);
+ ivi->spoofchk = true;
+@@ -1143,6 +1156,9 @@ static int octep_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+ struct octep_device *oct = netdev_priv(dev);
+ int err;
+
++ if (!octep_is_vf_valid(oct, vf))
++ return -EINVAL;
++
+ if (!is_valid_ether_addr(mac)) {
+ dev_err(&oct->pdev->dev, "Invalid MAC Address %pM\n", mac);
+ return -EADDRNOTAVAIL;
+--
+2.51.0
+
--- /dev/null
+From 1e85cb0c26e2704641de0203ee141e1cbf51f5d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 14:38:53 +0800
+Subject: octeontx2-pf: Fix use-after-free bugs in otx2_sync_tstamp()
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit f8b4687151021db61841af983f1cb7be6915d4ef ]
+
+The original code relies on cancel_delayed_work() in otx2_ptp_destroy(),
+which does not ensure that the delayed work item synctstamp_work has fully
+completed if it was already running. This leads to use-after-free scenarios
+where otx2_ptp is deallocated by otx2_ptp_destroy(), while synctstamp_work
+remains active and attempts to dereference otx2_ptp in otx2_sync_tstamp().
+Furthermore, since synctstamp_work is cyclic, the likelihood of triggering
+the bug is non-negligible.
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+otx2_remove() |
+ otx2_ptp_destroy() | otx2_sync_tstamp()
+ cancel_delayed_work() |
+ kfree(ptp) |
+ | ptp = container_of(...); //UAF
+ | ptp-> //UAF
+
+This is confirmed by a KASAN report:
+
+BUG: KASAN: slab-use-after-free in __run_timer_base.part.0+0x7d7/0x8c0
+Write of size 8 at addr ffff88800aa09a18 by task bash/136
+...
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x55/0x70
+ print_report+0xcf/0x610
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ kasan_report+0xb8/0xf0
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ __run_timer_base.part.0+0x7d7/0x8c0
+ ? __pfx___run_timer_base.part.0+0x10/0x10
+ ? __pfx_read_tsc+0x10/0x10
+ ? ktime_get+0x60/0x140
+ ? lapic_next_event+0x11/0x20
+ ? clockevents_program_event+0x1d4/0x2a0
+ run_timer_softirq+0xd1/0x190
+ handle_softirqs+0x16a/0x550
+ irq_exit_rcu+0xaf/0xe0
+ sysvec_apic_timer_interrupt+0x70/0x80
+ </IRQ>
+...
+Allocated by task 1:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ __kasan_kmalloc+0x7f/0x90
+ otx2_ptp_init+0xb1/0x860
+ otx2_probe+0x4eb/0xc30
+ local_pci_probe+0xdc/0x190
+ pci_device_probe+0x2fe/0x470
+ really_probe+0x1ca/0x5c0
+ __driver_probe_device+0x248/0x310
+ driver_probe_device+0x44/0x120
+ __driver_attach+0xd2/0x310
+ bus_for_each_dev+0xed/0x170
+ bus_add_driver+0x208/0x500
+ driver_register+0x132/0x460
+ do_one_initcall+0x89/0x300
+ kernel_init_freeable+0x40d/0x720
+ kernel_init+0x1a/0x150
+ ret_from_fork+0x10c/0x1a0
+ ret_from_fork_asm+0x1a/0x30
+
+Freed by task 136:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ kasan_save_free_info+0x3a/0x60
+ __kasan_slab_free+0x3f/0x50
+ kfree+0x137/0x370
+ otx2_ptp_destroy+0x38/0x80
+ otx2_remove+0x10d/0x4c0
+ pci_device_remove+0xa6/0x1d0
+ device_release_driver_internal+0xf8/0x210
+ pci_stop_bus_device+0x105/0x150
+ pci_stop_and_remove_bus_device_locked+0x15/0x30
+ remove_store+0xcc/0xe0
+ kernfs_fop_write_iter+0x2c3/0x440
+ vfs_write+0x871/0xd70
+ ksys_write+0xee/0x1c0
+ do_syscall_64+0xac/0x280
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the delayed work item is properly canceled before the otx2_ptp is
+deallocated.
+
+This bug was initially identified through static analysis. To reproduce
+and test it, I simulated the OcteonTX2 PCI device in QEMU and introduced
+artificial delays within the otx2_sync_tstamp() function to increase the
+likelihood of triggering the bug.
+
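+The ordering rule behind the fix can be shown with a plain userspace
+analogue (POSIX threads standing in for the workqueue; this is not kernel
+code): the worker must be stopped and waited for before the shared object
+is freed, just as cancel_delayed_work_sync() must complete before
+kfree(ptp).
+
+  #include <pthread.h>
+  #include <stdatomic.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <unistd.h>
+
+  struct ptp_like {
+      atomic_int stop;
+      long counter;
+  };
+
+  static void *worker(void *arg)
+  {
+      struct ptp_like *p = arg;
+
+      /* Periodic work, like a self-rearming delayed_work callback. */
+      while (!atomic_load(&p->stop)) {
+          p->counter++;              /* would be a UAF if freed under us */
+          usleep(1000);
+      }
+      return NULL;
+  }
+
+  int main(void)
+  {
+      struct ptp_like *p = calloc(1, sizeof(*p));
+      pthread_t t;
+
+      if (!p)
+          return 1;
+      atomic_init(&p->stop, 0);
+      if (pthread_create(&t, NULL, worker, p))
+          return 1;
+      usleep(10000);
+
+      /* Analogue of cancel_delayed_work_sync(): request cancellation AND
+       * wait for the callback to finish before freeing the object. */
+      atomic_store(&p->stop, 1);
+      pthread_join(t, NULL);
+
+      printf("worker ran %ld iterations, now safe to free\n", p->counter);
+      free(p);
+      return 0;
+  }
+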
+Fixes: 2958d17a8984 ("octeontx2-pf: Add support for ptp 1-step mode on CN10K silicon")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+index 63130ba37e9df..69b435ed8fbbe 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+@@ -491,7 +491,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
+ if (!ptp)
+ return;
+
+- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
++ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+--
+2.51.0
+
--- /dev/null
+From ed3447d37ad35079545370105c5b55daee2d46a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index 441cdf83f5a44..d6f24c7d15622 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -304,7 +304,13 @@ static void __exit omap_cf_remove(struct platform_device *pdev)
+ kfree(cf);
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From 3f2b55e319369c9e2582e572850555c514d4093c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 14 Sep 2025 11:18:08 -0700
+Subject: perf maps: Ensure kmap is set up for all inserts
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 20c9ccffccd61b37325a0519fb6d485caeecf7fa ]
+
+__maps__fixup_overlap_and_insert may split or directly insert a map;
+when doing this the map may need to have a kmap set up for the sake of
+the kmaps. The missing kmap setup fails the check_invariants test in
+maps, leads to later "Internal error" reports from map__kmap and
+ultimately causes segfaults.
+
+Similar fixes were added in commit e0e4e0b8b7fa ("perf maps: Add
+missing map__set_kmap_maps() when replacing a kernel map") and commit
+25d9c0301d36 ("perf maps: Set the kmaps for newly created/added kernel
+maps") but they missed cases. To try to reduce the risk of this,
+update the kmap directly following any manual insert. This identified
+another problem in maps__copy_from.
+
+Fixes: e0e4e0b8b7fa ("perf maps: Add missing map__set_kmap_maps() when replacing a kernel map")
+Fixes: 25d9c0301d36 ("perf maps: Set the kmaps for newly created/added kernel maps")
+Signed-off-by: Ian Rogers <irogers@google.com>
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/maps.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/tools/perf/util/maps.c b/tools/perf/util/maps.c
+index 85b2a93a59ac6..779f6230130af 100644
+--- a/tools/perf/util/maps.c
++++ b/tools/perf/util/maps.c
+@@ -477,6 +477,7 @@ static int __maps__insert(struct maps *maps, struct map *new)
+ }
+ /* Insert the value at the end. */
+ maps_by_address[nr_maps] = map__get(new);
++ map__set_kmap_maps(new, maps);
+ if (maps_by_name)
+ maps_by_name[nr_maps] = map__get(new);
+
+@@ -502,8 +503,6 @@ static int __maps__insert(struct maps *maps, struct map *new)
+ if (map__end(new) < map__start(new))
+ RC_CHK_ACCESS(maps)->ends_broken = true;
+
+- map__set_kmap_maps(new, maps);
+-
+ return 0;
+ }
+
+@@ -891,6 +890,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
+ if (before) {
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = before;
++ map__set_kmap_maps(before, maps);
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+@@ -918,6 +918,7 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
+ */
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = map__get(new);
++ map__set_kmap_maps(new, maps);
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+@@ -942,14 +943,13 @@ static int __maps__fixup_overlap_and_insert(struct maps *maps, struct map *new)
+ */
+ map__put(maps_by_address[i]);
+ maps_by_address[i] = map__get(new);
++ map__set_kmap_maps(new, maps);
+
+ if (maps_by_name) {
+ map__put(maps_by_name[ni]);
+ maps_by_name[ni] = map__get(new);
+ }
+
+- map__set_kmap_maps(new, maps);
+-
+ check_invariants(maps);
+ return err;
+ }
+@@ -1019,6 +1019,7 @@ int maps__copy_from(struct maps *dest, struct maps *parent)
+ err = unwind__prepare_access(dest, new, NULL);
+ if (!err) {
+ dest_maps_by_address[i] = new;
++ map__set_kmap_maps(new, dest);
+ if (dest_maps_by_name)
+ dest_maps_by_name[i] = map__get(new);
+ RC_CHK_ACCESS(dest)->nr_maps = i + 1;
+--
+2.51.0
+
--- /dev/null
+From 698af8cda8f553b8b1cf4c347c9f0f7015a9d06c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:29:16 +1000
+Subject: qed: Don't collect too many protection override GRC elements
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 56c0a2a9ddc2f5b5078c5fb0f81ab76bbc3d4c37 ]
+
+In the protection override dump path, the firmware can return far too
+many GRC elements, resulting in attempting to write past the end of the
+previously-kmalloc'ed dump buffer.
+
+This will result in a kernel panic with reason:
+
+ BUG: unable to handle kernel paging request at ADDRESS
+
+where "ADDRESS" is just past the end of the protection override dump
+buffer. The start address of the buffer is:
+ p_hwfn->cdev->dbg_features[DBG_FEATURE_PROTECTION_OVERRIDE].dump_buf
+and the size of the buffer is buf_size in the same data structure.
+
+The panic can be arrived at from either the qede Ethernet driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc02662ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc0267792 [qed]
+ qed_dbg_feature at ffffffffc026aa8f [qed]
+ qed_dbg_all_data at ffffffffc026b211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc027298a [qed]
+ devlink_health_do_dump at ffffffff82497f61
+ devlink_health_report at ffffffff8249cf29
+ qed_report_fatal_error at ffffffffc0272baf [qed]
+ qede_sp_task at ffffffffc045ed32 [qede]
+ process_one_work at ffffffff81d19783
+
+or the qedf storage driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc068b2ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc068c792 [qed]
+ qed_dbg_feature at ffffffffc068fa8f [qed]
+ qed_dbg_all_data at ffffffffc0690211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc069798a [qed]
+ devlink_health_do_dump at ffffffff8aa95e51
+ devlink_health_report at ffffffff8aa9ae19
+ qed_report_fatal_error at ffffffffc0697baf [qed]
+ qed_hw_err_notify at ffffffffc06d32d7 [qed]
+ qed_spq_post at ffffffffc06b1011 [qed]
+ qed_fcoe_destroy_conn at ffffffffc06b2e91 [qed]
+ qedf_cleanup_fcport at ffffffffc05e7597 [qedf]
+ qedf_rport_event_handler at ffffffffc05e7bf7 [qedf]
+ fc_rport_work at ffffffffc02da715 [libfc]
+ process_one_work at ffffffff8a319663
+
+Resolve this by clamping the firmware's return value to the maximum
+number of legal elements the firmware should return.
+
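+The pattern is the usual defence for firmware-provided counts (a minimal
+standalone sketch with invented names, not the qed code): clamp the
+element count to the size the dump buffer was allocated for before
+copying anything into it.
+
+  #include <stdio.h>
+  #include <string.h>
+
+  #define MAX_ELEMENT_DWORDS 256        /* what the buffer was sized for */
+
+  static unsigned int dump_buf[MAX_ELEMENT_DWORDS];
+
+  /* fw_dwords comes from the device and must not be trusted blindly. */
+  static size_t copy_override_window(const unsigned int *src, size_t fw_dwords)
+  {
+      size_t n = fw_dwords;
+
+      if (n > MAX_ELEMENT_DWORDS)       /* clamp, like min(..., DEPTH) */
+          n = MAX_ELEMENT_DWORDS;
+      memcpy(dump_buf, src, n * sizeof(*src));
+      return n;
+  }
+
+  int main(void)
+  {
+      static unsigned int fake_fw_data[1024];
+
+      /* Firmware claims 1024 dwords; only 256 fit in the buffer. */
+      printf("copied %zu dwords\n", copy_override_window(fake_fw_data, 1024));
+      return 0;
+  }
+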
+Fixes: d52c89f120de8 ("qed*: Utilize FW 8.37.2.0")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/f8e1182934aa274c18d0682a12dbaf347595469c.1757485536.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index 9c3d3dd2f8475..1f0cea3cae92f 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4462,10 +4462,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+--
+2.51.0
+
--- /dev/null
+From 3e037c01b0d4a7008776c7985fb4a96acbab6d55 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index e39c51cfc8e6c..f0142d32b648f 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -136,8 +136,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (up) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
--- /dev/null
+From 317691c22c4f6e4aa6200c80c0610a986f4026b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 11 Sep 2025 23:58:16 +0100
+Subject: rxrpc: Fix unhandled errors in rxgk_verify_packet_integrity()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 64863f4ca4945bdb62ce2b30823f39ea9fe95415 ]
+
+rxgk_verify_packet_integrity() may get more errors than just -EPROTO from
+rxgk_verify_mic_skb(). Pretty much anything other than -ENOMEM constitutes
+an unrecoverable error. In the case of -ENOMEM, we can just drop the
+packet and wait for a retransmission.
+
+Something similar happens with rxgk_decrypt_skb() and its callers.
+
+Fix rxgk_decrypt_skb() and rxgk_verify_mic_skb() to return a greater variety
+of abort codes, and fix their callers to abort the connection on any error
+apart from -ENOMEM.
+
+Also preclear the variables used to hold the abort code returned from
+rxgk_decrypt_skb() or rxgk_verify_mic_skb() to eliminate uninitialised
+variable warnings.
+
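+A rough sketch of the resulting policy (standalone C with placeholder
+abort codes, not the rxrpc code): the crypto-layer errno is mapped to an
+abort code, and callers treat only -ENOMEM as retryable; everything else
+aborts the connection.
+
+  #include <errno.h>
+  #include <stdbool.h>
+  #include <stdio.h>
+
+  enum abort_code {
+      ABORT_SEALED_INCONSISTENT = 1,
+      ABORT_PACKET_SHORT        = 2,
+      ABORT_INCONSISTENCY       = 3,
+  };
+
+  /* Map a decrypt/verify error to an abort code (placeholder values). */
+  static enum abort_code map_error(int err)
+  {
+      switch (err) {
+      case -EPROTO:
+      case -EBADMSG:                /* checksum mismatch */
+          return ABORT_SEALED_INCONSISTENT;
+      case -EMSGSIZE:
+          return ABORT_PACKET_SHORT;
+      default:                      /* including -ENOPKG */
+          return ABORT_INCONSISTENCY;
+      }
+  }
+
+  /* Caller policy: abort on anything except -ENOMEM (drop, retry later). */
+  static bool should_abort(int err)
+  {
+      return err < 0 && err != -ENOMEM;
+  }
+
+  int main(void)
+  {
+      int errs[] = { -EPROTO, -EMSGSIZE, -ENOPKG, -ENOMEM };
+
+      for (unsigned int i = 0; i < sizeof(errs) / sizeof(errs[0]); i++)
+          printf("err %d: abort=%d code=%d\n", errs[i],
+                 should_abort(errs[i]),
+                 should_abort(errs[i]) ? map_error(errs[i]) : 0);
+      return 0;
+  }
+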
+Fixes: 9d1d2b59341f ("rxrpc: rxgk: Implement the yfs-rxgk security class (GSSAPI)")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lists.infradead.org/pipermail/linux-afs/2025-April/009739.html
+Closes: https://lists.infradead.org/pipermail/linux-afs/2025-April/009740.html
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/2038804.1757631496@warthog.procyon.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/rxgk.c | 18 ++++++++++--------
+ net/rxrpc/rxgk_app.c | 10 ++++++----
+ net/rxrpc/rxgk_common.h | 14 ++++++++++++--
+ 3 files changed, 28 insertions(+), 14 deletions(-)
+
+diff --git a/net/rxrpc/rxgk.c b/net/rxrpc/rxgk.c
+index 1e19c605bcc82..dce5a3d8a964f 100644
+--- a/net/rxrpc/rxgk.c
++++ b/net/rxrpc/rxgk.c
+@@ -475,7 +475,7 @@ static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
+ struct krb5_buffer metadata;
+ unsigned int offset = sp->offset, len = sp->len;
+ size_t data_offset = 0, data_len = len;
+- u32 ac;
++ u32 ac = 0;
+ int ret = -ENOMEM;
+
+ _enter("");
+@@ -499,9 +499,10 @@ static int rxgk_verify_packet_integrity(struct rxrpc_call *call,
+ ret = rxgk_verify_mic_skb(gk->krb5, gk->rx_Kc, &metadata,
+ skb, &offset, &len, &ac);
+ kfree(hdr);
+- if (ret == -EPROTO) {
+- rxrpc_abort_eproto(call, skb, ac,
+- rxgk_abort_1_verify_mic_eproto);
++ if (ret < 0) {
++ if (ret != -ENOMEM)
++ rxrpc_abort_eproto(call, skb, ac,
++ rxgk_abort_1_verify_mic_eproto);
+ } else {
+ sp->offset = offset;
+ sp->len = len;
+@@ -524,15 +525,16 @@ static int rxgk_verify_packet_encrypted(struct rxrpc_call *call,
+ struct rxgk_header hdr;
+ unsigned int offset = sp->offset, len = sp->len;
+ int ret;
+- u32 ac;
++ u32 ac = 0;
+
+ _enter("");
+
+ ret = rxgk_decrypt_skb(gk->krb5, gk->rx_enc, skb, &offset, &len, &ac);
+- if (ret == -EPROTO)
+- rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
+- if (ret < 0)
++ if (ret < 0) {
++ if (ret != -ENOMEM)
++ rxrpc_abort_eproto(call, skb, ac, rxgk_abort_2_decrypt_eproto);
+ goto error;
++ }
+
+ if (len < sizeof(hdr)) {
+ ret = rxrpc_abort_eproto(call, skb, RXGK_PACKETSHORT,
+diff --git a/net/rxrpc/rxgk_app.c b/net/rxrpc/rxgk_app.c
+index b94b77a1c3178..df684b5a85314 100644
+--- a/net/rxrpc/rxgk_app.c
++++ b/net/rxrpc/rxgk_app.c
+@@ -187,7 +187,7 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
+ struct key *server_key;
+ unsigned int ticket_offset, ticket_len;
+ u32 kvno, enctype;
+- int ret, ec;
++ int ret, ec = 0;
+
+ struct {
+ __be32 kvno;
+@@ -236,9 +236,11 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
+ &ticket_offset, &ticket_len, &ec);
+ crypto_free_aead(token_enc);
+ token_enc = NULL;
+- if (ret < 0)
+- return rxrpc_abort_conn(conn, skb, ec, ret,
+- rxgk_abort_resp_tok_dec);
++ if (ret < 0) {
++ if (ret != -ENOMEM)
++ return rxrpc_abort_conn(conn, skb, ec, ret,
++ rxgk_abort_resp_tok_dec);
++ }
+
+ ret = conn->security->default_decode_ticket(conn, skb, ticket_offset,
+ ticket_len, _key);
+diff --git a/net/rxrpc/rxgk_common.h b/net/rxrpc/rxgk_common.h
+index 7370a56559853..80164d89e19c0 100644
+--- a/net/rxrpc/rxgk_common.h
++++ b/net/rxrpc/rxgk_common.h
+@@ -88,11 +88,16 @@ int rxgk_decrypt_skb(const struct krb5_enctype *krb5,
+ *_offset += offset;
+ *_len = len;
+ break;
++ case -EBADMSG: /* Checksum mismatch. */
+ case -EPROTO:
+- case -EBADMSG:
+ *_error_code = RXGK_SEALEDINCON;
+ break;
++ case -EMSGSIZE:
++ *_error_code = RXGK_PACKETSHORT;
++ break;
++ case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */
+ default:
++ *_error_code = RXGK_INCONSISTENCY;
+ break;
+ }
+
+@@ -127,11 +132,16 @@ int rxgk_verify_mic_skb(const struct krb5_enctype *krb5,
+ *_offset += offset;
+ *_len = len;
+ break;
++ case -EBADMSG: /* Checksum mismatch */
+ case -EPROTO:
+- case -EBADMSG:
+ *_error_code = RXGK_SEALEDINCON;
+ break;
++ case -EMSGSIZE:
++ *_error_code = RXGK_PACKETSHORT;
++ break;
++ case -ENOPKG: /* Would prefer RXGK_BADETYPE, but not available for YFS. */
+ default:
++ *_error_code = RXGK_INCONSISTENCY;
+ break;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 15eceb655233ec0d5a74ffc31d086368dfc99981 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 00:06:17 +0100
+Subject: rxrpc: Fix untrusted unsigned subtract
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 2429a197648178cd4dc930a9d87c13c547460564 ]
+
+Fix the following Smatch static checker warning:
+
+ net/rxrpc/rxgk_app.c:65 rxgk_yfs_decode_ticket()
+ warn: untrusted unsigned subtract. 'ticket_len - 10 * 4'
+
+by prechecking the length of what we're trying to extract in two places
+in the token decoding for a response packet.
+
+Also use sizeof() on the struct we're extracting rather than specifying the
+size numerically, to be consistent with the other related statements.
+
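+The underlying hazard is plain unsigned arithmetic (standalone
+illustration, not the rxgk code): when the attacker-controlled length is
+smaller than the fixed header, the subtraction wraps to a huge value, so
+the length has to be checked before subtracting.
+
+  #include <stddef.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      size_t ticket_len = 8;           /* attacker-controlled, too short */
+      size_t header = 10 * 4;          /* fixed part we expect up front */
+
+      /* Wrong: wraps to a huge value because size_t is unsigned. */
+      printf("unchecked: %zu\n", ticket_len - header);
+
+      /* Right: precheck before subtracting. */
+      if (ticket_len < header) {
+          printf("rejected short ticket\n");
+          return 1;
+      }
+      printf("payload: %zu\n", ticket_len - header);
+      return 0;
+  }
+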
+Fixes: 9d1d2b59341f ("rxrpc: rxgk: Implement the yfs-rxgk security class (GSSAPI)")
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lists.infradead.org/pipermail/linux-afs/2025-September/010135.html
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Marc Dionne <marc.dionne@auristor.com>
+cc: linux-afs@lists.infradead.org
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/2039268.1757631977@warthog.procyon.org.uk
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/rxrpc/rxgk_app.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/net/rxrpc/rxgk_app.c b/net/rxrpc/rxgk_app.c
+index df684b5a85314..30275cb5ba3e2 100644
+--- a/net/rxrpc/rxgk_app.c
++++ b/net/rxrpc/rxgk_app.c
+@@ -54,6 +54,10 @@ int rxgk_yfs_decode_ticket(struct rxrpc_connection *conn, struct sk_buff *skb,
+
+ _enter("");
+
++ if (ticket_len < 10 * sizeof(__be32))
++ return rxrpc_abort_conn(conn, skb, RXGK_INCONSISTENCY, -EPROTO,
++ rxgk_abort_resp_short_yfs_tkt);
++
+ /* Get the session key length */
+ ret = skb_copy_bits(skb, ticket_offset, tmp, sizeof(tmp));
+ if (ret < 0)
+@@ -195,22 +199,23 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
+ __be32 token_len;
+ } container;
+
++ if (token_len < sizeof(container))
++ goto short_packet;
++
+ /* Decode the RXGK_TokenContainer object. This tells us which server
+ * key we should be using. We can then fetch the key, get the secret
+ * and set up the crypto to extract the token.
+ */
+ if (skb_copy_bits(skb, token_offset, &container, sizeof(container)) < 0)
+- return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
+- rxgk_abort_resp_tok_short);
++ goto short_packet;
+
+ kvno = ntohl(container.kvno);
+ enctype = ntohl(container.enctype);
+ ticket_len = ntohl(container.token_len);
+ ticket_offset = token_offset + sizeof(container);
+
+- if (xdr_round_up(ticket_len) > token_len - 3 * 4)
+- return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
+- rxgk_abort_resp_tok_short);
++ if (xdr_round_up(ticket_len) > token_len - sizeof(container))
++ goto short_packet;
+
+ _debug("KVNO %u", kvno);
+ _debug("ENC %u", enctype);
+@@ -285,4 +290,8 @@ int rxgk_extract_token(struct rxrpc_connection *conn, struct sk_buff *skb,
+ * also come out this way if the ticket decryption fails.
+ */
+ return ret;
++
++short_packet:
++ return rxrpc_abort_conn(conn, skb, RXGK_PACKETSHORT, -EPROTO,
++ rxgk_abort_resp_tok_short);
+ }
+--
+2.51.0
+
--- /dev/null
+From e929a18b72d5ff8f7016e5f4f8d9f68f2d069127 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:24 +0200
+Subject: selftests: mptcp: sockopt: fix error messages
+
+From: Geliang Tang <tanggeliang@kylinos.cn>
+
+[ Upstream commit b86418beade11d45540a2d20c4ec1128849b6c27 ]
+
+This patch fixes several issues in the error reporting of the MPTCP sockopt
+selftest:
+
+1. Fix diff not printed: The error messages for counter mismatches had
+ the actual difference ('diff') as argument, but it was missing in the
+ format string. Displaying it makes the debugging easier.
+
+2. Fix variable usage: The error check for 'mptcpi_bytes_acked' incorrectly
+ used 'ret2' (sent bytes) for both the expected value and the difference
+ calculation. It now correctly uses 'ret' (received bytes), which is the
+ expected value for bytes_acked.
+
+3. Fix off-by-one in diff: The calculation for the 'mptcpi_rcv_delta' diff
+ was 's.mptcpi_rcv_delta - ret', which is off-by-one. It has been
+ corrected to 's.mptcpi_rcv_delta - (ret + 1)' to match the expected
+ value in the condition above it.
+
+Fixes: 5dcff89e1455 ("selftests: mptcp: explicitly tests aggregate counters")
+Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-5-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/net/mptcp/mptcp_sockopt.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+index e934dd26a59d9..112c07c4c37a3 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+@@ -667,22 +667,26 @@ static void process_one_client(int fd, int pipefd)
+
+ do_getsockopts(&s, fd, ret, ret2);
+ if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
+- xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
++ xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
++ s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
+
+ /* be nice when running on top of older kernel */
+ if (s.pkt_stats_avail) {
+ if (s.last_sample.mptcpi_bytes_sent != ret2)
+- xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
++ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_sent, ret2,
+ s.last_sample.mptcpi_bytes_sent - ret2);
+ if (s.last_sample.mptcpi_bytes_received != ret)
+- xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
++ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_received, ret,
+ s.last_sample.mptcpi_bytes_received - ret);
+ if (s.last_sample.mptcpi_bytes_acked != ret)
+- xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
+- s.last_sample.mptcpi_bytes_acked, ret2,
+- s.last_sample.mptcpi_bytes_acked - ret2);
++ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
++ s.last_sample.mptcpi_bytes_acked, ret,
++ s.last_sample.mptcpi_bytes_acked - ret);
+ }
+
+ close(fd);
+--
+2.51.0
+
--- /dev/null
+From 4c438b0ad5b22f4ca8d382da2585829490f6ef42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:22 +0200
+Subject: selftests: mptcp: userspace pm: validate deny-join-id0 flag
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 24733e193a0d68f20d220e86da0362460c9aa812 ]
+
+The previous commit adds the MPTCP_PM_EV_FLAG_DENY_JOIN_ID0 flag. Make
+sure it is correctly announced by the other peer when it has been
+received.
+
+pm_nl_ctl will now display 'deny_join_id0:1' when monitoring the events,
+and when this flag was set by the other peer.
+
+The 'Fixes' tag below is the same as the one from the previous commit:
+this patch is not fixing anything wrong in the selftests, but it
+validates the previous fix for an issue introduced by that commit ID.
+
+Fixes: 702c2f646d42 ("mptcp: netlink: allow userspace-driven subflow establishment")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-3-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/mptcp/pm_nl_ctl.c | 7 +++++++
+ tools/testing/selftests/net/mptcp/userspace_pm.sh | 14 +++++++++++---
+ 2 files changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+index 994a556f46c15..93fea3442216c 100644
+--- a/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
++++ b/tools/testing/selftests/net/mptcp/pm_nl_ctl.c
+@@ -188,6 +188,13 @@ static int capture_events(int fd, int event_group)
+ fprintf(stderr, ",error:%u", *(__u8 *)RTA_DATA(attrs));
+ else if (attrs->rta_type == MPTCP_ATTR_SERVER_SIDE)
+ fprintf(stderr, ",server_side:%u", *(__u8 *)RTA_DATA(attrs));
++ else if (attrs->rta_type == MPTCP_ATTR_FLAGS) {
++ __u16 flags = *(__u16 *)RTA_DATA(attrs);
++
++ /* only print when present, easier */
++ if (flags & MPTCP_PM_EV_FLAG_DENY_JOIN_ID0)
++ fprintf(stderr, ",deny_join_id0:1");
++ }
+
+ attrs = RTA_NEXT(attrs, msg_len);
+ }
+diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+index 333064b0b5ac0..97819e18578f4 100755
+--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
++++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
+@@ -201,6 +201,9 @@ make_connection()
+ is_v6="v4"
+ fi
+
++ # set this on the client side only: will not affect the rest
++ ip netns exec "$ns2" sysctl -q net.mptcp.allow_join_initial_addr_port=0
++
+ :>"$client_evts"
+ :>"$server_evts"
+
+@@ -223,23 +226,28 @@ make_connection()
+ local client_token
+ local client_port
+ local client_serverside
++ local client_nojoin
+ local server_token
+ local server_serverside
++ local server_nojoin
+
+ client_token=$(mptcp_lib_evts_get_info token "$client_evts")
+ client_port=$(mptcp_lib_evts_get_info sport "$client_evts")
+ client_serverside=$(mptcp_lib_evts_get_info server_side "$client_evts")
++ client_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$client_evts")
+ server_token=$(mptcp_lib_evts_get_info token "$server_evts")
+ server_serverside=$(mptcp_lib_evts_get_info server_side "$server_evts")
++ server_nojoin=$(mptcp_lib_evts_get_info deny_join_id0 "$server_evts")
+
+ print_test "Established IP${is_v6} MPTCP Connection ns2 => ns1"
+- if [ "$client_token" != "" ] && [ "$server_token" != "" ] && [ "$client_serverside" = 0 ] &&
+- [ "$server_serverside" = 1 ]
++ if [ "${client_token}" != "" ] && [ "${server_token}" != "" ] &&
++ [ "${client_serverside}" = 0 ] && [ "${server_serverside}" = 1 ] &&
++ [ "${client_nojoin:-0}" = 0 ] && [ "${server_nojoin:-0}" = 1 ]
+ then
+ test_pass
+ print_title "Connection info: ${client_addr}:${client_port} -> ${connect_addr}:${app_port}"
+ else
+- test_fail "Expected tokens (c:${client_token} - s:${server_token}) and server (c:${client_serverside} - s:${server_serverside})"
++ test_fail "Expected tokens (c:${client_token} - s:${server_token}), server (c:${client_serverside} - s:${server_serverside}), nojoin (c:${client_nojoin} - s:${server_nojoin})"
+ mptcp_lib_result_print_all_tap
+ exit ${KSFT_FAIL}
+ fi
+--
+2.51.0
+
--- /dev/null
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+btrfs-fix-invalid-extref-key-setup-when-replaying-de.patch
+btrfs-zoned-fix-incorrect-assert-in-btrfs_zoned_rese.patch
+perf-maps-ensure-kmap-is-set-up-for-all-inserts.patch
+wifi-wilc1000-avoid-buffer-overflow-in-wid-string-co.patch
+nvme-fix-pi-insert-on-write.patch
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mt76-do-not-add-non-sta-wcid-entries-to-the-pol.patch
+wifi-mac80211-increase-scan_ies_len-for-s1g.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+smb-server-let-smb_direct_writev-respect-smb_direct_.patch
+um-virtio_uml-fix-use-after-free-after-put_device-in.patch
+um-fix-fd-copy-size-in-os_rcv_fd_msg.patch
+net-mlx5-not-returning-mlx5_link_info-table-when-spe.patch
+dpaa2-switch-fix-buffer-pool-seeding-for-control-tra.patch
+net-tcp-fix-a-null-pointer-dereference-when-using-tc.patch
+dpll-fix-clock-quality-level-reporting.patch
+rxrpc-fix-unhandled-errors-in-rxgk_verify_packet_int.patch
+rxrpc-fix-untrusted-unsigned-subtract.patch
+octeon_ep-validate-the-vf-id.patch
+qed-don-t-collect-too-many-protection-override-grc-e.patch
+net-dst_metadata-fix-ip_df-bit-not-extracted-from-tu.patch
+bonding-set-random-address-only-when-slaves-already-.patch
+mptcp-set-remote_deny_join_id0-on-syn-recv.patch
+selftests-mptcp-userspace-pm-validate-deny-join-id0-.patch
+mptcp-tfo-record-deny-join-id0-info.patch
+selftests-mptcp-sockopt-fix-error-messages.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+ice-fix-rx-page-leak-on-multi-buffer-frames.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+ixgbe-initialize-aci.lock-before-it-s-used.patch
+ixgbe-destroy-aci.lock-later-within-ixgbe_remove-pat.patch
+igc-don-t-fail-igc_probe-on-led-setup-error.patch
+doc-netlink-fix-typos-in-operation-attributes.patch
+net-mlx5e-harden-uplink-netdev-access-against-device.patch
+net-mlx5e-add-a-miss-level-for-ipsec-crypto-offload.patch
+bonding-don-t-set-oif-to-bond-dev-when-getting-ns-ta.patch
+octeon_ep-fix-vf-mac-address-lifecycle-handling.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+tls-make-sure-to-abort-the-stream-if-headers-are-bog.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-clear-sk-sk_ino-in-sk_set_socket-sk-null.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
+octeontx2-pf-fix-use-after-free-bugs-in-otx2_sync_ts.patch
--- /dev/null
+From 282b361faeb468cd7aee94aa2ce1967955c5ff91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Sep 2025 13:06:41 +0200
+Subject: smb: server: let smb_direct_writev() respect SMB_DIRECT_MAX_SEND_SGES
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit d162694037215fe25f1487999c58d70df809a2fd ]
+
+We should not use more sges for ib_post_send() than we told the rdma
+device in rdma_create_qp()!
+
+Otherwise ib_post_send() will return -EINVAL, so we disconnect the
+connection. Or with the current siw.ko we'll get 0 from ib_post_send(),
+but will never ever get a completion for the request. I've already sent a
+fix for siw.ko...
+
+So we need to make sure smb_direct_writev() limits the number of vectors
+we pass to individual smb_direct_post_send_data() calls, so that we
+don't go over the queue pair limits.
+
+Commit 621433b7e25d ("ksmbd: smbd: relax the count of sges required")
+was very strange and I guess only needed because
+SMB_DIRECT_MAX_SEND_SGES was 8 at that time. It basically removed the
+check that the rdma device is able to handle the number of sges we try
+to use.
+
+While the real problem was added by commit ddbdc861e37c ("ksmbd: smbd:
+introduce read/write credits for RDMA read/write") as it used the
+minumun of device->attrs.max_send_sge and device->attrs.max_sge_rd, with
+the problem that device->attrs.max_sge_rd is always 1 for iWarp. And
+that limitation should only apply to RDMA Read operations. For now we
+keep that limitation for RDMA Write operations too, fixing that is a
+task for another day as it's not really required a bug fix.
+
+Commit 2b4eeeaa9061 ("ksmbd: decrease the number of SMB3 smbdirect
+server SGEs") lowered SMB_DIRECT_MAX_SEND_SGES to 6, which is also used
+by our client code. And that client code enforces
+device->attrs.max_send_sge >= 6 since commit d2e81f92e5b7 ("Decrease the
+number of SMB3 smbdirect client SGEs") and (briefly looking) only the
+i40iw driver provides only 3, see I40IW_MAX_WQ_FRAGMENT_COUNT. But we
+currently require 4 anyway, so that would not have worked either way; now
+it fails early.
+
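+At its core the change is an iovec batching loop. The sketch below
+(standalone C with invented limits; the per-page SGE accounting of the
+real code is omitted) shows the idea: never hand more than the allowed
+number of vectors, or more than the negotiated payload size, to a single
+post, splitting an oversized vector across posts when necessary.
+
+  #include <stddef.h>
+  #include <stdio.h>
+
+  #define MAX_SGES_PER_SEND  5      /* invented: SGEs left after the header */
+  #define MAX_BYTES_PER_SEND 1364   /* invented: negotiated payload size */
+
+  struct kvec { void *iov_base; size_t iov_len; };
+
+  /* Stand-in for smb_direct_post_send_data(): just report the batch. */
+  static void post_send(const struct kvec *vecs, size_t nvecs, size_t remaining)
+  {
+      size_t bytes = 0;
+
+      for (size_t i = 0; i < nvecs; i++)
+          bytes += vecs[i].iov_len;
+      printf("post: %zu vecs, %zu bytes, %zu bytes still to send\n",
+             nvecs, bytes, remaining);
+  }
+
+  static void writev_in_batches(const struct kvec *iov, size_t niovs)
+  {
+      size_t remaining = 0, iov_idx = 0, iov_ofs = 0;
+
+      for (size_t i = 0; i < niovs; i++)
+          remaining += iov[i].iov_len;
+
+      while (remaining) {
+          struct kvec vecs[MAX_SGES_PER_SEND];
+          size_t nvecs = 0, budget = MAX_BYTES_PER_SEND;
+
+          while (iov_idx < niovs && nvecs < MAX_SGES_PER_SEND && budget) {
+              size_t take = iov[iov_idx].iov_len - iov_ofs;
+
+              if (take > budget)
+                  take = budget;    /* split an oversized vector */
+              vecs[nvecs].iov_base = (char *)iov[iov_idx].iov_base + iov_ofs;
+              vecs[nvecs].iov_len = take;
+              nvecs++;
+              budget -= take;
+              iov_ofs += take;
+              if (iov_ofs == iov[iov_idx].iov_len) {
+                  iov_idx++;
+                  iov_ofs = 0;
+              }
+          }
+          remaining -= MAX_BYTES_PER_SEND - budget;
+          post_send(vecs, nvecs, remaining);
+      }
+  }
+
+  int main(void)
+  {
+      static char a[100], b[4000], c[50];
+      struct kvec iov[] = {
+          { a, sizeof(a) }, { b, sizeof(b) }, { c, sizeof(c) },
+      };
+
+      writev_in_batches(iov, 3);
+      return 0;
+  }
+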
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Cc: linux-rdma@vger.kernel.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Fixes: ddbdc861e37c ("ksmbd: smbd: introduce read/write credits for RDMA read/write")
+Fixes: 621433b7e25d ("ksmbd: smbd: relax the count of sges required")
+Fixes: 2b4eeeaa9061 ("ksmbd: decrease the number of SMB3 smbdirect server SGEs")
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 157 ++++++++++++++++++++++-----------
+ 1 file changed, 107 insertions(+), 50 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 5466aa8c39b1c..cc4322bfa1d61 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -1209,78 +1209,130 @@ static int smb_direct_writev(struct ksmbd_transport *t,
+ bool need_invalidate, unsigned int remote_key)
+ {
+ struct smb_direct_transport *st = smb_trans_direct_transfort(t);
+- int remaining_data_length;
+- int start, i, j;
+- int max_iov_size = st->max_send_size -
++ size_t remaining_data_length;
++ size_t iov_idx;
++ size_t iov_ofs;
++ size_t max_iov_size = st->max_send_size -
+ sizeof(struct smb_direct_data_transfer);
+ int ret;
+- struct kvec vec;
+ struct smb_direct_send_ctx send_ctx;
++ int error = 0;
+
+ if (st->status != SMB_DIRECT_CS_CONNECTED)
+ return -ENOTCONN;
+
+ //FIXME: skip RFC1002 header..
++ if (WARN_ON_ONCE(niovs <= 1 || iov[0].iov_len != 4))
++ return -EINVAL;
+ buflen -= 4;
++ iov_idx = 1;
++ iov_ofs = 0;
+
+ remaining_data_length = buflen;
+ ksmbd_debug(RDMA, "Sending smb (RDMA): smb_len=%u\n", buflen);
+
+ smb_direct_send_ctx_init(st, &send_ctx, need_invalidate, remote_key);
+- start = i = 1;
+- buflen = 0;
+- while (true) {
+- buflen += iov[i].iov_len;
+- if (buflen > max_iov_size) {
+- if (i > start) {
+- remaining_data_length -=
+- (buflen - iov[i].iov_len);
+- ret = smb_direct_post_send_data(st, &send_ctx,
+- &iov[start], i - start,
+- remaining_data_length);
+- if (ret)
++ while (remaining_data_length) {
++ struct kvec vecs[SMB_DIRECT_MAX_SEND_SGES - 1]; /* minus smbdirect hdr */
++ size_t possible_bytes = max_iov_size;
++ size_t possible_vecs;
++ size_t bytes = 0;
++ size_t nvecs = 0;
++
++ /*
++ * For the last message remaining_data_length should be
++ * have been 0 already!
++ */
++ if (WARN_ON_ONCE(iov_idx >= niovs)) {
++ error = -EINVAL;
++ goto done;
++ }
++
++ /*
++ * We have 2 factors which limit the arguments we pass
++ * to smb_direct_post_send_data():
++ *
++ * 1. The number of supported sges for the send,
++ * while one is reserved for the smbdirect header.
++ * And we currently need one SGE per page.
++ * 2. The number of negotiated payload bytes per send.
++ */
++ possible_vecs = min_t(size_t, ARRAY_SIZE(vecs), niovs - iov_idx);
++
++ while (iov_idx < niovs && possible_vecs && possible_bytes) {
++ struct kvec *v = &vecs[nvecs];
++ int page_count;
++
++ v->iov_base = ((u8 *)iov[iov_idx].iov_base) + iov_ofs;
++ v->iov_len = min_t(size_t,
++ iov[iov_idx].iov_len - iov_ofs,
++ possible_bytes);
++ page_count = get_buf_page_count(v->iov_base, v->iov_len);
++ if (page_count > possible_vecs) {
++ /*
++ * If the number of pages in the buffer
++ * is to much (because we currently require
++ * one SGE per page), we need to limit the
++ * length.
++ *
++ * We know possible_vecs is at least 1,
++ * so we always keep the first page.
++ *
++ * We need to calculate the number extra
++ * pages (epages) we can also keep.
++ *
++ * We calculate the number of bytes in the
++ * first page (fplen), this should never be
++ * larger than v->iov_len because page_count is
++ * at least 2, but adding a limitation feels
++ * better.
++ *
++ * Then we calculate the number of bytes (elen)
++ * we can keep for the extra pages.
++ */
++ size_t epages = possible_vecs - 1;
++ size_t fpofs = offset_in_page(v->iov_base);
++ size_t fplen = min_t(size_t, PAGE_SIZE - fpofs, v->iov_len);
++ size_t elen = min_t(size_t, v->iov_len - fplen, epages*PAGE_SIZE);
++
++ v->iov_len = fplen + elen;
++ page_count = get_buf_page_count(v->iov_base, v->iov_len);
++ if (WARN_ON_ONCE(page_count > possible_vecs)) {
++ /*
++ * Something went wrong in the above
++ * logic...
++ */
++ error = -EINVAL;
+ goto done;
+- } else {
+- /* iov[start] is too big, break it */
+- int nvec = (buflen + max_iov_size - 1) /
+- max_iov_size;
+-
+- for (j = 0; j < nvec; j++) {
+- vec.iov_base =
+- (char *)iov[start].iov_base +
+- j * max_iov_size;
+- vec.iov_len =
+- min_t(int, max_iov_size,
+- buflen - max_iov_size * j);
+- remaining_data_length -= vec.iov_len;
+- ret = smb_direct_post_send_data(st, &send_ctx, &vec, 1,
+- remaining_data_length);
+- if (ret)
+- goto done;
+ }
+- i++;
+- if (i == niovs)
+- break;
+ }
+- start = i;
+- buflen = 0;
+- } else {
+- i++;
+- if (i == niovs) {
+- /* send out all remaining vecs */
+- remaining_data_length -= buflen;
+- ret = smb_direct_post_send_data(st, &send_ctx,
+- &iov[start], i - start,
+- remaining_data_length);
+- if (ret)
+- goto done;
+- break;
++ possible_vecs -= page_count;
++ nvecs += 1;
++ possible_bytes -= v->iov_len;
++ bytes += v->iov_len;
++
++ iov_ofs += v->iov_len;
++ if (iov_ofs >= iov[iov_idx].iov_len) {
++ iov_idx += 1;
++ iov_ofs = 0;
+ }
+ }
++
++ remaining_data_length -= bytes;
++
++ ret = smb_direct_post_send_data(st, &send_ctx,
++ vecs, nvecs,
++ remaining_data_length);
++ if (unlikely(ret)) {
++ error = ret;
++ goto done;
++ }
+ }
+
+ done:
+ ret = smb_direct_flush_send_list(st, &send_ctx, true);
++ if (unlikely(!ret && error))
++ ret = error;
+
+ /*
+ * As an optimization, we don't wait for individual I/O to finish
+@@ -1744,6 +1796,11 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
+ return -EINVAL;
+ }
+
++ if (device->attrs.max_send_sge < SMB_DIRECT_MAX_SEND_SGES) {
++ pr_err("warning: device max_send_sge = %d too small\n",
++ device->attrs.max_send_sge);
++ return -EINVAL;
++ }
+ if (device->attrs.max_recv_sge < SMB_DIRECT_MAX_RECV_SGES) {
+ pr_err("warning: device max_recv_sge = %d too small\n",
+ device->attrs.max_recv_sge);
+@@ -1767,7 +1824,7 @@ static int smb_direct_init_params(struct smb_direct_transport *t,
+
+ cap->max_send_wr = max_send_wrs;
+ cap->max_recv_wr = t->recv_credit_max;
+- cap->max_send_sge = max_sge_per_wr;
++ cap->max_send_sge = SMB_DIRECT_MAX_SEND_SGES;
+ cap->max_recv_sge = SMB_DIRECT_MAX_RECV_SGES;
+ cap->max_inline_data = 0;
+ cap->max_rdma_ctxs = t->max_rw_credits;
+--
+2.51.0
+
--- /dev/null
+From a7302ecdc69840138ea91d7ebcfc4edc44c7fc9d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completes 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
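+Step 2 of the sequence above is the standard connect(AF_UNSPEC) idiom (a
+minimal standalone demonstration, not the syzbot reproducer): it
+dissolves the socket's association and returns it to TCP_CLOSE so a later
+connect() can reuse it.
+
+  #include <stdio.h>
+  #include <string.h>
+  #include <sys/socket.h>
+  #include <unistd.h>
+
+  int main(void)
+  {
+      int fd = socket(AF_INET, SOCK_STREAM, 0);
+      struct sockaddr sa;
+
+      if (fd < 0) {
+          perror("socket");
+          return 1;
+      }
+
+      /* connect(AF_UNSPEC) dissolves any existing association and puts
+       * the socket back into TCP_CLOSE, after which it may be reused
+       * as a fresh client with another connect(). */
+      memset(&sa, 0, sizeof(sa));
+      sa.sa_family = AF_UNSPEC;
+      if (connect(fd, &sa, sizeof(sa)) < 0)
+          perror("connect(AF_UNSPEC)");
+      else
+          printf("socket disconnected, ready for a new connect()\n");
+
+      close(fd);
+      return 0;
+  }
+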
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 461a9ab540af0..98da33e0c308b 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3330,6 +3330,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ if (old_state != TCP_CLOSE)
+@@ -3445,6 +3446,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet_clear_bit(DEFER_CONNECT, sk);
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
--- /dev/null
+From 5cbff25bc4d0d84138659d4afaaa01a3116cbee1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 17:28:13 -0700
+Subject: tls: make sure to abort the stream if headers are bogus
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0aeb54ac4cd5cf8f60131b4d9ec0b6dc9c27b20d ]
+
+Normally we wait for the socket to buffer up the whole record
+before we service it. If the socket has a tiny buffer, however,
+we read out the data sooner, to prevent connection stalls.
+Make sure that we abort the connection when we find out late
+that the record is actually invalid. Retrying the parsing is
+fine in itself, but since we copy some more data each time
+before we parse, we can overflow the allocated skb space.
+
+Constructing a scenario in which we're under pressure without
+enough data in the socket to parse the length upfront is quite
+hard. syzbot figured out a way to do this by serving us the header
+in small OOB sends, and then filling in the recvbuf with a large
+normal send.
+
+Make sure that tls_rx_msg_size() aborts strp, if we reach
+an invalid record there's really no way to recover.
+
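+For reference, the record length strparser needs comes from the 5-byte
+TLS record header (a hedged standalone sketch; the exact ciphertext limit
+depends on the TLS version, so the bound below is an assumption): once
+the header is in hand, the full record size can be computed and anything
+implausible rejected for good rather than retried.
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  #define TLS_HEADER_SIZE    5
+  #define MAX_RECORD_PAYLOAD ((1 << 14) + 256)  /* assumed upper bound */
+
+  /* Return the total record size in bytes, 0 if more data is needed,
+   * or -1 if the header is bogus and the stream must be aborted. */
+  static int tls_record_size(const uint8_t *hdr, size_t len)
+  {
+      unsigned int payload;
+
+      if (len < TLS_HEADER_SIZE)
+          return 0;                        /* need more data */
+      if (hdr[0] < 20 || hdr[0] > 24)      /* rough content-type check */
+          return -1;
+      payload = (hdr[3] << 8) | hdr[4];    /* big-endian length field */
+      if (payload == 0 || payload > MAX_RECORD_PAYLOAD)
+          return -1;                       /* abort: unrecoverable */
+      return TLS_HEADER_SIZE + payload;
+  }
+
+  int main(void)
+  {
+      uint8_t good[] = { 23, 3, 3, 0x00, 0x20 };   /* app data, 32 bytes */
+      uint8_t bad[]  = { 99, 3, 3, 0xff, 0xff };   /* bogus type + length */
+
+      printf("good: %d\n", tls_record_size(good, sizeof(good)));
+      printf("bad:  %d\n", tls_record_size(bad, sizeof(bad)));
+      return 0;
+  }
+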
+Reported-by: Lee Jones <lee@kernel.org>
+Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser")
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://patch.msgid.link/20250917002814.1743558-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls.h | 1 +
+ net/tls/tls_strp.c | 14 +++++++++-----
+ net/tls/tls_sw.c | 3 +--
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 4e077068e6d98..e4c42731ce39a 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
+ int wait_on_pending_writer(struct sock *sk, long *timeo);
+ void tls_err_abort(struct sock *sk, int err);
++void tls_strp_abort_strp(struct tls_strparser *strp, int err);
+
+ int init_prot_info(struct tls_prot_info *prot,
+ const struct tls_crypto_info *crypto_info,
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index d71643b494a1a..98e12f0ff57e5 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -13,7 +13,7 @@
+
+ static struct workqueue_struct *tls_strp_wq;
+
+-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
++void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ {
+ if (strp->stopped)
+ return;
+@@ -211,11 +211,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+ {
++ unsigned int nfrag = skb->len / PAGE_SIZE;
+ size_t len, chunk;
+ skb_frag_t *frag;
+ int sz;
+
+- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
++ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ return -EMSGSIZE;
++ }
++
++ frag = &skb_shinfo(skb)->frags[nfrag];
+
+ len = in_len;
+ /* First make sure we got the header */
+@@ -520,10 +526,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ tls_strp_load_anchor_with_queue(strp, inq);
+ if (!strp->stm.full_len) {
+ sz = tls_rx_msg_size(strp, strp->anchor);
+- if (sz < 0) {
+- tls_strp_abort_strp(strp, sz);
++ if (sz < 0)
+ return sz;
+- }
+
+ strp->stm.full_len = sz;
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index bac65d0d4e3e1..daac9fd4be7eb 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2474,8 +2474,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+ return data_len + TLS_HEADER_SIZE;
+
+ read_failure:
+- tls_err_abort(strp->sk, ret);
+-
++ tls_strp_abort_strp(strp, ret);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 1a0eec03366ad0fa40f3077a9bef92643c837b3f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Sep 2025 08:27:15 +0800
+Subject: um: Fix FD copy size in os_rcv_fd_msg()
+
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+
+[ Upstream commit df447a3b4a4b961c9979b4b3ffb74317394b9b40 ]
+
+When copying FDs, the copy size should not include the control
+message header (cmsghdr). Fix it.
+
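+As a rough illustration (plain userspace C with a hypothetical helper
+name, not the UML code itself): for an SCM_RIGHTS control message,
+cmsg_len covers the aligned cmsghdr plus the FD payload, so CMSG_LEN(0)
+is exactly the header portion and the difference is the payload size in
+bytes:
+
+	#include <sys/socket.h>
+
+	/* How many file descriptors does a received SCM_RIGHTS cmsg carry? */
+	static int scm_rights_fd_count(const struct cmsghdr *cmsg)
+	{
+		/* cmsg_len == CMSG_LEN(payload) == aligned header + payload */
+		size_t payload = cmsg->cmsg_len - CMSG_LEN(0);
+
+		return payload / sizeof(int);
+	}
+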
+Fixes: 5cde6096a4dd ("um: generalize os_rcv_fd")
+Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/os-Linux/file.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/um/os-Linux/file.c b/arch/um/os-Linux/file.c
+index 617886d1fb1e9..21f0e50fb1df9 100644
+--- a/arch/um/os-Linux/file.c
++++ b/arch/um/os-Linux/file.c
+@@ -535,7 +535,7 @@ ssize_t os_rcv_fd_msg(int fd, int *fds, unsigned int n_fds,
+ cmsg->cmsg_type != SCM_RIGHTS)
+ return n;
+
+- memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len);
++ memcpy(fds, CMSG_DATA(cmsg), cmsg->cmsg_len - CMSG_LEN(0));
+ return n;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From 7277b2bae4099f83d048dbc1f97e779968225e3a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 15:00:51 +0800
+Subject: um: virtio_uml: Fix use-after-free after put_device in probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0 ]
+
+When register_virtio_device() fails in virtio_uml_probe(),
+the code sets vu_dev->registered = 1 even though
+the device was not successfully registered.
+This can lead to use-after-free or other issues.
+
+Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virtio_uml.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index ad8d78fb1d9aa..de7867ae220d0 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1250,10 +1250,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+--
+2.51.0
+
--- /dev/null
+From 7023c83bda8604a7d13f00c476f04b3d9dd070be Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index 307587c8a0037..7964a7c5f0b2b 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1389,7 +1389,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ might_sleep();
+ lockdep_assert_wiphy(local->hw.wiphy);
+--
+2.51.0
+
--- /dev/null
+From 637e5dae90bd2a7609eae8cf69adb880db52e55d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Aug 2025 18:54:37 +1000
+Subject: wifi: mac80211: increase scan_ies_len for S1G
+
+From: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+
+[ Upstream commit 7e2f3213e85eba00acb4cfe6d71647892d63c3a1 ]
+
+Currently the S1G capability element is not taken into account
+for the scan_ies_len, which leads to a buffer length validation
+failure in ieee80211_prep_hw_scan() and subsequent WARN in
+__ieee80211_start_scan(). This prevents hw scanning from functioning.
+To fix this, ensure we accommodate the S1G capability length.
+
+Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+Link: https://patch.msgid.link/20250826085437.3493-1-lachlan.hodges@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 1bad353d8a772..35c6755b817a8 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -1136,7 +1136,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ int result, i;
+ enum nl80211_band band;
+ int channels, max_bitrates;
+- bool supp_ht, supp_vht, supp_he, supp_eht;
++ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
+ struct cfg80211_chan_def dflt_chandef = {};
+
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
+@@ -1252,6 +1252,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ supp_vht = false;
+ supp_he = false;
+ supp_eht = false;
++ supp_s1g = false;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+ struct ieee80211_supported_band *sband;
+@@ -1299,6 +1300,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ max_bitrates = sband->n_bitrates;
+ supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
++ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
+
+ for_each_sband_iftype_data(sband, i, iftd) {
+ u8 he_40_mhz_cap;
+@@ -1432,6 +1434,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_cap);
+
++ if (supp_s1g)
++ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
++
+ /*
+ * HE cap element is variable in size - set len to allow max size */
+ if (supp_he) {
+--
+2.51.0
+
--- /dev/null
+From 756a1a0a8ab62831430d24e3c70ff952f02d9833 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 27 Aug 2025 10:53:48 +0200
+Subject: wifi: mt76: do not add non-sta wcid entries to the poll list
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit a3c99ef88a084e1c2b99dd56bbfa7f89c9be3e92 ]
+
+Polling and airtime reporting are valid for station entries only.
+
+Link: https://patch.msgid.link/20250827085352.51636-2-nbd@nbd.name
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/mac80211.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c
+index 8e6ce16ab5b88..c9e2dca308312 100644
+--- a/drivers/net/wireless/mediatek/mt76/mac80211.c
++++ b/drivers/net/wireless/mediatek/mt76/mac80211.c
+@@ -1731,7 +1731,7 @@ EXPORT_SYMBOL_GPL(mt76_wcid_cleanup);
+
+ void mt76_wcid_add_poll(struct mt76_dev *dev, struct mt76_wcid *wcid)
+ {
+- if (test_bit(MT76_MCU_RESET, &dev->phy.state))
++ if (test_bit(MT76_MCU_RESET, &dev->phy.state) || !wcid->sta)
+ return;
+
+ spin_lock_bh(&dev->sta_poll_lock);
+--
+2.51.0
+
--- /dev/null
+From 408f4284e548c32204be8717ea6b7ce42abca627 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Aug 2025 22:58:43 +0000
+Subject: wifi: wilc1000: avoid buffer overflow in WID string configuration
+
+From: Ajay.Kathat@microchip.com <Ajay.Kathat@microchip.com>
+
+[ Upstream commit fe9e4d0c39311d0f97b024147a0d155333f388b5 ]
+
+Fix the following copy overflow warning identified by Smatch checker.
+
+ drivers/net/wireless/microchip/wilc1000/wlan_cfg.c:184 wilc_wlan_parse_response_frame()
+ error: '__memcpy()' 'cfg->s[i]->str' copy overflow (512 vs 65537)
+
+This patch introduces size checks before accessing the memory buffer.
+The checks are based on the WID type of the data received from the firmware.
+For WID string configuration, the size limit is determined by the individual
+element size in 'struct wilc_cfg_str_vals', which is maintained in the 'len'
+field of 'struct wilc_cfg_str'.
+
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/linux-wireless/aLFbr9Yu9j_TQTey@stanley.mountain
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Ajay Singh <ajay.kathat@microchip.com>
+Link: https://patch.msgid.link/20250829225829.5423-1-ajay.kathat@microchip.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../wireless/microchip/wilc1000/wlan_cfg.c | 37 ++++++++++++++-----
+ .../wireless/microchip/wilc1000/wlan_cfg.h | 5 ++-
+ 2 files changed, 30 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+index 131388886acbf..cfabd5aebb540 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
+ };
+
+ static const struct wilc_cfg_str g_cfg_str[] = {
+- {WID_FIRMWARE_VERSION, NULL},
+- {WID_MAC_ADDR, NULL},
+- {WID_ASSOC_RES_INFO, NULL},
+- {WID_NIL, NULL}
++ {WID_FIRMWARE_VERSION, 0, NULL},
++ {WID_MAC_ADDR, 0, NULL},
++ {WID_ASSOC_RES_INFO, 0, NULL},
++ {WID_NIL, 0, NULL}
+ };
+
+ #define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
+@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
+
+ switch (FIELD_GET(WILC_WID_TYPE, wid)) {
+ case WID_CHAR:
++ len = 3;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
+ i++;
+
+ if (cfg->b[i].id == wid)
+ cfg->b[i].val = info[4];
+
+- len = 3;
+ break;
+
+ case WID_SHORT:
++ len = 4;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
+ i++;
+
+ if (cfg->hw[i].id == wid)
+ cfg->hw[i].val = get_unaligned_le16(&info[4]);
+
+- len = 4;
+ break;
+
+ case WID_INT:
++ len = 6;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
+ i++;
+
+ if (cfg->w[i].id == wid)
+ cfg->w[i].val = get_unaligned_le32(&info[4]);
+
+- len = 6;
+ break;
+
+ case WID_STR:
++ len = 2 + get_unaligned_le16(&info[2]);
++
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
+ i++;
+
+- if (cfg->s[i].id == wid)
++ if (cfg->s[i].id == wid) {
++ if (len > cfg->s[i].len || (len + 2 > size))
++ return;
++
+ memcpy(cfg->s[i].str, &info[2],
+- get_unaligned_le16(&info[2]) + 2);
++ len);
++ }
+
+- len = 2 + get_unaligned_le16(&info[2]);
+ break;
+
+ default:
+@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
+ /* store the string cfg parameters */
+ wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
+ wl->cfg.s[i].str = str_vals->firmware_version;
++ wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
+ i++;
+ wl->cfg.s[i].id = WID_MAC_ADDR;
+ wl->cfg.s[i].str = str_vals->mac_address;
++ wl->cfg.s[i].len = sizeof(str_vals->mac_address);
+ i++;
+ wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
+ wl->cfg.s[i].str = str_vals->assoc_rsp;
++ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
+ i++;
+ wl->cfg.s[i].id = WID_NIL;
+ wl->cfg.s[i].str = NULL;
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+index 7038b74f8e8ff..5ae74bced7d74 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+@@ -24,12 +24,13 @@ struct wilc_cfg_word {
+
+ struct wilc_cfg_str {
+ u16 id;
++ u16 len;
+ u8 *str;
+ };
+
+ struct wilc_cfg_str_vals {
+- u8 mac_address[7];
+- u8 firmware_version[129];
++ u8 mac_address[8];
++ u8 firmware_version[130];
+ u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
+ };
+
+--
+2.51.0
+
--- /dev/null
+From 69a2d7069933b0b222f3f6fca55d45394743c3cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Aug 2025 08:37:49 +0900
+Subject: ALSA: firewire-motu: drop EPOLLOUT from poll return values as write
+ is not supported
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit aea3493246c474bc917d124d6fb627663ab6bef0 ]
+
+The ALSA HwDep character device of the firewire-motu driver incorrectly
+returns EPOLLOUT in poll(2), even though the driver implements no operation
+for write(2). This misleads userspace applications to believe write() is
+allowed, potentially resulting in unnecessary wakeups.
+
+This issue dates back to the driver's initial code added by commit
+71c3797779d3 ("ALSA: firewire-motu: add hwdep interface"), and persisted
+when POLLOUT was updated to EPOLLOUT by commit a9a08845e9ac ("vfs: do
+bulk POLL* -> EPOLL* replacement").
+
+This commit fixes the bug.
+
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://patch.msgid.link/20250829233749.366222-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/motu/motu-hwdep.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/firewire/motu/motu-hwdep.c b/sound/firewire/motu/motu-hwdep.c
+index 88d1f4b56e4be..a220ac0c8eb83 100644
+--- a/sound/firewire/motu/motu-hwdep.c
++++ b/sound/firewire/motu/motu-hwdep.c
+@@ -111,7 +111,7 @@ static __poll_t hwdep_poll(struct snd_hwdep *hwdep, struct file *file,
+ events = 0;
+ spin_unlock_irq(&motu->lock);
+
+- return events | EPOLLOUT;
++ return events;
+ }
+
+ static int hwdep_get_info(struct snd_motu *motu, void __user *arg)
+--
+2.51.0
+
--- /dev/null
+From b2391f1a13751051b3f0d21b3ecfe3b77b2e845f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 08:01:26 +0000
+Subject: bonding: don't set oif to bond dev when getting NS target destination
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit a8ba87f04ca9cdec06776ce92dce1395026dc3bb ]
+
+Unlike IPv4, IPv6 routing strictly requires the source address to be valid
+on the outgoing interface. If the NS target is set to a remote VLAN interface,
+and the source address is also configured on a VLAN over a bond interface,
+setting the oif to the bond device will fail to retrieve the correct
+destination route.
+
+Fix this by not setting the oif to the bond device when retrieving the NS
+target destination. This allows the correct destination device (the VLAN
+interface) to be determined, so that bond_verify_device_path can return the
+proper VLAN tags for sending NS messages.
+
+Reported-by: David Wilder <wilder@us.ibm.com>
+Closes: https://lore.kernel.org/netdev/aGOKggdfjv0cApTO@fedora/
+Suggested-by: Jay Vosburgh <jv@jvosburgh.net>
+Tested-by: David Wilder <wilder@us.ibm.com>
+Acked-by: Jay Vosburgh <jv@jvosburgh.net>
+Fixes: 4e24be018eb9 ("bonding: add new parameter ns_targets")
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250916080127.430626-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index 291f33c772161..f7ed129fc8110 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -3261,7 +3261,6 @@ static void bond_ns_send_all(struct bonding *bond, struct slave *slave)
+ /* Find out through which dev should the packet go */
+ memset(&fl6, 0, sizeof(struct flowi6));
+ fl6.daddr = targets[i];
+- fl6.flowi6_oif = bond->dev->ifindex;
+
+ dst = ip6_route_output(dev_net(bond->dev), NULL, &fl6);
+ if (dst->error) {
+--
+2.51.0
+
--- /dev/null
+From a01ca9cb55e1ea68e6eab3483b4b2bf4abeeb56c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 02:43:34 +0000
+Subject: bonding: set random address only when slaves already exist
+
+From: Hangbin Liu <liuhangbin@gmail.com>
+
+[ Upstream commit 35ae4e86292ef7dfe4edbb9942955c884e984352 ]
+
+After commit 5c3bf6cba791 ("bonding: assign random address if device
+address is same as bond"), bonding will erroneously randomize the MAC
+address of the first interface added to the bond if fail_over_mac =
+follow.
+
+Correct this by additionally testing for the bond being empty before
+randomizing the MAC.
+
+Fixes: 5c3bf6cba791 ("bonding: assign random address if device address is same as bond")
+Reported-by: Qiuling Ren <qren@redhat.com>
+Signed-off-by: Hangbin Liu <liuhangbin@gmail.com>
+Link: https://patch.msgid.link/20250910024336.400253-1-liuhangbin@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/bonding/bond_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
+index cd5691ed9f171..291f33c772161 100644
+--- a/drivers/net/bonding/bond_main.c
++++ b/drivers/net/bonding/bond_main.c
+@@ -2042,6 +2042,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
+ memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
+ } else if (bond->params.fail_over_mac == BOND_FOM_FOLLOW &&
+ BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
++ bond_has_slaves(bond) &&
+ memcmp(slave_dev->dev_addr, bond_dev->dev_addr, bond_dev->addr_len) == 0) {
+ /* Set slave to random address to avoid duplicate mac
+ * address in later fail over.
+--
+2.51.0
+
--- /dev/null
+From 603b0f4b241635e2875367e0e961826f56ce9c34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 3 Sep 2025 16:53:21 +0100
+Subject: btrfs: fix invalid extref key setup when replaying dentry
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit b62fd63ade7cb573b114972ef8f9fa505be8d74a ]
+
+The offset for an extref item's key is not the object ID of the parent
+dir, otherwise we would not need the extref item and would use plain ref
+items. Instead the offset is the result of a hash computation that uses
+the object ID of the parent dir and the name associated to the entry.
+So fix this by setting the key offset at replay_one_name() to be the
+result of calling btrfs_extref_hash().
+
+Fixes: 725af92a6251 ("btrfs: Open-code name_in_log_ref in replay_one_name")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/tree-log.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
+index e5d6bc1bb5e5d..4b53e19f7520f 100644
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -1998,7 +1998,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans,
+
+ search_key.objectid = log_key.objectid;
+ search_key.type = BTRFS_INODE_EXTREF_KEY;
+- search_key.offset = key->objectid;
++ search_key.offset = btrfs_extref_hash(key->objectid, name.name, name.len);
+ ret = backref_in_log(root->log_root, &search_key, key->objectid, &name);
+ if (ret < 0) {
+ goto out;
+--
+2.51.0
+
--- /dev/null
+From 267ea81b1371d3cf99533bcdc9f5b32b5b98a893 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Aug 2025 01:07:24 +0000
+Subject: cgroup: split cgroup_destroy_wq into 3 workqueues
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Chen Ridong <chenridong@huawei.com>
+
+[ Upstream commit 79f919a89c9d06816dbdbbd168fa41d27411a7f9 ]
+
+A hung task can occur during LTP cgroup testing [1] when repeatedly
+mounting/unmounting perf_event and net_prio controllers with
+systemd.unified_cgroup_hierarchy=1. The hang manifests in
+cgroup_lock_and_drain_offline() during root destruction.
+
+Related case:
+cgroup_fj_function_perf_event cgroup_fj_function.sh perf_event
+cgroup_fj_function_net_prio cgroup_fj_function.sh net_prio
+
+Call Trace:
+ cgroup_lock_and_drain_offline+0x14c/0x1e8
+ cgroup_destroy_root+0x3c/0x2c0
+ css_free_rwork_fn+0x248/0x338
+ process_one_work+0x16c/0x3b8
+ worker_thread+0x22c/0x3b0
+ kthread+0xec/0x100
+ ret_from_fork+0x10/0x20
+
+Root Cause:
+
+CPU0 CPU1
+mount perf_event umount net_prio
+cgroup1_get_tree cgroup_kill_sb
+rebind_subsystems // root destruction enqueues
+ // cgroup_destroy_wq
+// kill all perf_event css
+ // one perf_event css A is dying
+ // css A offline enqueues cgroup_destroy_wq
+ // root destruction will be executed first
+ css_free_rwork_fn
+ cgroup_destroy_root
+ cgroup_lock_and_drain_offline
+ // some perf descendants are dying
+ // cgroup_destroy_wq max_active = 1
+ // waiting for css A to die
+
+Problem scenario:
+1. CPU0 mounts perf_event (rebind_subsystems)
+2. CPU1 unmounts net_prio (cgroup_kill_sb), queuing root destruction work
+3. A dying perf_event CSS gets queued for offline after root destruction
+4. Root destruction waits for offline completion, but offline work is
+ blocked behind root destruction in cgroup_destroy_wq (max_active=1)
+
+Solution:
+Split cgroup_destroy_wq into three dedicated workqueues:
+cgroup_offline_wq – Handles CSS offline operations
+cgroup_release_wq – Manages resource release
+cgroup_free_wq – Performs final memory deallocation
+
+This separation eliminates blocking in the CSS free path while waiting for
+offline operations to complete.
+
+[1] https://github.com/linux-test-project/ltp/blob/master/runtest/controllers
+Fixes: 334c3679ec4b ("cgroup: reimplement rebind_subsystems() using cgroup_apply_control() and friends")
+Reported-by: Gao Yingjie <gaoyingjie@uniontech.com>
+Signed-off-by: Chen Ridong <chenridong@huawei.com>
+Suggested-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/cgroup/cgroup.c | 43 +++++++++++++++++++++++++++++++++++-------
+ 1 file changed, 36 insertions(+), 7 deletions(-)
+
+diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
+index e8ef062f6ca05..5135838b5899f 100644
+--- a/kernel/cgroup/cgroup.c
++++ b/kernel/cgroup/cgroup.c
+@@ -123,8 +123,31 @@ DEFINE_PERCPU_RWSEM(cgroup_threadgroup_rwsem);
+ * of concurrent destructions. Use a separate workqueue so that cgroup
+ * destruction work items don't end up filling up max_active of system_wq
+ * which may lead to deadlock.
++ *
++ * A cgroup destruction should enqueue work sequentially to:
++ * cgroup_offline_wq: use for css offline work
++ * cgroup_release_wq: use for css release work
++ * cgroup_free_wq: use for free work
++ *
++ * Rationale for using separate workqueues:
++ * The cgroup root free work may depend on completion of other css offline
++ * operations. If all tasks were enqueued to a single workqueue, this could
++ * create a deadlock scenario where:
++ * - Free work waits for other css offline work to complete.
++ * - But other css offline work is queued after free work in the same queue.
++ *
++ * Example deadlock scenario with single workqueue (cgroup_destroy_wq):
++ * 1. umount net_prio
++ * 2. net_prio root destruction enqueues work to cgroup_destroy_wq (CPUx)
++ * 3. perf_event CSS A offline enqueues work to same cgroup_destroy_wq (CPUx)
++ * 4. net_prio cgroup_destroy_root->cgroup_lock_and_drain_offline.
++ * 5. net_prio root destruction blocks waiting for perf_event CSS A offline,
++ * which can never complete as it's behind in the same queue and
++ * workqueue's max_active is 1.
+ */
+-static struct workqueue_struct *cgroup_destroy_wq;
++static struct workqueue_struct *cgroup_offline_wq;
++static struct workqueue_struct *cgroup_release_wq;
++static struct workqueue_struct *cgroup_free_wq;
+
+ /* generate an array of cgroup subsystem pointers */
+ #define SUBSYS(_x) [_x ## _cgrp_id] = &_x ## _cgrp_subsys,
+@@ -5435,7 +5458,7 @@ static void css_release_work_fn(struct work_struct *work)
+ cgroup_unlock();
+
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -5444,7 +5467,7 @@ static void css_release(struct percpu_ref *ref)
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_release_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_release_wq, &css->destroy_work);
+ }
+
+ static void init_and_link_css(struct cgroup_subsys_state *css,
+@@ -5566,7 +5589,7 @@ static struct cgroup_subsys_state *css_create(struct cgroup *cgrp,
+ err_free_css:
+ list_del_rcu(&css->rstat_css_node);
+ INIT_RCU_WORK(&css->destroy_rwork, css_free_rwork_fn);
+- queue_rcu_work(cgroup_destroy_wq, &css->destroy_rwork);
++ queue_rcu_work(cgroup_free_wq, &css->destroy_rwork);
+ return ERR_PTR(err);
+ }
+
+@@ -5801,7 +5824,7 @@ static void css_killed_ref_fn(struct percpu_ref *ref)
+
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- queue_work(cgroup_destroy_wq, &css->destroy_work);
++ queue_work(cgroup_offline_wq, &css->destroy_work);
+ }
+ }
+
+@@ -6173,8 +6196,14 @@ static int __init cgroup_wq_init(void)
+ * We would prefer to do this in cgroup_init() above, but that
+ * is called before init_workqueues(): so leave this until after.
+ */
+- cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
+- BUG_ON(!cgroup_destroy_wq);
++ cgroup_offline_wq = alloc_workqueue("cgroup_offline", 0, 1);
++ BUG_ON(!cgroup_offline_wq);
++
++ cgroup_release_wq = alloc_workqueue("cgroup_release", 0, 1);
++ BUG_ON(!cgroup_release_wq);
++
++ cgroup_free_wq = alloc_workqueue("cgroup_free", 0, 1);
++ BUG_ON(!cgroup_free_wq);
+ return 0;
+ }
+ core_initcall(cgroup_wq_init);
+--
+2.51.0
+
--- /dev/null
+From 8bb617239412796f6fe8b81a33c1c9eb96de41b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 13:46:02 +0800
+Subject: cnic: Fix use-after-free bugs in cnic_delete_task
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit cfa7d9b1e3a8604afc84e9e51d789c29574fb216 ]
+
+The original code uses cancel_delayed_work() in cnic_cm_stop_bnx2x_hw(),
+which does not guarantee that the delayed work item 'delete_task' has
+fully completed if it was already running. Additionally, the delayed work
+item is cyclic, and the flush_workqueue() in cnic_cm_stop_bnx2x_hw() only
+blocks and waits for work items that were already queued to the
+workqueue prior to its invocation. Any work items submitted after
+flush_workqueue() is called are not included in the set of tasks that the
+flush operation awaits. This means that after the cyclic work items have
+finished executing, a delayed work item may still exist in the workqueue.
+This leads to use-after-free scenarios where the cnic_dev is deallocated
+by cnic_free_dev(), while delete_task remains active and attempts to
+dereference cnic_dev in cnic_delete_task().
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+cnic_netdev_event() |
+ cnic_stop_hw() | cnic_delete_task()
+ cnic_cm_stop_bnx2x_hw() | ...
+ cancel_delayed_work() | /* the queue_delayed_work()
+ flush_workqueue() | executes after flush_workqueue()*/
+ | queue_delayed_work()
+ cnic_free_dev(dev)//free | cnic_delete_task() //new instance
+ | dev = cp->dev; //use
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the cyclic delayed work item is properly canceled and that any
+ongoing execution of the work item completes before the cnic_dev is
+deallocated. Furthermore, since cancel_delayed_work_sync() uses
+__flush_work(work, true) to synchronously wait for any currently
+executing instance of the work item to finish, the flush_workqueue()
+becomes redundant and should be removed.
+
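+A minimal sketch of the general teardown pattern for a self-rearming
+delayed work item (illustrative only, with hypothetical names; not the
+cnic code, and assuming the work was set up elsewhere with
+INIT_DELAYED_WORK(&dev->task, demo_task)):
+
+	#include <linux/workqueue.h>
+	#include <linux/slab.h>
+
+	struct demo_dev {
+		struct delayed_work task;
+		/* ... */
+	};
+
+	static struct workqueue_struct *demo_wq;
+
+	/* The handler periodically re-queues itself. */
+	static void demo_task(struct work_struct *work)
+	{
+		struct demo_dev *dev =
+			container_of(work, struct demo_dev, task.work);
+
+		/* ... periodic cleanup that dereferences dev ... */
+		queue_delayed_work(demo_wq, &dev->task, 2 * HZ);
+	}
+
+	static void demo_teardown(struct demo_dev *dev)
+	{
+		/*
+		 * Waits for any running instance to finish and blocks
+		 * further self re-queueing, so freeing dev is safe.
+		 */
+		cancel_delayed_work_sync(&dev->task);
+		kfree(dev);
+	}
+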
+This bug was identified through static analysis. To reproduce the issue
+and validate the fix, I simulated the cnic PCI device in QEMU and
+introduced intentional delays — such as inserting calls to ssleep()
+within the cnic_delete_task() function — to increase the likelihood
+of triggering the bug.
+
+Fixes: fdf24086f475 ("cnic: Defer iscsi connection cleanup")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/cnic.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
+index 7926aaef8f0c5..ad2745c07c1ae 100644
+--- a/drivers/net/ethernet/broadcom/cnic.c
++++ b/drivers/net/ethernet/broadcom/cnic.c
+@@ -4220,8 +4220,7 @@ static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
+
+ cnic_bnx2x_delete_wait(dev, 0);
+
+- cancel_delayed_work(&cp->delete_task);
+- flush_workqueue(cnic_wq);
++ cancel_delayed_work_sync(&cp->delete_task);
+
+ if (atomic_read(&cp->iscsi_conn) != 0)
+ netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
+--
+2.51.0
+
--- /dev/null
+From 67036afc8e2dbba9b50304b2a85cd8430a2c0f6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 17:48:25 +0300
+Subject: dpaa2-switch: fix buffer pool seeding for control traffic
+
+From: Ioana Ciornei <ioana.ciornei@nxp.com>
+
+[ Upstream commit 2690cb089502b80b905f2abdafd1bf2d54e1abef ]
+
+Starting with commit c50e7475961c ("dpaa2-switch: Fix error checking in
+dpaa2_switch_seed_bp()"), the probing of a second DPSW object errors out
+like below.
+
+fsl_dpaa2_switch dpsw.1: fsl_mc_driver_probe failed: -12
+fsl_dpaa2_switch dpsw.1: probe with driver fsl_dpaa2_switch failed with error -12
+
+The aforementioned commit brought to the surface the fact that seeding
+buffers into the buffer pool destined for control traffic is not
+successful and an access violation recoverable error can be seen in the
+MC firmware log:
+
+[E, qbman_rec_isr:391, QBMAN] QBMAN recoverable event 0x1000000
+
+This happens because the driver incorrectly used the ID of the DPBP
+object instead of the hardware buffer pool ID when trying to release
+buffers into it.
+
+This is because any DPSW object uses two buffer pools, one managed by
+the Linux driver and destined for control traffic packet buffers and the
+other one managed by the MC firmware and destined only for offloaded
+traffic. And since the buffer pool managed by the MC firmware does not
+have an external facing DPBP equivalent, any subsequent DPBP objects
+created after the first DPSW will have a DPBP id different to the
+underlying hardware buffer ID.
+
+The issue was not caught earlier because these two numbers can be
+identical when all DPBP objects are created before the DPSW objects are.
+This is the case when the DPL file is used to describe the entire DPAA2
+object layout and objects are created at boot time and it's also true
+for the first DPSW being created dynamically using ls-addsw.
+
+Fix this by using the buffer pool ID instead of the DPBP id when
+releasing buffers into the pool.
+
+Fixes: 2877e4f7e189 ("staging: dpaa2-switch: setup buffer pool and RX path rings")
+Signed-off-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Link: https://patch.msgid.link/20250910144825.2416019-1-ioana.ciornei@nxp.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+index 76795bb0b564b..cdab37e9634d4 100644
+--- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
++++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c
+@@ -2700,7 +2700,7 @@ static int dpaa2_switch_setup_dpbp(struct ethsw_core *ethsw)
+ dev_err(dev, "dpsw_ctrl_if_set_pools() failed\n");
+ goto err_get_attr;
+ }
+- ethsw->bpid = dpbp_attrs.id;
++ ethsw->bpid = dpbp_attrs.bpid;
+
+ return 0;
+
+--
+2.51.0
+
--- /dev/null
+From 76a7a2f5d34e0f65041a060b218b4f9ef47ea3ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Aug 2025 17:16:17 +0200
+Subject: i40e: remove redundant memory barrier when cleaning Tx descs
+
+From: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+
+[ Upstream commit e37084a26070c546ae7961ee135bbfb15fbe13fd ]
+
+i40e has a feature which writes to a memory location the last descriptor
+successfully sent. The memory barrier in i40e_clean_tx_irq() was used to
+avoid forward-reading descriptor fields in case the DD bit was not set.
+Having the mentioned feature in place implies that such a situation will
+not happen, as we know in advance how many descriptors HW has dealt with.
+
+Besides, this barrier placement was wrong. The idea is to have this
+protection *after* reading the DD bit from the HW descriptor, not before.
+Digging through git history showed that the barrier was indeed before the
+DD bit check; in any case, the commit introducing i40e_get_head() should
+have wiped it out altogether.
+
+Also, there was one commit doing s/read_barrier_depends/smp_rmb when the
+head write-back feature was already in place, but it was only theoretical,
+based on ixgbe experience; that driver is different in these terms as it
+has to read the DD bit from the HW descriptor.
+
+Fixes: 1943d8ba9507 ("i40e/i40evf: enable hardware feature head write back")
+Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
+Reviewed-by: Aleksandr Loktionov <aleksandr.loktionov@intel.com>
+Tested-by: Rinitha S <sx.rinitha@intel.com> (A Contingent worker at Intel)
+Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index c962987d8b51b..6a9b47b005d29 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -950,9 +950,6 @@ static bool i40e_clean_tx_irq(struct i40e_vsi *vsi,
+ if (!eop_desc)
+ break;
+
+- /* prevent any other reads prior to eop_desc */
+- smp_rmb();
+-
+ i40e_trace(clean_tx_irq, tx_ring, tx_desc, tx_buf);
+ /* we have caught up to head, no work left to do */
+ if (tx_head == tx_desc)
+--
+2.51.0
+
--- /dev/null
+From d2b550fbdf153fc6a5eeb0add13a2459eb609e68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:20 +0200
+Subject: mptcp: set remote_deny_join_id0 on SYN recv
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 96939cec994070aa5df852c10fad5fc303a97ea3 ]
+
+When a SYN containing the 'C' flag (deny join id0) was received, this
+piece of information was not propagated to the path-manager.
+
+Even if this flag is mainly set on the server side, a client can also
+tell the server it cannot try to establish new subflows to the client's
+initial IP address and port. The server's PM should then record such
+info when received, and before sending events about the new connection.
+
+Fixes: df377be38725 ("mptcp: add deny_join_id0 in mptcp_options_received")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-1-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/subflow.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c
+index 0c9b9c0c277c2..dfee1890c841b 100644
+--- a/net/mptcp/subflow.c
++++ b/net/mptcp/subflow.c
+@@ -863,6 +863,10 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk,
+
+ ctx->subflow_id = 1;
+ owner = mptcp_sk(ctx->conn);
++
++ if (mp_opt.deny_join_id0)
++ WRITE_ONCE(owner->pm.remote_deny_join_id0, true);
++
+ mptcp_pm_new_connection(owner, child, 1);
+
+ /* with OoO packets we can reach here without ingress
+--
+2.51.0
+
--- /dev/null
+From 1499bf32fc41010ac6514c04f956e0f7cb67074e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:23 +0200
+Subject: mptcp: tfo: record 'deny join id0' info
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+[ Upstream commit 92da495cb65719583aa06bc946aeb18a10e1e6e2 ]
+
+When TFO is used, the check to see if the 'C' flag (deny join id0) was
+set was bypassed.
+
+This flag can be set when TFO is used, so the check should also be done
+when TFO is used.
+
+Note that the set_fully_established label is also used when a 4th ACK is
+received. In this case, deny_join_id0 will not be set.
+
+Fixes: dfc8d0603033 ("mptcp: implement delayed seq generation for passive fastopen")
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-4-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/options.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/mptcp/options.c b/net/mptcp/options.c
+index 9406d2d555e74..b245abd08c824 100644
+--- a/net/mptcp/options.c
++++ b/net/mptcp/options.c
+@@ -985,13 +985,13 @@ static bool check_fully_established(struct mptcp_sock *msk, struct sock *ssk,
+ return false;
+ }
+
+- if (mp_opt->deny_join_id0)
+- WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
+-
+ if (unlikely(!READ_ONCE(msk->pm.server_side)))
+ pr_warn_once("bogus mpc option on established client sk");
+
+ set_fully_established:
++ if (mp_opt->deny_join_id0)
++ WRITE_ONCE(msk->pm.remote_deny_join_id0, true);
++
+ mptcp_data_lock((struct sock *)msk);
+ __mptcp_subflow_fully_established(msk, subflow, mp_opt);
+ mptcp_data_unlock((struct sock *)msk);
+--
+2.51.0
+
--- /dev/null
+From 1dd4791339264156a62be532e9441b6ff9d76528 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 15:30:58 +0000
+Subject: net: liquidio: fix overflow in octeon_init_instr_queue()
+
+From: Alexey Nepomnyashih <sdl@nppct.ru>
+
+[ Upstream commit cca7b1cfd7b8a0eff2a3510c5e0f10efe8fa3758 ]
+
+The expression `(conf->instr_type == 64) << iq_no` can overflow because
+`iq_no` may be as high as 64 (`CN23XX_MAX_RINGS_PER_PF`). Casting the
+operand to `u64` ensures correct 64-bit arithmetic.
+
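+As a hedged aside (generic C with a hypothetical helper, not the driver
+code): the result of a comparison is an int, so shifting it by 32 or more
+bits is undefined and can never reach the upper half of a 64-bit mask;
+widening the operand first keeps the whole shift in 64-bit arithmetic:
+
+	#include <stdint.h>
+
+	static uint64_t ring_64b_bit(int instr_type, unsigned int iq_no)
+	{
+		/*
+		 * (instr_type == 64) evaluates to an int; cast it before
+		 * shifting so queue numbers >= 32 set the upper bits.
+		 */
+		return (uint64_t)(instr_type == 64) << iq_no;
+	}
+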
+Fixes: f21fb3ed364b ("Add support of Cavium Liquidio ethernet adapters")
+Signed-off-by: Alexey Nepomnyashih <sdl@nppct.ru>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cavium/liquidio/request_manager.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+index de8a6ce86ad7e..12105ffb5dac6 100644
+--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
++++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
+@@ -126,7 +126,7 @@ int octeon_init_instr_queue(struct octeon_device *oct,
+ oct->io_qmask.iq |= BIT_ULL(iq_no);
+
+ /* Set the 32B/64B mode for each input queue */
+- oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
++ oct->io_qmask.iq64B |= ((u64)(conf->instr_type == 64) << iq_no);
+ iq->iqcmd_64B = (conf->instr_type == 64);
+
+ oct->fn_list.setup_iq_regs(oct, iq_no);
+--
+2.51.0
+
--- /dev/null
+From fc35e434bdd5094c883b1e47946afb10f8e9efa8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Aug 2023 01:19:01 +0000
+Subject: net/mlx5e: Consider aggregated port speed during rate configuration
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+[ Upstream commit 8d88e198dcaf700e33c2a4c796af9434652c56e7 ]
+
+When LAG is configured, functions (PF, VF, SF) can utilize the maximum
+aggregated link speed for transmission. Currently, the aggregated link
+speed is not considered.
+
+Hence, improve it to use the aggregated link speed by referring to the
+physical port's upper bonding device when LAG is configured.
+
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Reviewed-by: Parav Pandit <parav@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Stable-dep-of: 6b4be64fd9fe ("net/mlx5e: Harden uplink netdev access against device unbind")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 84 ++++++++++++++++---
+ 1 file changed, 72 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index cc0f2be21a265..34f7d814859db 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -2,6 +2,7 @@
+ /* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
+
+ #include "eswitch.h"
++#include "lib/mlx5.h"
+ #include "esw/qos.h"
+ #include "en/port.h"
+ #define CREATE_TRACE_POINTS
+@@ -712,6 +713,70 @@ int mlx5_esw_qos_set_vport_rate(struct mlx5_eswitch *esw, struct mlx5_vport *vpo
+ return err;
+ }
+
++static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
++{
++ struct ethtool_link_ksettings lksettings;
++ struct net_device *slave, *master;
++ u32 speed = SPEED_UNKNOWN;
++
++ /* Lock ensures a stable reference to master and slave netdevice
++ * while port speed of master is queried.
++ */
++ ASSERT_RTNL();
++
++ slave = mlx5_uplink_netdev_get(mdev);
++ if (!slave)
++ goto out;
++
++ master = netdev_master_upper_dev_get(slave);
++ if (master && !__ethtool_get_link_ksettings(master, &lksettings))
++ speed = lksettings.base.speed;
++
++out:
++ return speed;
++}
++
++static int mlx5_esw_qos_max_link_speed_get(struct mlx5_core_dev *mdev, u32 *link_speed_max,
++ bool hold_rtnl_lock, struct netlink_ext_ack *extack)
++{
++ int err;
++
++ if (!mlx5_lag_is_active(mdev))
++ goto skip_lag;
++
++ if (hold_rtnl_lock)
++ rtnl_lock();
++
++ *link_speed_max = mlx5_esw_qos_lag_link_speed_get_locked(mdev);
++
++ if (hold_rtnl_lock)
++ rtnl_unlock();
++
++ if (*link_speed_max != (u32)SPEED_UNKNOWN)
++ return 0;
++
++skip_lag:
++ err = mlx5_port_max_linkspeed(mdev, link_speed_max);
++ if (err)
++ NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
++
++ return err;
++}
++
++static int mlx5_esw_qos_link_speed_verify(struct mlx5_core_dev *mdev,
++ const char *name, u32 link_speed_max,
++ u64 value, struct netlink_ext_ack *extack)
++{
++ if (value > link_speed_max) {
++ pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
++ name, value, link_speed_max);
++ NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++
+ int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps)
+ {
+ u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
+@@ -755,12 +820,6 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
+ u64 value;
+ int err;
+
+- err = mlx5_port_max_linkspeed(mdev, &link_speed_max);
+- if (err) {
+- NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed");
+- return err;
+- }
+-
+ value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder);
+ if (remainder) {
+ pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n",
+@@ -769,12 +828,13 @@ static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *
+ return -EINVAL;
+ }
+
+- if (value > link_speed_max) {
+- pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n",
+- name, value, link_speed_max);
+- NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed");
+- return -EINVAL;
+- }
++ err = mlx5_esw_qos_max_link_speed_get(mdev, &link_speed_max, true, extack);
++ if (err)
++ return err;
++
++ err = mlx5_esw_qos_link_speed_verify(mdev, name, link_speed_max, value, extack);
++ if (err)
++ return err;
+
+ *rate = value;
+ return 0;
+--
+2.51.0
+
--- /dev/null
+From 12e7e487ffdf82fe1ddf94c8b1e54d2422c7f4fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 15:24:32 +0300
+Subject: net/mlx5e: Harden uplink netdev access against device unbind
+
+From: Jianbo Liu <jianbol@nvidia.com>
+
+[ Upstream commit 6b4be64fd9fec16418f365c2d8e47a7566e9eba5 ]
+
+The function mlx5_uplink_netdev_get() gets the uplink netdevice
+pointer from mdev->mlx5e_res.uplink_netdev. However, the netdevice can
+be removed and its pointer cleared when unbound from the mlx5_core.eth
+driver. This results in a NULL pointer, causing a kernel panic.
+
+ BUG: unable to handle page fault for address: 0000000000001300
+ at RIP: 0010:mlx5e_vport_rep_load+0x22a/0x270 [mlx5_core]
+ Call Trace:
+ <TASK>
+ mlx5_esw_offloads_rep_load+0x68/0xe0 [mlx5_core]
+ esw_offloads_enable+0x593/0x910 [mlx5_core]
+ mlx5_eswitch_enable_locked+0x341/0x420 [mlx5_core]
+ mlx5_devlink_eswitch_mode_set+0x17e/0x3a0 [mlx5_core]
+ devlink_nl_eswitch_set_doit+0x60/0xd0
+ genl_family_rcv_msg_doit+0xe0/0x130
+ genl_rcv_msg+0x183/0x290
+ netlink_rcv_skb+0x4b/0xf0
+ genl_rcv+0x24/0x40
+ netlink_unicast+0x255/0x380
+ netlink_sendmsg+0x1f3/0x420
+ __sock_sendmsg+0x38/0x60
+ __sys_sendto+0x119/0x180
+ do_syscall_64+0x53/0x1d0
+ entry_SYSCALL_64_after_hwframe+0x4b/0x53
+
+Ensure the pointer is valid before use by checking it for NULL. If it
+is valid, immediately call netdev_hold() to take a reference,
+preventing the netdevice from being freed while it is in use.
+
+Fixes: 7a9fb35e8c3a ("net/mlx5e: Do not reload ethernet ports when changing eswitch mode")
+Signed-off-by: Jianbo Liu <jianbol@nvidia.com>
+Reviewed-by: Cosmin Ratiu <cratiu@nvidia.com>
+Reviewed-by: Jiri Pirko <jiri@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Link: https://patch.msgid.link/1757939074-617281-2-git-send-email-tariqt@nvidia.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en_rep.c | 27 +++++++++++++++----
+ .../net/ethernet/mellanox/mlx5/core/esw/qos.c | 1 +
+ .../ethernet/mellanox/mlx5/core/lib/mlx5.h | 15 ++++++++++-
+ include/linux/mlx5/driver.h | 1 +
+ 4 files changed, 38 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+index 851c499faa795..656a7b65f4c7b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+@@ -1448,12 +1448,21 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
+ static int
+ mlx5e_vport_uplink_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
+ {
+- struct mlx5e_priv *priv = netdev_priv(mlx5_uplink_netdev_get(dev));
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
++ struct net_device *netdev;
++ struct mlx5e_priv *priv;
++ int err;
++
++ netdev = mlx5_uplink_netdev_get(dev);
++ if (!netdev)
++ return 0;
+
++ priv = netdev_priv(netdev);
+ rpriv->netdev = priv->netdev;
+- return mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
+- rpriv);
++ err = mlx5e_netdev_change_profile(priv, &mlx5e_uplink_rep_profile,
++ rpriv);
++ mlx5_uplink_netdev_put(dev, netdev);
++ return err;
+ }
+
+ static void
+@@ -1565,8 +1574,16 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
+ {
+ struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
+ struct net_device *netdev = rpriv->netdev;
+- struct mlx5e_priv *priv = netdev_priv(netdev);
+- void *ppriv = priv->ppriv;
++ struct mlx5e_priv *priv;
++ void *ppriv;
++
++ if (!netdev) {
++ ppriv = rpriv;
++ goto free_ppriv;
++ }
++
++ priv = netdev_priv(netdev);
++ ppriv = priv->ppriv;
+
+ if (rep->vport == MLX5_VPORT_UPLINK) {
+ mlx5e_vport_uplink_rep_unload(rpriv);
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+index 34f7d814859db..05fbd2098b268 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c
+@@ -733,6 +733,7 @@ static u32 mlx5_esw_qos_lag_link_speed_get_locked(struct mlx5_core_dev *mdev)
+ speed = lksettings.base.speed;
+
+ out:
++ mlx5_uplink_netdev_put(mdev, slave);
+ return speed;
+ }
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+index 2b5826a785c4f..adcc2bc9c8c87 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h
+@@ -52,6 +52,19 @@ static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+
+ static inline struct net_device *mlx5_uplink_netdev_get(struct mlx5_core_dev *mdev)
+ {
+- return mdev->mlx5e_res.uplink_netdev;
++ struct mlx5e_resources *mlx5e_res = &mdev->mlx5e_res;
++ struct net_device *netdev;
++
++ mutex_lock(&mlx5e_res->uplink_netdev_lock);
++ netdev = mlx5e_res->uplink_netdev;
++ netdev_hold(netdev, &mlx5e_res->tracker, GFP_KERNEL);
++ mutex_unlock(&mlx5e_res->uplink_netdev_lock);
++ return netdev;
++}
++
++static inline void mlx5_uplink_netdev_put(struct mlx5_core_dev *mdev,
++ struct net_device *netdev)
++{
++ netdev_put(netdev, &mdev->mlx5e_res.tracker);
+ }
+ #endif
+diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
+index 696a2227869fb..c0e0468b25a18 100644
+--- a/include/linux/mlx5/driver.h
++++ b/include/linux/mlx5/driver.h
+@@ -677,6 +677,7 @@ struct mlx5e_resources {
+ struct mlx5_sq_bfreg bfreg;
+ } hw_objs;
+ struct net_device *uplink_netdev;
++ netdevice_tracker tracker;
+ struct mutex uplink_netdev_lock;
+ struct mlx5_crypto_dek_priv *dek_priv;
+ };
+--
+2.51.0
+
--- /dev/null
+From 3d270fa0244685cf355b81c33b3d0f03a63a5350 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Sep 2025 15:01:36 +0900
+Subject: net: natsemi: fix `rx_dropped` double accounting on `netif_rx()`
+ failure
+
+From: Yeounsu Moon <yyyynoom@gmail.com>
+
+[ Upstream commit 93ab4881a4e2b9657bdce4b8940073bfb4ed5eab ]
+
+`netif_rx()` already increments the `rx_dropped` core stat when it fails.
+The driver was also updating `ndev->stats.rx_dropped` in the same path.
+Since both are reported together via the `ip -s -s` command, this resulted
+in drops being counted twice in user-visible stats.
+
+Keep the driver update on `if (unlikely(!skb))`, but skip it after
+`netif_rx()` errors.
+
+Fixes: caf586e5f23c ("net: add a core netdev->rx_dropped counter")
+Signed-off-by: Yeounsu Moon <yyyynoom@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250913060135.35282-3-yyyynoom@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/natsemi/ns83820.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
+index 998586872599b..c692d2e878b2e 100644
+--- a/drivers/net/ethernet/natsemi/ns83820.c
++++ b/drivers/net/ethernet/natsemi/ns83820.c
+@@ -820,7 +820,7 @@ static void rx_irq(struct net_device *ndev)
+ struct ns83820 *dev = PRIV(ndev);
+ struct rx_info *info = &dev->rx_info;
+ unsigned next_rx;
+- int rx_rc, len;
++ int len;
+ u32 cmdsts;
+ __le32 *desc;
+ unsigned long flags;
+@@ -881,8 +881,10 @@ static void rx_irq(struct net_device *ndev)
+ if (likely(CMDSTS_OK & cmdsts)) {
+ #endif
+ skb_put(skb, len);
+- if (unlikely(!skb))
++ if (unlikely(!skb)) {
++ ndev->stats.rx_dropped++;
+ goto netdev_mangle_me_harder_failed;
++ }
+ if (cmdsts & CMDSTS_DEST_MULTI)
+ ndev->stats.multicast++;
+ ndev->stats.rx_packets++;
+@@ -901,15 +903,12 @@ static void rx_irq(struct net_device *ndev)
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_IPV6), tag);
+ }
+ #endif
+- rx_rc = netif_rx(skb);
+- if (NET_RX_DROP == rx_rc) {
+-netdev_mangle_me_harder_failed:
+- ndev->stats.rx_dropped++;
+- }
++ netif_rx(skb);
+ } else {
+ dev_kfree_skb_irq(skb);
+ }
+
++netdev_mangle_me_harder_failed:
+ nr++;
+ next_rx = info->next_rx;
+ desc = info->descs + (DESC_SIZE * next_rx);
+--
+2.51.0
+
--- /dev/null
+From 7b73b50a91fc4b746fbda6bf24f334420c1e1786 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 14:38:53 +0800
+Subject: octeontx2-pf: Fix use-after-free bugs in otx2_sync_tstamp()
+
+From: Duoming Zhou <duoming@zju.edu.cn>
+
+[ Upstream commit f8b4687151021db61841af983f1cb7be6915d4ef ]
+
+The original code relies on cancel_delayed_work() in otx2_ptp_destroy(),
+which does not ensure that the delayed work item synctstamp_work has fully
+completed if it was already running. This leads to use-after-free scenarios
+where otx2_ptp is deallocated by otx2_ptp_destroy(), while synctstamp_work
+remains active and attempts to dereference otx2_ptp in otx2_sync_tstamp().
+Furthermore, since the synctstamp_work is cyclic, the likelihood of
+triggering the bug is non-negligible.
+
+A typical race condition is illustrated below:
+
+CPU 0 (cleanup) | CPU 1 (delayed work callback)
+otx2_remove() |
+ otx2_ptp_destroy() | otx2_sync_tstamp()
+ cancel_delayed_work() |
+ kfree(ptp) |
+ | ptp = container_of(...); //UAF
+ | ptp-> //UAF
+
+This is confirmed by a KASAN report:
+
+BUG: KASAN: slab-use-after-free in __run_timer_base.part.0+0x7d7/0x8c0
+Write of size 8 at addr ffff88800aa09a18 by task bash/136
+...
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x55/0x70
+ print_report+0xcf/0x610
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ kasan_report+0xb8/0xf0
+ ? __run_timer_base.part.0+0x7d7/0x8c0
+ __run_timer_base.part.0+0x7d7/0x8c0
+ ? __pfx___run_timer_base.part.0+0x10/0x10
+ ? __pfx_read_tsc+0x10/0x10
+ ? ktime_get+0x60/0x140
+ ? lapic_next_event+0x11/0x20
+ ? clockevents_program_event+0x1d4/0x2a0
+ run_timer_softirq+0xd1/0x190
+ handle_softirqs+0x16a/0x550
+ irq_exit_rcu+0xaf/0xe0
+ sysvec_apic_timer_interrupt+0x70/0x80
+ </IRQ>
+...
+Allocated by task 1:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ __kasan_kmalloc+0x7f/0x90
+ otx2_ptp_init+0xb1/0x860
+ otx2_probe+0x4eb/0xc30
+ local_pci_probe+0xdc/0x190
+ pci_device_probe+0x2fe/0x470
+ really_probe+0x1ca/0x5c0
+ __driver_probe_device+0x248/0x310
+ driver_probe_device+0x44/0x120
+ __driver_attach+0xd2/0x310
+ bus_for_each_dev+0xed/0x170
+ bus_add_driver+0x208/0x500
+ driver_register+0x132/0x460
+ do_one_initcall+0x89/0x300
+ kernel_init_freeable+0x40d/0x720
+ kernel_init+0x1a/0x150
+ ret_from_fork+0x10c/0x1a0
+ ret_from_fork_asm+0x1a/0x30
+
+Freed by task 136:
+ kasan_save_stack+0x24/0x50
+ kasan_save_track+0x14/0x30
+ kasan_save_free_info+0x3a/0x60
+ __kasan_slab_free+0x3f/0x50
+ kfree+0x137/0x370
+ otx2_ptp_destroy+0x38/0x80
+ otx2_remove+0x10d/0x4c0
+ pci_device_remove+0xa6/0x1d0
+ device_release_driver_internal+0xf8/0x210
+ pci_stop_bus_device+0x105/0x150
+ pci_stop_and_remove_bus_device_locked+0x15/0x30
+ remove_store+0xcc/0xe0
+ kernfs_fop_write_iter+0x2c3/0x440
+ vfs_write+0x871/0xd70
+ ksys_write+0xee/0x1c0
+ do_syscall_64+0xac/0x280
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+...
+
+Replace cancel_delayed_work() with cancel_delayed_work_sync() to ensure
+that the delayed work item is properly canceled before the otx2_ptp is
+deallocated.
+
+This bug was initially identified through static analysis. To reproduce
+and test it, I simulated the OcteonTX2 PCI device in QEMU and introduced
+artificial delays within the otx2_sync_tstamp() function to increase the
+likelihood of triggering the bug.
+
+Fixes: 2958d17a8984 ("octeontx2-pf: Add support for ptp 1-step mode on CN10K silicon")
+Signed-off-by: Duoming Zhou <duoming@zju.edu.cn>
+Reviewed-by: Vadim Fedorenko <vadim.fedorenko@linux.dev>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+index 3a72b0793d4a7..82725923555c5 100644
+--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
++++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c
+@@ -476,7 +476,7 @@ void otx2_ptp_destroy(struct otx2_nic *pfvf)
+ if (!ptp)
+ return;
+
+- cancel_delayed_work(&pfvf->ptp->synctstamp_work);
++ cancel_delayed_work_sync(&pfvf->ptp->synctstamp_work);
+
+ ptp_clock_unregister(ptp->ptp_clock);
+ kfree(ptp);
+--
+2.51.0
+
--- /dev/null
+From 56cb5109363255a5625a35e1262075519a1134c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Aug 2025 17:50:14 +0200
+Subject: pcmcia: omap_cf: Mark driver struct with __refdata to prevent section
+ mismatch
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Geert Uytterhoeven <geert+renesas@glider.be>
+
+[ Upstream commit d1dfcdd30140c031ae091868fb5bed084132bca1 ]
+
+As described in the added code comment, a reference to .exit.text is ok
+for drivers registered via platform_driver_probe(). Make this explicit
+to prevent the following section mismatch warning
+
+ WARNING: modpost: drivers/pcmcia/omap_cf: section mismatch in reference: omap_cf_driver+0x4 (section: .data) -> omap_cf_remove (section: .exit.text)
+
+that triggers on an omap1_defconfig + CONFIG_OMAP_CF=m build.
+
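+A minimal sketch of the pattern for reference (hypothetical "foo"
+driver; the actual change is in the hunk below):
+
+  #include <linux/init.h>
+  #include <linux/platform_device.h>
+
+  static int foo_probe(struct platform_device *pdev)
+  {
+          return 0;
+  }
+
+  static int __exit foo_remove(struct platform_device *pdev)
+  {
+          return 0;
+  }
+
+  /* .remove points into .exit.text; this is fine because the driver is
+   * only ever bound through platform_driver_probe() and thus cannot be
+   * unbound at runtime. __refdata tells modpost the reference is
+   * intentional.
+   */
+  static struct platform_driver foo_driver __refdata = {
+          .remove = __exit_p(foo_remove),
+          .driver = { .name = "foo" },
+  };
+
+  static int __init foo_init(void)
+  {
+          return platform_driver_probe(&foo_driver, foo_probe);
+  }
+  module_init(foo_init);
+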
+Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Acked-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Reviewed-by: Uwe Kleine-König <u.kleine-koenig@baylibre.com>
+Signed-off-by: Dominik Brodowski <linux@dominikbrodowski.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pcmcia/omap_cf.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
+index 25382612e48ac..a8e0dd5d30c43 100644
+--- a/drivers/pcmcia/omap_cf.c
++++ b/drivers/pcmcia/omap_cf.c
+@@ -305,7 +305,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
+ return 0;
+ }
+
+-static struct platform_driver omap_cf_driver = {
++/*
++ * omap_cf_remove() lives in .exit.text. For drivers registered via
++ * platform_driver_probe() this is ok because they cannot get unbound at
++ * runtime. So mark the driver struct with __refdata to prevent modpost
++ * triggering a section mismatch warning.
++ */
++static struct platform_driver omap_cf_driver __refdata = {
+ .driver = {
+ .name = driver_name,
+ },
+--
+2.51.0
+
--- /dev/null
+From 9a486f70cd1872ee820911093c872d44464cce0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 10 Sep 2025 16:29:16 +1000
+Subject: qed: Don't collect too many protection override GRC elements
+
+From: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+
+[ Upstream commit 56c0a2a9ddc2f5b5078c5fb0f81ab76bbc3d4c37 ]
+
+In the protection override dump path, the firmware can return far too
+many GRC elements, resulting in a write past the end of the
+previously-kmalloc'ed dump buffer.
+
+This will result in a kernel panic with reason:
+
+ BUG: unable to handle kernel paging request at ADDRESS
+
+where "ADDRESS" is just past the end of the protection override dump
+buffer. The start address of the buffer is:
+ p_hwfn->cdev->dbg_features[DBG_FEATURE_PROTECTION_OVERRIDE].dump_buf
+and the size of the buffer is buf_size in the same data structure.
+
+The panic can be arrived at from either the qede Ethernet driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc02662ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc0267792 [qed]
+ qed_dbg_feature at ffffffffc026aa8f [qed]
+ qed_dbg_all_data at ffffffffc026b211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc027298a [qed]
+ devlink_health_do_dump at ffffffff82497f61
+ devlink_health_report at ffffffff8249cf29
+ qed_report_fatal_error at ffffffffc0272baf [qed]
+ qede_sp_task at ffffffffc045ed32 [qede]
+ process_one_work at ffffffff81d19783
+
+or the qedf storage driver path:
+
+ [exception RIP: qed_grc_dump_addr_range+0x108]
+ qed_protection_override_dump at ffffffffc068b2ed [qed]
+ qed_dbg_protection_override_dump at ffffffffc068c792 [qed]
+ qed_dbg_feature at ffffffffc068fa8f [qed]
+ qed_dbg_all_data at ffffffffc0690211 [qed]
+ qed_fw_fatal_reporter_dump at ffffffffc069798a [qed]
+ devlink_health_do_dump at ffffffff8aa95e51
+ devlink_health_report at ffffffff8aa9ae19
+ qed_report_fatal_error at ffffffffc0697baf [qed]
+ qed_hw_err_notify at ffffffffc06d32d7 [qed]
+ qed_spq_post at ffffffffc06b1011 [qed]
+ qed_fcoe_destroy_conn at ffffffffc06b2e91 [qed]
+ qedf_cleanup_fcport at ffffffffc05e7597 [qedf]
+ qedf_rport_event_handler at ffffffffc05e7bf7 [qedf]
+ fc_rport_work at ffffffffc02da715 [libfc]
+ process_one_work at ffffffff8a319663
+
+Resolve this by clamping the firmware's return value to the maximum
+number of legal elements the firmware should return.
+
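+The defensive pattern is a plain min() clamp on the element count read
+back from the device; a hedged sketch with hypothetical names and sizes:
+
+  #include <linux/minmax.h>
+  #include <linux/types.h>
+
+  #define ELEMENT_DWORDS          2
+  #define MAX_WINDOW_DWORDS       (16 * ELEMENT_DWORDS)  /* legal max */
+
+  static u32 window_dump_dwords(u32 valid_elements_from_fw)
+  {
+          /* never trust the device-reported count: clamp it so the
+           * dump loop cannot write past the preallocated dump buffer
+           */
+          return min(valid_elements_from_fw * ELEMENT_DWORDS,
+                     (u32)MAX_WINDOW_DWORDS);
+  }
+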
+Fixes: d52c89f120de8 ("qed*: Utilize FW 8.37.2.0")
+Signed-off-by: Jamie Bainbridge <jamie.bainbridge@gmail.com>
+Link: https://patch.msgid.link/f8e1182934aa274c18d0682a12dbaf347595469c.1757485536.git.jamie.bainbridge@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/qlogic/qed/qed_debug.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+index cdcead614e9fa..ae421c2707785 100644
+--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
++++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
+@@ -4461,10 +4461,11 @@ static enum dbg_status qed_protection_override_dump(struct qed_hwfn *p_hwfn,
+ goto out;
+ }
+
+- /* Add override window info to buffer */
++ /* Add override window info to buffer, preventing buffer overflow */
+ override_window_dwords =
+- qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
+- PROTECTION_OVERRIDE_ELEMENT_DWORDS;
++ min(qed_rd(p_hwfn, p_ptt, GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW) *
++ PROTECTION_OVERRIDE_ELEMENT_DWORDS,
++ PROTECTION_OVERRIDE_DEPTH_DWORDS);
+ if (override_window_dwords) {
+ addr = BYTES_TO_DWORDS(GRC_REG_PROTECTION_OVERRIDE_WINDOW);
+ offset += qed_grc_dump_addr_range(p_hwfn,
+--
+2.51.0
+
--- /dev/null
+From 73e42d6b6e5b6abb3d4a87f440da240704d43850 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 17 Sep 2025 16:48:54 +0300
+Subject: Revert "net/mlx5e: Update and set Xon/Xoff upon port speed set"
+
+From: Tariq Toukan <tariqt@nvidia.com>
+
+[ Upstream commit 3fbfe251cc9f6d391944282cdb9bcf0bd02e01f8 ]
+
+This reverts commit d24341740fe48add8a227a753e68b6eedf4b385a.
+It causes errors when trying to configure QoS, as well as
+loss of L2 connectivity (on multi-host devices).
+
+Reported-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://lore.kernel.org/20250910170011.70528106@kernel.org
+Fixes: d24341740fe4 ("net/mlx5e: Update and set Xon/Xoff upon port speed set")
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index d378aa55f22f9..09ba60b2e744b 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -109,8 +109,6 @@ void mlx5e_update_carrier(struct mlx5e_priv *priv)
+ if (up) {
+ netdev_info(priv->netdev, "Link up\n");
+ netif_carrier_on(priv->netdev);
+- mlx5e_port_manual_buffer_config(priv, 0, priv->netdev->mtu,
+- NULL, NULL, NULL);
+ } else {
+ netdev_info(priv->netdev, "Link down\n");
+ netif_carrier_off(priv->netdev);
+--
+2.51.0
+
--- /dev/null
+From d1d8cfbbd078d818418880d9e42404f0704d91b5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Sep 2025 14:52:24 +0200
+Subject: selftests: mptcp: sockopt: fix error messages
+
+From: Geliang Tang <tanggeliang@kylinos.cn>
+
+[ Upstream commit b86418beade11d45540a2d20c4ec1128849b6c27 ]
+
+This patch fixes several issues in the error reporting of the MPTCP sockopt
+selftest:
+
+1. Fix diff not printed: The error messages for counter mismatches passed
+   the actual difference ('diff') as an argument, but the corresponding
+   specifier was missing from the format string. Displaying it makes
+   debugging easier (see the sketch after this list).
+
+2. Fix variable usage: The error check for 'mptcpi_bytes_acked' incorrectly
+ used 'ret2' (sent bytes) for both the expected value and the difference
+ calculation. It now correctly uses 'ret' (received bytes), which is the
+ expected value for bytes_acked.
+
+3. Fix off-by-one in diff: The calculation for the 'mptcpi_rcv_delta' diff
+ was 's.mptcpi_rcv_delta - ret', which is off-by-one. It has been
+ corrected to 's.mptcpi_rcv_delta - (ret + 1)' to match the expected
+ value in the condition above it.
+
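+A hedged userspace sketch of the corrected reporting pattern (plain
+printf() stands in for the selftest's xerror() helper; values are
+illustrative):
+
+  #include <inttypes.h>
+  #include <stdint.h>
+  #include <stdio.h>
+
+  static void report_mismatch(uint64_t got, uint64_t expect)
+  {
+          /* one conversion specifier per argument: got, expect, diff */
+          printf("counter %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64 "\n",
+                 got, expect, (int64_t)(got - expect));
+  }
+
+  int main(void)
+  {
+          report_mismatch(10, 11);   /* now prints "diff -1" */
+          return 0;
+  }
+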
+Fixes: 5dcff89e1455 ("selftests: mptcp: explicitly tests aggregate counters")
+Signed-off-by: Geliang Tang <tanggeliang@kylinos.cn>
+Reviewed-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20250912-net-mptcp-pm-uspace-deny_join_id0-v1-5-40171884ade8@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/net/mptcp/mptcp_sockopt.c | 16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+index 926b0be87c990..1dc2bd6ee4a50 100644
+--- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
++++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.c
+@@ -658,22 +658,26 @@ static void process_one_client(int fd, int pipefd)
+
+ do_getsockopts(&s, fd, ret, ret2);
+ if (s.mptcpi_rcv_delta != (uint64_t)ret + 1)
+- xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64, s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - ret);
++ xerror("mptcpi_rcv_delta %" PRIu64 ", expect %" PRIu64 ", diff %" PRId64,
++ s.mptcpi_rcv_delta, ret + 1, s.mptcpi_rcv_delta - (ret + 1));
+
+ /* be nice when running on top of older kernel */
+ if (s.pkt_stats_avail) {
+ if (s.last_sample.mptcpi_bytes_sent != ret2)
+- xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64,
++ xerror("mptcpi_bytes_sent %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_sent, ret2,
+ s.last_sample.mptcpi_bytes_sent - ret2);
+ if (s.last_sample.mptcpi_bytes_received != ret)
+- xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64,
++ xerror("mptcpi_bytes_received %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
+ s.last_sample.mptcpi_bytes_received, ret,
+ s.last_sample.mptcpi_bytes_received - ret);
+ if (s.last_sample.mptcpi_bytes_acked != ret)
+- xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64,
+- s.last_sample.mptcpi_bytes_acked, ret2,
+- s.last_sample.mptcpi_bytes_acked - ret2);
++ xerror("mptcpi_bytes_acked %" PRIu64 ", expect %" PRIu64
++ ", diff %" PRId64,
++ s.last_sample.mptcpi_bytes_acked, ret,
++ s.last_sample.mptcpi_bytes_acked - ret);
+ }
+
+ close(fd);
+--
+2.51.0
+
--- /dev/null
+wifi-wilc1000-avoid-buffer-overflow-in-wid-string-co.patch
+alsa-firewire-motu-drop-epollout-from-poll-return-va.patch
+wifi-mac80211-increase-scan_ies_len-for-s1g.patch
+wifi-mac80211-fix-incorrect-type-for-ret.patch
+pcmcia-omap_cf-mark-driver-struct-with-__refdata-to-.patch
+cgroup-split-cgroup_destroy_wq-into-3-workqueues.patch
+btrfs-fix-invalid-extref-key-setup-when-replaying-de.patch
+um-virtio_uml-fix-use-after-free-after-put_device-in.patch
+dpaa2-switch-fix-buffer-pool-seeding-for-control-tra.patch
+qed-don-t-collect-too-many-protection-override-grc-e.patch
+bonding-set-random-address-only-when-slaves-already-.patch
+mptcp-set-remote_deny_join_id0-on-syn-recv.patch
+mptcp-tfo-record-deny-join-id0-info.patch
+selftests-mptcp-sockopt-fix-error-messages.patch
+net-natsemi-fix-rx_dropped-double-accounting-on-neti.patch
+i40e-remove-redundant-memory-barrier-when-cleaning-t.patch
+net-mlx5e-consider-aggregated-port-speed-during-rate.patch
+net-mlx5e-harden-uplink-netdev-access-against-device.patch
+bonding-don-t-set-oif-to-bond-dev-when-getting-ns-ta.patch
+tcp-clear-tcp_sk-sk-fastopen_rsk-in-tcp_disconnect.patch
+tls-make-sure-to-abort-the-stream-if-headers-are-bog.patch
+revert-net-mlx5e-update-and-set-xon-xoff-upon-port-s.patch
+net-liquidio-fix-overflow-in-octeon_init_instr_queue.patch
+cnic-fix-use-after-free-bugs-in-cnic_delete_task.patch
+octeontx2-pf-fix-use-after-free-bugs-in-otx2_sync_ts.patch
--- /dev/null
+From 5060368a1cecf71e5f4ab0d50eedcb5963687989 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Sep 2025 17:56:46 +0000
+Subject: tcp: Clear tcp_sk(sk)->fastopen_rsk in tcp_disconnect().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 45c8a6cc2bcd780e634a6ba8e46bffbdf1fc5c01 ]
+
+syzbot reported the splat below where a socket had tcp_sk(sk)->fastopen_rsk
+in the TCP_ESTABLISHED state. [0]
+
+syzbot reused the server-side TCP Fast Open socket as a new client before
+the TFO socket completed the 3WHS:
+
+ 1. accept()
+ 2. connect(AF_UNSPEC)
+ 3. connect() to another destination
+
+As of accept(), sk->sk_state is TCP_SYN_RECV, and tcp_disconnect() changes
+it to TCP_CLOSE and makes connect() possible, which restarts timers.
+
+Since tcp_disconnect() forgot to clear tcp_sk(sk)->fastopen_rsk, the
+retransmit timer triggered the warning and the intended packet was not
+retransmitted.
+
+Let's call reqsk_fastopen_remove() in tcp_disconnect().
+
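+A hedged userspace sketch of the reuse sequence above (error handling
+omitted; the listener is assumed to have TCP_FASTOPEN enabled):
+
+  #include <netinet/in.h>
+  #include <sys/socket.h>
+
+  void reuse_tfo_child(int tfo_listener, struct sockaddr_in *new_dst)
+  {
+          struct sockaddr unspec = { .sa_family = AF_UNSPEC };
+          int fd;
+
+          /* 1. child socket of a TFO listener, still in TCP_SYN_RECV */
+          fd = accept(tfo_listener, NULL, NULL);
+
+          /* 2. connect(AF_UNSPEC) runs tcp_disconnect() on it */
+          connect(fd, &unspec, sizeof(unspec));
+
+          /* 3. reconnect to a new destination; timers are restarted */
+          connect(fd, (struct sockaddr *)new_dst, sizeof(*new_dst));
+  }
+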
+[0]:
+WARNING: CPU: 2 PID: 0 at net/ipv4/tcp_timer.c:542 tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Modules linked in:
+CPU: 2 UID: 0 PID: 0 Comm: swapper/2 Not tainted 6.17.0-rc5-g201825fb4278 #62 PREEMPT(voluntary)
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.3-debian-1.16.3-2 04/01/2014
+RIP: 0010:tcp_retransmit_timer (net/ipv4/tcp_timer.c:542 (discriminator 7))
+Code: 41 55 41 54 55 53 48 8b af b8 08 00 00 48 89 fb 48 85 ed 0f 84 55 01 00 00 0f b6 47 12 3c 03 74 0c 0f b6 47 12 3c 04 74 04 90 <0f> 0b 90 48 8b 85 c0 00 00 00 48 89 ef 48 8b 40 30 e8 6a 4f 06 3e
+RSP: 0018:ffffc900002f8d40 EFLAGS: 00010293
+RAX: 0000000000000002 RBX: ffff888106911400 RCX: 0000000000000017
+RDX: 0000000002517619 RSI: ffffffff83764080 RDI: ffff888106911400
+RBP: ffff888106d5c000 R08: 0000000000000001 R09: ffffc900002f8de8
+R10: 00000000000000c2 R11: ffffc900002f8ff8 R12: ffff888106911540
+R13: ffff888106911480 R14: ffff888106911840 R15: ffffc900002f8de0
+FS: 0000000000000000(0000) GS:ffff88907b768000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f8044d69d90 CR3: 0000000002c30003 CR4: 0000000000370ef0
+Call Trace:
+ <IRQ>
+ tcp_write_timer (net/ipv4/tcp_timer.c:738)
+ call_timer_fn (kernel/time/timer.c:1747)
+ __run_timers (kernel/time/timer.c:1799 kernel/time/timer.c:2372)
+ timer_expire_remote (kernel/time/timer.c:2385 kernel/time/timer.c:2376 kernel/time/timer.c:2135)
+ tmigr_handle_remote_up (kernel/time/timer_migration.c:944 kernel/time/timer_migration.c:1035)
+ __walk_groups.isra.0 (kernel/time/timer_migration.c:533 (discriminator 1))
+ tmigr_handle_remote (kernel/time/timer_migration.c:1096)
+ handle_softirqs (./arch/x86/include/asm/jump_label.h:36 ./include/trace/events/irq.h:142 kernel/softirq.c:580)
+ irq_exit_rcu (kernel/softirq.c:614 kernel/softirq.c:453 kernel/softirq.c:680 kernel/softirq.c:696)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1050 (discriminator 35) arch/x86/kernel/apic/apic.c:1050 (discriminator 35))
+ </IRQ>
+
+Fixes: 8336886f786f ("tcp: TCP Fast Open Server - support TFO listeners")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250915175800.118793-2-kuniyu@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/tcp.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index a4bbe959d1e25..40a2f172be2cb 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -3011,6 +3011,7 @@ int tcp_disconnect(struct sock *sk, int flags)
+ struct inet_connection_sock *icsk = inet_csk(sk);
+ struct tcp_sock *tp = tcp_sk(sk);
+ int old_state = sk->sk_state;
++ struct request_sock *req;
+ u32 seq;
+
+ if (old_state != TCP_CLOSE)
+@@ -3121,6 +3122,10 @@ int tcp_disconnect(struct sock *sk, int flags)
+
+
+ /* Clean up fastopen related fields */
++ req = rcu_dereference_protected(tp->fastopen_rsk,
++ lockdep_sock_is_held(sk));
++ if (req)
++ reqsk_fastopen_remove(sk, req, false);
+ tcp_free_fastopen_req(tp);
+ inet_clear_bit(DEFER_CONNECT, sk);
+ tp->fastopen_client_fail = 0;
+--
+2.51.0
+
--- /dev/null
+From 8a15873cf4b7801800c710d6ccf57762291b4e39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Sep 2025 17:28:13 -0700
+Subject: tls: make sure to abort the stream if headers are bogus
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 0aeb54ac4cd5cf8f60131b4d9ec0b6dc9c27b20d ]
+
+Normally we wait for the socket to buffer up the whole record
+before we service it. If the socket has a tiny buffer, however,
+we read out the data sooner, to prevent connection stalls.
+Make sure that we abort the connection when we find out late
+that the record is actually invalid. Retrying the parsing is
+fine in itself, but since we copy some more data each time
+before we parse, we can overflow the allocated skb space.
+
+Constructing a scenario in which we're under pressure without
+enough data in the socket to parse the length upfront is quite
+hard. syzbot figured out a way to do this by serving us the header
+in small OOB sends, and then filling in the recvbuf with a large
+normal send.
+
+Make sure that tls_rx_msg_size() aborts strp, if we reach
+an invalid record there's really no way to recover.
+
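+A minimal sketch of the resulting control flow (hypothetical parser type
+and helpers, not the tls code itself): once the header turns out to be
+bogus, the parser is stopped rather than asked to retry on the same
+bytes:
+
+  #include <linux/types.h>
+
+  struct parser {
+          bool stopped;
+          int err;
+  };
+
+  static void parser_abort(struct parser *p, int err)
+  {
+          if (p->stopped)
+                  return;
+          p->stopped = true;
+          p->err = err;       /* surface the error to the socket owner */
+  }
+
+  static int parser_msg_size(struct parser *p, int parsed)
+  {
+          if (parsed < 0)
+                  parser_abort(p, parsed);   /* bogus header: no retry */
+          return parsed;
+  }
+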
+Reported-by: Lee Jones <lee@kernel.org>
+Fixes: 84c61fe1a75b ("tls: rx: do not use the standard strparser")
+Reviewed-by: Sabrina Dubroca <sd@queasysnail.net>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Link: https://patch.msgid.link/20250917002814.1743558-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tls/tls.h | 1 +
+ net/tls/tls_strp.c | 14 +++++++++-----
+ net/tls/tls_sw.c | 3 +--
+ 3 files changed, 11 insertions(+), 7 deletions(-)
+
+diff --git a/net/tls/tls.h b/net/tls/tls.h
+index 5dc61c85c076e..a3c5c5a59fda6 100644
+--- a/net/tls/tls.h
++++ b/net/tls/tls.h
+@@ -141,6 +141,7 @@ void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
+ int wait_on_pending_writer(struct sock *sk, long *timeo);
+ void tls_err_abort(struct sock *sk, int err);
++void tls_strp_abort_strp(struct tls_strparser *strp, int err);
+
+ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
+ void tls_update_rx_zc_capable(struct tls_context *tls_ctx);
+diff --git a/net/tls/tls_strp.c b/net/tls/tls_strp.c
+index 6ce64a6e4495e..ae723cd6af397 100644
+--- a/net/tls/tls_strp.c
++++ b/net/tls/tls_strp.c
+@@ -12,7 +12,7 @@
+
+ static struct workqueue_struct *tls_strp_wq;
+
+-static void tls_strp_abort_strp(struct tls_strparser *strp, int err)
++void tls_strp_abort_strp(struct tls_strparser *strp, int err)
+ {
+ if (strp->stopped)
+ return;
+@@ -210,11 +210,17 @@ static int tls_strp_copyin_frag(struct tls_strparser *strp, struct sk_buff *skb,
+ struct sk_buff *in_skb, unsigned int offset,
+ size_t in_len)
+ {
++ unsigned int nfrag = skb->len / PAGE_SIZE;
+ size_t len, chunk;
+ skb_frag_t *frag;
+ int sz;
+
+- frag = &skb_shinfo(skb)->frags[skb->len / PAGE_SIZE];
++ if (unlikely(nfrag >= skb_shinfo(skb)->nr_frags)) {
++ DEBUG_NET_WARN_ON_ONCE(1);
++ return -EMSGSIZE;
++ }
++
++ frag = &skb_shinfo(skb)->frags[nfrag];
+
+ len = in_len;
+ /* First make sure we got the header */
+@@ -519,10 +525,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
+ tls_strp_load_anchor_with_queue(strp, inq);
+ if (!strp->stm.full_len) {
+ sz = tls_rx_msg_size(strp, strp->anchor);
+- if (sz < 0) {
+- tls_strp_abort_strp(strp, sz);
++ if (sz < 0)
+ return sz;
+- }
+
+ strp->stm.full_len = sz;
+
+diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
+index 27ce1feb79e14..435235a351e2f 100644
+--- a/net/tls/tls_sw.c
++++ b/net/tls/tls_sw.c
+@@ -2441,8 +2441,7 @@ int tls_rx_msg_size(struct tls_strparser *strp, struct sk_buff *skb)
+ return data_len + TLS_HEADER_SIZE;
+
+ read_failure:
+- tls_err_abort(strp->sk, ret);
+-
++ tls_strp_abort_strp(strp, ret);
+ return ret;
+ }
+
+--
+2.51.0
+
--- /dev/null
+From a6ce622659c2db1c80e7b5ad59091ea451a36712 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Aug 2025 15:00:51 +0800
+Subject: um: virtio_uml: Fix use-after-free after put_device in probe
+
+From: Miaoqian Lin <linmq006@gmail.com>
+
+[ Upstream commit 7ebf70cf181651fe3f2e44e95e7e5073d594c9c0 ]
+
+When register_virtio_device() fails in virtio_uml_probe(),
+the code sets vu_dev->registered = 1 even though
+the device was not successfully registered.
+This can lead to use-after-free or other issues.
+
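+A minimal sketch of the corrected error path (hypothetical device
+wrapper; device_register()/put_device() follow the same contract as
+register_virtio_device()):
+
+  #include <linux/device.h>
+
+  struct foo_dev {                     /* hypothetical wrapper */
+          struct device dev;
+          bool registered;
+  };
+
+  static int foo_register(struct foo_dev *f)
+  {
+          int rc = device_register(&f->dev);
+
+          if (rc) {
+                  /* on failure the only valid step is dropping the
+                   * reference; 'registered' must stay false so the
+                   * remove path never unregisters a dead device
+                   */
+                  put_device(&f->dev);
+                  return rc;
+          }
+
+          f->registered = true;
+          return 0;
+  }
+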
+Fixes: 04e5b1fb0183 ("um: virtio: Remove device on disconnect")
+Signed-off-by: Miaoqian Lin <linmq006@gmail.com>
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/virtio_uml.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/arch/um/drivers/virtio_uml.c b/arch/um/drivers/virtio_uml.c
+index 8adca2000e519..d790acfc2c674 100644
+--- a/arch/um/drivers/virtio_uml.c
++++ b/arch/um/drivers/virtio_uml.c
+@@ -1229,10 +1229,12 @@ static int virtio_uml_probe(struct platform_device *pdev)
+ device_set_wakeup_capable(&vu_dev->vdev.dev, true);
+
+ rc = register_virtio_device(&vu_dev->vdev);
+- if (rc)
++ if (rc) {
+ put_device(&vu_dev->vdev.dev);
++ return rc;
++ }
+ vu_dev->registered = 1;
+- return rc;
++ return 0;
+
+ error_init:
+ os_close_file(vu_dev->sock);
+--
+2.51.0
+
--- /dev/null
+From 657f6c6117dfabcc48ce472f1180d53a18e5a415 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Aug 2025 10:29:11 +0800
+Subject: wifi: mac80211: fix incorrect type for ret
+
+From: Liao Yuanhong <liaoyuanhong@vivo.com>
+
+[ Upstream commit a33b375ab5b3a9897a0ab76be8258d9f6b748628 ]
+
+The variable ret is declared as a u32 type, but it is assigned a value
+of -EOPNOTSUPP. Since unsigned types cannot correctly represent negative
+values, the type of ret should be changed to int.
+
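+A small standalone illustration of the problem (userspace C; on Linux
+-EOPNOTSUPP is -95):
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned int uret = -95;   /* like "u32 ret = -EOPNOTSUPP" */
+          int ret = -95;             /* the signed equivalent */
+
+          /* prints "u32: 4294967201, int: -95" on a 32-bit unsigned
+           * int; a caller checking "ret < 0" never sees the error in
+           * the unsigned case
+           */
+          printf("u32: %u, int: %d\n", uret, ret);
+          return 0;
+  }
+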
+Signed-off-by: Liao Yuanhong <liaoyuanhong@vivo.com>
+Link: https://patch.msgid.link/20250825022911.139377-1-liaoyuanhong@vivo.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/driver-ops.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h
+index 78aa3bc51586e..5f8f72ca2769c 100644
+--- a/net/mac80211/driver-ops.h
++++ b/net/mac80211/driver-ops.h
+@@ -1273,7 +1273,7 @@ drv_get_ftm_responder_stats(struct ieee80211_local *local,
+ struct ieee80211_sub_if_data *sdata,
+ struct cfg80211_ftm_responder_stats *ftm_stats)
+ {
+- u32 ret = -EOPNOTSUPP;
++ int ret = -EOPNOTSUPP;
+
+ if (local->ops->get_ftm_responder_stats)
+ ret = local->ops->get_ftm_responder_stats(&local->hw,
+--
+2.51.0
+
--- /dev/null
+From c97be7e89d0ee6e3c273b483f2165940ee5f4ee2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Aug 2025 18:54:37 +1000
+Subject: wifi: mac80211: increase scan_ies_len for S1G
+
+From: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+
+[ Upstream commit 7e2f3213e85eba00acb4cfe6d71647892d63c3a1 ]
+
+Currently the S1G capability element is not taken into account
+for the scan_ies_len, which leads to a buffer length validation
+failure in ieee80211_prep_hw_scan() and subsequent WARN in
+__ieee80211_start_scan(). This prevents hw scanning from functioning.
+To fix this, account for the S1G capability length as well.
+
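+The bookkeeping follows the usual element layout of 1 byte element ID
+plus 1 byte length plus the body; a hedged sketch with a stand-in body
+size:
+
+  #include <linux/types.h>
+
+  struct s1g_cap_body {       /* stand-in for struct ieee80211_s1g_cap */
+          u8 capab_info[10];
+          u8 supp_mcs_nss[5];
+  };
+
+  static size_t add_s1g_scan_ies_len(size_t scan_ies_len, bool supp_s1g)
+  {
+          /* 2 bytes of element header plus the fixed-size body */
+          if (supp_s1g)
+                  scan_ies_len += 2 + sizeof(struct s1g_cap_body);
+          return scan_ies_len;
+  }
+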
+Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+Link: https://patch.msgid.link/20250826085437.3493-1-lachlan.hodges@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/main.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/net/mac80211/main.c b/net/mac80211/main.c
+index 3a6fff98748b8..80b143bde93d6 100644
+--- a/net/mac80211/main.c
++++ b/net/mac80211/main.c
+@@ -965,7 +965,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ int result, i;
+ enum nl80211_band band;
+ int channels, max_bitrates;
+- bool supp_ht, supp_vht, supp_he, supp_eht;
++ bool supp_ht, supp_vht, supp_he, supp_eht, supp_s1g;
+ struct cfg80211_chan_def dflt_chandef = {};
+
+ if (ieee80211_hw_check(hw, QUEUE_CONTROL) &&
+@@ -1081,6 +1081,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ supp_vht = false;
+ supp_he = false;
+ supp_eht = false;
++ supp_s1g = false;
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband;
+
+@@ -1127,6 +1128,7 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ max_bitrates = sband->n_bitrates;
+ supp_ht = supp_ht || sband->ht_cap.ht_supported;
+ supp_vht = supp_vht || sband->vht_cap.vht_supported;
++ supp_s1g = supp_s1g || sband->s1g_cap.s1g;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *iftd;
+@@ -1253,6 +1255,9 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
+ local->scan_ies_len +=
+ 2 + sizeof(struct ieee80211_vht_cap);
+
++ if (supp_s1g)
++ local->scan_ies_len += 2 + sizeof(struct ieee80211_s1g_cap);
++
+ /*
+ * HE cap element is variable in size - set len to allow max size */
+ if (supp_he) {
+--
+2.51.0
+
--- /dev/null
+From 572f8e10b15c0ec580dee97736a33ad9800a7579 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Aug 2025 22:58:43 +0000
+Subject: wifi: wilc1000: avoid buffer overflow in WID string configuration
+
+From: Ajay.Kathat@microchip.com <Ajay.Kathat@microchip.com>
+
+[ Upstream commit fe9e4d0c39311d0f97b024147a0d155333f388b5 ]
+
+Fix the following copy overflow warning identified by the Smatch checker.
+
+ drivers/net/wireless/microchip/wilc1000/wlan_cfg.c:184 wilc_wlan_parse_response_frame()
+ error: '__memcpy()' 'cfg->s[i]->str' copy overflow (512 vs 65537)
+
+This patch introduces size checks before accessing the memory buffer.
+The checks are based on the WID type of the data received from the
+firmware. For WID string configuration, the size limit is determined by
+the individual element size in 'struct wilc_cfg_str_vals', which is
+maintained in the 'len' field of 'struct wilc_cfg_str'.
+
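+A minimal sketch of the string-WID bounds check (hypothetical buffer
+names; the real change is in the hunks below):
+
+  #include <linux/string.h>
+  #include <linux/types.h>
+
+  /* copy a length-prefixed response payload only if it fits both the
+   * destination slot (dst_len) and the received frame (frame_size)
+   */
+  static bool copy_wid_str(u8 *dst, u16 dst_len,
+                           const u8 *info, int frame_size)
+  {
+          /* little-endian 16-bit length field plus its 2-byte header */
+          u16 len = 2 + (info[2] | (info[3] << 8));
+
+          if (len > dst_len || len + 2 > frame_size)
+                  return false;
+
+          memcpy(dst, &info[2], len);
+          return true;
+  }
+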
+Reported-by: Dan Carpenter <dan.carpenter@linaro.org>
+Closes: https://lore.kernel.org/linux-wireless/aLFbr9Yu9j_TQTey@stanley.mountain
+Suggested-by: Dan Carpenter <dan.carpenter@linaro.org>
+Signed-off-by: Ajay Singh <ajay.kathat@microchip.com>
+Link: https://patch.msgid.link/20250829225829.5423-1-ajay.kathat@microchip.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../wireless/microchip/wilc1000/wlan_cfg.c | 37 ++++++++++++++-----
+ .../wireless/microchip/wilc1000/wlan_cfg.h | 5 ++-
+ 2 files changed, 30 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+index 131388886acbf..cfabd5aebb540 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
+@@ -41,10 +41,10 @@ static const struct wilc_cfg_word g_cfg_word[] = {
+ };
+
+ static const struct wilc_cfg_str g_cfg_str[] = {
+- {WID_FIRMWARE_VERSION, NULL},
+- {WID_MAC_ADDR, NULL},
+- {WID_ASSOC_RES_INFO, NULL},
+- {WID_NIL, NULL}
++ {WID_FIRMWARE_VERSION, 0, NULL},
++ {WID_MAC_ADDR, 0, NULL},
++ {WID_ASSOC_RES_INFO, 0, NULL},
++ {WID_NIL, 0, NULL}
+ };
+
+ #define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R'
+@@ -147,44 +147,58 @@ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size)
+
+ switch (FIELD_GET(WILC_WID_TYPE, wid)) {
+ case WID_CHAR:
++ len = 3;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid)
+ i++;
+
+ if (cfg->b[i].id == wid)
+ cfg->b[i].val = info[4];
+
+- len = 3;
+ break;
+
+ case WID_SHORT:
++ len = 4;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid)
+ i++;
+
+ if (cfg->hw[i].id == wid)
+ cfg->hw[i].val = get_unaligned_le16(&info[4]);
+
+- len = 4;
+ break;
+
+ case WID_INT:
++ len = 6;
++ if (len + 2 > size)
++ return;
++
+ while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid)
+ i++;
+
+ if (cfg->w[i].id == wid)
+ cfg->w[i].val = get_unaligned_le32(&info[4]);
+
+- len = 6;
+ break;
+
+ case WID_STR:
++ len = 2 + get_unaligned_le16(&info[2]);
++
+ while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid)
+ i++;
+
+- if (cfg->s[i].id == wid)
++ if (cfg->s[i].id == wid) {
++ if (len > cfg->s[i].len || (len + 2 > size))
++ return;
++
+ memcpy(cfg->s[i].str, &info[2],
+- get_unaligned_le16(&info[2]) + 2);
++ len);
++ }
+
+- len = 2 + get_unaligned_le16(&info[2]);
+ break;
+
+ default:
+@@ -384,12 +398,15 @@ int wilc_wlan_cfg_init(struct wilc *wl)
+ /* store the string cfg parameters */
+ wl->cfg.s[i].id = WID_FIRMWARE_VERSION;
+ wl->cfg.s[i].str = str_vals->firmware_version;
++ wl->cfg.s[i].len = sizeof(str_vals->firmware_version);
+ i++;
+ wl->cfg.s[i].id = WID_MAC_ADDR;
+ wl->cfg.s[i].str = str_vals->mac_address;
++ wl->cfg.s[i].len = sizeof(str_vals->mac_address);
+ i++;
+ wl->cfg.s[i].id = WID_ASSOC_RES_INFO;
+ wl->cfg.s[i].str = str_vals->assoc_rsp;
++ wl->cfg.s[i].len = sizeof(str_vals->assoc_rsp);
+ i++;
+ wl->cfg.s[i].id = WID_NIL;
+ wl->cfg.s[i].str = NULL;
+diff --git a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+index 7038b74f8e8ff..5ae74bced7d74 100644
+--- a/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
++++ b/drivers/net/wireless/microchip/wilc1000/wlan_cfg.h
+@@ -24,12 +24,13 @@ struct wilc_cfg_word {
+
+ struct wilc_cfg_str {
+ u16 id;
++ u16 len;
+ u8 *str;
+ };
+
+ struct wilc_cfg_str_vals {
+- u8 mac_address[7];
+- u8 firmware_version[129];
++ u8 mac_address[8];
++ u8 firmware_version[130];
+ u8 assoc_rsp[WILC_MAX_ASSOC_RESP_FRAME_SIZE];
+ };
+
+--
+2.51.0
+