]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.12
authorSasha Levin <sashal@kernel.org>
Mon, 11 Aug 2025 04:39:18 +0000 (00:39 -0400)
committerSasha Levin <sashal@kernel.org>
Mon, 11 Aug 2025 04:39:18 +0000 (00:39 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
44 files changed:
queue-6.12/alsa-hda-ca0132-fix-missing-error-handling-in-ca0132.patch [new file with mode: 0644]
queue-6.12/asoc-tas2781-fix-the-wrong-step-for-tlv-on-tas2781.patch [new file with mode: 0644]
queue-6.12/benet-fix-bug-when-creating-vfs.patch [new file with mode: 0644]
queue-6.12/block-ensure-discard_granularity-is-zero-when-discar.patch [new file with mode: 0644]
queue-6.12/block-fix-default-io-priority-if-there-is-no-io-cont.patch [new file with mode: 0644]
queue-6.12/eth-fbnic-remove-the-debugging-trick-of-super-high-p.patch [new file with mode: 0644]
queue-6.12/ipv6-reject-malicious-packets-in-ipv6_gso_segment.patch [new file with mode: 0644]
queue-6.12/irqchip-build-imx_mu_msi-only-on-arm.patch [new file with mode: 0644]
queue-6.12/md-md-cluster-handle-remove-message-earlier.patch [new file with mode: 0644]
queue-6.12/net-drop-ufo-packets-in-udp_rcv_segment.patch [new file with mode: 0644]
queue-6.12/net-ipa-add-ipa-v5.1-and-v5.5-to-ipa_version_string.patch [new file with mode: 0644]
queue-6.12/net-mdio-mdio-bcm-unimac-correct-rate-fallback-logic.patch [new file with mode: 0644]
queue-6.12/net-mlx5-correctly-set-gso_segs-when-lro-is-used.patch [new file with mode: 0644]
queue-6.12/net-sched-mqprio-fix-stack-out-of-bounds-write-in-tc.patch [new file with mode: 0644]
queue-6.12/net-sched-taprio-enforce-minimum-value-for-picos_per.patch [new file with mode: 0644]
queue-6.12/netlink-specs-ethtool-fix-module-eeprom-input-output.patch [new file with mode: 0644]
queue-6.12/netpoll-prevent-hanging-napi-when-netcons-gets-enabl.patch [new file with mode: 0644]
queue-6.12/nfs-fix-filehandle-bounds-checking-in-nfs_fh_to_dent.patch [new file with mode: 0644]
queue-6.12/nfs-fix-wakeup-of-__nfs_lookup_revalidate-in-unblock.patch [new file with mode: 0644]
queue-6.12/nfs-fixup-allocation-flags-for-nfsiod-s-__gfp_noretr.patch [new file with mode: 0644]
queue-6.12/nfsv4.2-another-fix-for-listxattr.patch [new file with mode: 0644]
queue-6.12/nvmet-exit-debugfs-after-discovery-subsystem-exits.patch [new file with mode: 0644]
queue-6.12/nvmet-initialize-discovery-subsys-after-debugfs-is-i.patch [new file with mode: 0644]
queue-6.12/phy-mscc-fix-parsing-of-unicast-frames.patch [new file with mode: 0644]
queue-6.12/pnfs-flexfiles-don-t-attempt-pnfs-on-fatal-ds-errors.patch [new file with mode: 0644]
queue-6.12/pptp-ensure-minimal-skb-length-in-pptp_xmit.patch [new file with mode: 0644]
queue-6.12/pptp-fix-pptp_xmit-error-path.patch [new file with mode: 0644]
queue-6.12/s390-ap-unmask-slcf-bit-in-card-and-queue-ap-functio.patch [new file with mode: 0644]
queue-6.12/s390-mm-allocate-page-table-with-page_size-granulari.patch [new file with mode: 0644]
queue-6.12/sched-add-test_and_clear_wake_up_bit-and-atomic_dec_.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/smb-client-let-recv_done-avoid-touching-data_transfe.patch [new file with mode: 0644]
queue-6.12/smb-client-let-recv_done-cleanup-before-notifying-th.patch [new file with mode: 0644]
queue-6.12/smb-client-let-send_done-cleanup-before-calling-smbd.patch [new file with mode: 0644]
queue-6.12/smb-client-make-sure-we-call-ib_dma_unmap_single-onl.patch [new file with mode: 0644]
queue-6.12/smb-client-remove-separate-empty_packet_queue.patch [new file with mode: 0644]
queue-6.12/smb-client-return-an-error-if-rdma_connect-does-not-.patch [new file with mode: 0644]
queue-6.12/smb-server-let-recv_done-avoid-touching-data_transfe.patch [new file with mode: 0644]
queue-6.12/smb-server-let-recv_done-consistently-call-put_recvm.patch [new file with mode: 0644]
queue-6.12/smb-server-make-sure-we-call-ib_dma_unmap_single-onl.patch [new file with mode: 0644]
queue-6.12/smb-server-remove-separate-empty_recvmsg_queue.patch [new file with mode: 0644]
queue-6.12/spi-cs42l43-property-entry-should-be-a-null-terminat.patch [new file with mode: 0644]
queue-6.12/sunrpc-fix-client-side-handling-of-tls-alerts.patch [new file with mode: 0644]
queue-6.12/x86-irq-plug-vector-setup-race.patch [new file with mode: 0644]

diff --git a/queue-6.12/alsa-hda-ca0132-fix-missing-error-handling-in-ca0132.patch b/queue-6.12/alsa-hda-ca0132-fix-missing-error-handling-in-ca0132.patch
new file mode 100644 (file)
index 0000000..0e0df23
--- /dev/null
@@ -0,0 +1,47 @@
+From c4c153ceadcbb276da897447b95ee72dd5ddcf5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 11:44:22 +0200
+Subject: ALSA: hda/ca0132: Fix missing error handling in
+ ca0132_alt_select_out()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 9f320dfb0ffc555aa2eac8331dee0c2c16f67633 ]
+
+There are a couple of cases where the error is ignored or the error
+code isn't propagated in ca0132_alt_select_out().  Fix those.
+
+Fixes: def3f0a5c700 ("ALSA: hda/ca0132 - Add quirk output selection structures.")
+Link: https://patch.msgid.link/20250806094423.8843-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_ca0132.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index d40197fb5fbd..77432e06f3e3 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4802,7 +4802,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
+       if (err < 0)
+               goto exit;
+-      if (ca0132_alt_select_out_quirk_set(codec) < 0)
++      err = ca0132_alt_select_out_quirk_set(codec);
++      if (err < 0)
+               goto exit;
+       switch (spec->cur_out_type) {
+@@ -4892,6 +4893,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
+                               spec->bass_redirection_val);
+       else
+               err = ca0132_alt_surround_set_bass_redirection(codec, 0);
++      if (err < 0)
++              goto exit;
+       /* Unmute DSP now that we're done with output selection. */
+       err = dspio_set_uint_param(codec, 0x96,
+-- 
+2.39.5
+
diff --git a/queue-6.12/asoc-tas2781-fix-the-wrong-step-for-tlv-on-tas2781.patch b/queue-6.12/asoc-tas2781-fix-the-wrong-step-for-tlv-on-tas2781.patch
new file mode 100644 (file)
index 0000000..6c0ca23
--- /dev/null
@@ -0,0 +1,36 @@
+From 8457926eb67a42dc6b619036b12abab3ba06e95b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 10:16:18 +0800
+Subject: ASoC: tas2781: Fix the wrong step for TLV on tas2781
+
+From: Baojun Xu <baojun.xu@ti.com>
+
+[ Upstream commit 9843cf7b6fd6f938c16fde51e86dd0e3ddbefb12 ]
+
+The step for TLV on tas2781, should be 50 (-0.5dB).
+
+Fixes: 678f38eba1f2 ("ASoC: tas2781: Add Header file for tas2781 driver")
+Signed-off-by: Baojun Xu <baojun.xu@ti.com>
+Link: https://patch.msgid.link/20250801021618.64627-1-baojun.xu@ti.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/sound/tas2781-tlv.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h
+index d87263e43fdb..ef9b9f19d212 100644
+--- a/include/sound/tas2781-tlv.h
++++ b/include/sound/tas2781-tlv.h
+@@ -15,7 +15,7 @@
+ #ifndef __TAS2781_TLV_H__
+ #define __TAS2781_TLV_H__
+-static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0);
++static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 50, 0);
+ static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0);
+ #endif
+-- 
+2.39.5
+
diff --git a/queue-6.12/benet-fix-bug-when-creating-vfs.patch b/queue-6.12/benet-fix-bug-when-creating-vfs.patch
new file mode 100644 (file)
index 0000000..874582a
--- /dev/null
@@ -0,0 +1,61 @@
+From e009df9618cbaedf210be2c3945d2079be4499ba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 12:13:37 +0200
+Subject: benet: fix BUG when creating VFs
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 5a40f8af2ba1b9bdf46e2db10e8c9710538fbc63 ]
+
+benet crashes as soon as SRIOV VFs are created:
+
+ kernel BUG at mm/vmalloc.c:3457!
+ Oops: invalid opcode: 0000 [#1] SMP KASAN NOPTI
+ CPU: 4 UID: 0 PID: 7408 Comm: test.sh Kdump: loaded Not tainted 6.16.0+ #1 PREEMPT(voluntary)
+ [...]
+ RIP: 0010:vunmap+0x5f/0x70
+ [...]
+ Call Trace:
+  <TASK>
+  __iommu_dma_free+0xe8/0x1c0
+  be_cmd_set_mac_list+0x3fe/0x640 [be2net]
+  be_cmd_set_mac+0xaf/0x110 [be2net]
+  be_vf_eth_addr_config+0x19f/0x330 [be2net]
+  be_vf_setup+0x4f7/0x990 [be2net]
+  be_pci_sriov_configure+0x3a1/0x470 [be2net]
+  sriov_numvfs_store+0x20b/0x380
+  kernfs_fop_write_iter+0x354/0x530
+  vfs_write+0x9b9/0xf60
+  ksys_write+0xf3/0x1d0
+  do_syscall_64+0x8c/0x3d0
+
+be_cmd_set_mac_list() calls dma_free_coherent() under a spin_lock_bh.
+Fix it by freeing only after the lock has been released.
+
+Fixes: 1a82d19ca2d6 ("be2net: fix sleeping while atomic bugs in be_ndo_bridge_getlink")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250801101338.72502-1-mschmidt@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/emulex/benet/be_cmds.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index a89aa4ac0a06..779f1324bb5f 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -3852,8 +3852,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+       status = be_mcc_notify_wait(adapter);
+ err:
+-      dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+       spin_unlock_bh(&adapter->mcc_lock);
++      dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+       return status;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/block-ensure-discard_granularity-is-zero-when-discar.patch b/queue-6.12/block-ensure-discard_granularity-is-zero-when-discar.patch
new file mode 100644 (file)
index 0000000..8fabad0
--- /dev/null
@@ -0,0 +1,61 @@
+From 8ae6a575c3464294373852ab060a3c6a0680d5ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 08:22:28 -0700
+Subject: block: ensure discard_granularity is zero when discard is not
+ supported
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit fad6551fcf537375702b9af012508156a16a1ff7 ]
+
+Documentation/ABI/stable/sysfs-block states:
+
+  What: /sys/block/<disk>/queue/discard_granularity
+  [...]
+  A discard_granularity of 0 means that the device does not support
+  discard functionality.
+
+but this got broken when sorting out the block limits updates.  Fix this
+by setting the discard_granularity limit to zero when the combined
+max_discard_sectors is zero.
+
+Fixes: 3c407dc723bb ("block: default the discard granularity to sector size")
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
+Link: https://lore.kernel.org/r/20250731152228.873923-1-hch@lst.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ block/blk-settings.c | 13 ++++++++++---
+ 1 file changed, 10 insertions(+), 3 deletions(-)
+
+diff --git a/block/blk-settings.c b/block/blk-settings.c
+index 7858c92b4483..22ce7fa4fe20 100644
+--- a/block/blk-settings.c
++++ b/block/blk-settings.c
+@@ -320,12 +320,19 @@ static int blk_validate_limits(struct queue_limits *lim)
+       lim->max_discard_sectors =
+               min(lim->max_hw_discard_sectors, lim->max_user_discard_sectors);
++      /*
++       * When discard is not supported, discard_granularity should be reported
++       * as 0 to userspace.
++       */
++      if (lim->max_discard_sectors)
++              lim->discard_granularity =
++                      max(lim->discard_granularity, lim->physical_block_size);
++      else
++              lim->discard_granularity = 0;
++
+       if (!lim->max_discard_segments)
+               lim->max_discard_segments = 1;
+-      if (lim->discard_granularity < lim->physical_block_size)
+-              lim->discard_granularity = lim->physical_block_size;
+-
+       /*
+        * By default there is no limit on the segment boundary alignment,
+        * but if there is one it can't be smaller than the page size as
+-- 
+2.39.5
+
diff --git a/queue-6.12/block-fix-default-io-priority-if-there-is-no-io-cont.patch b/queue-6.12/block-fix-default-io-priority-if-there-is-no-io-cont.patch
new file mode 100644 (file)
index 0000000..08ba05b
--- /dev/null
@@ -0,0 +1,54 @@
+From e0b55fe381ed64c760cba7b4c309118e24b2986b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 21:49:53 -0700
+Subject: block: Fix default IO priority if there is no IO context
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit e2ba58ccc9099514380c3300cbc0750b5055fc1c ]
+
+Upstream commit 53889bcaf536 ("block: make __get_task_ioprio() easier to
+read") changes the IO priority returned to the caller if no IO context
+is defined for the task. Prior to this commit, the returned IO priority
+was determined by task_nice_ioclass() and task_nice_ioprio(). Now it is
+always IOPRIO_DEFAULT, which translates to IOPRIO_CLASS_NONE with priority
+0. However, task_nice_ioclass() returns IOPRIO_CLASS_IDLE, IOPRIO_CLASS_RT,
+or IOPRIO_CLASS_BE depending on the task scheduling policy, and
+task_nice_ioprio() returns a value determined by task_nice(). This causes
+regressions in test code checking the IO priority and class of IO
+operations on tasks with no IO context.
+
+Fix the problem by returning the IO priority calculated from
+task_nice_ioclass() and task_nice_ioprio() if no IO context is defined
+to match earlier behavior.
+
+Fixes: 53889bcaf536 ("block: make __get_task_ioprio() easier to read")
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Yu Kuai <yukuai3@huawei.com>
+Reviewed-by: Damien Le Moal <dlemoal@kernel.org>
+Link: https://lore.kernel.org/r/20250731044953.1852690-1-linux@roeck-us.net
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/ioprio.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
+index b25377b6ea98..5210e8371238 100644
+--- a/include/linux/ioprio.h
++++ b/include/linux/ioprio.h
+@@ -60,7 +60,8 @@ static inline int __get_task_ioprio(struct task_struct *p)
+       int prio;
+       if (!ioc)
+-              return IOPRIO_DEFAULT;
++              return IOPRIO_PRIO_VALUE(task_nice_ioclass(p),
++                                       task_nice_ioprio(p));
+       if (p != current)
+               lockdep_assert_held(&p->alloc_lock);
+-- 
+2.39.5
+
diff --git a/queue-6.12/eth-fbnic-remove-the-debugging-trick-of-super-high-p.patch b/queue-6.12/eth-fbnic-remove-the-debugging-trick-of-super-high-p.patch
new file mode 100644 (file)
index 0000000..e029af4
--- /dev/null
@@ -0,0 +1,66 @@
+From 03a8571e5c6368fae5229e765c1be15ae0d7ad4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 10:07:54 -0700
+Subject: eth: fbnic: remove the debugging trick of super high page bias
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit e407fceeaf1b2959892b4fc9b584843d3f2bfc05 ]
+
+Alex added page bias of LONG_MAX, which is admittedly quite
+a clever way of catching overflows of the pp ref count.
+The page pool code was "optimized" to leave the ref at 1
+for freed pages so it can't catch basic bugs by itself any more.
+(Something we should probably address under DEBUG_NET...)
+
+Unfortunately for fbnic since commit f7dc3248dcfb ("skbuff: Optimization
+of SKB coalescing for page pool") core _may_ actually take two extra
+pp refcounts, if one of them is returned before driver gives up the bias
+the ret < 0 check in page_pool_unref_netmem() will trigger.
+
+While at it add a FBNIC_ to the name of the driver constant.
+
+Fixes: 0cb4c0a13723 ("eth: fbnic: Implement Rx queue alloc/start/stop/free")
+Link: https://patch.msgid.link/20250801170754.2439577-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/meta/fbnic/fbnic_txrx.c | 4 ++--
+ drivers/net/ethernet/meta/fbnic/fbnic_txrx.h | 6 ++----
+ 2 files changed, 4 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+index 6a6d7e22f1a7..fc52db8e36f2 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c
+@@ -389,8 +389,8 @@ static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx,
+ {
+       struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx];
+-      page_pool_fragment_page(page, PAGECNT_BIAS_MAX);
+-      rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX;
++      page_pool_fragment_page(page, FBNIC_PAGECNT_BIAS_MAX);
++      rx_buf->pagecnt_bias = FBNIC_PAGECNT_BIAS_MAX;
+       rx_buf->page = page;
+ }
+diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+index 2f91f68d11d5..05cde71db9df 100644
+--- a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
++++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h
+@@ -59,10 +59,8 @@ struct fbnic_queue_stats {
+       struct u64_stats_sync syncp;
+ };
+-/* Pagecnt bias is long max to reserve the last bit to catch overflow
+- * cases where if we overcharge the bias it will flip over to be negative.
+- */
+-#define PAGECNT_BIAS_MAX      LONG_MAX
++#define FBNIC_PAGECNT_BIAS_MAX        PAGE_SIZE
++
+ struct fbnic_rx_buf {
+       struct page *page;
+       long pagecnt_bias;
+-- 
+2.39.5
+
diff --git a/queue-6.12/ipv6-reject-malicious-packets-in-ipv6_gso_segment.patch b/queue-6.12/ipv6-reject-malicious-packets-in-ipv6_gso_segment.patch
new file mode 100644 (file)
index 0000000..04d7255
--- /dev/null
@@ -0,0 +1,103 @@
+From 406b85cfb6ce052f6b718072cc6dfc7e2913d430 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 13:17:38 +0000
+Subject: ipv6: reject malicious packets in ipv6_gso_segment()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d45cf1e7d7180256e17c9ce88e32e8061a7887fe ]
+
+syzbot was able to craft a packet with very long IPv6 extension headers
+leading to an overflow of skb->transport_header.
+
+This 16bit field has a limited range.
+
+Add skb_reset_transport_header_careful() helper and use it
+from ipv6_gso_segment()
+
+WARNING: CPU: 0 PID: 5871 at ./include/linux/skbuff.h:3032 skb_reset_transport_header include/linux/skbuff.h:3032 [inline]
+WARNING: CPU: 0 PID: 5871 at ./include/linux/skbuff.h:3032 ipv6_gso_segment+0x15e2/0x21e0 net/ipv6/ip6_offload.c:151
+Modules linked in:
+CPU: 0 UID: 0 PID: 5871 Comm: syz-executor211 Not tainted 6.16.0-rc6-syzkaller-g7abc678e3084 #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
+ RIP: 0010:skb_reset_transport_header include/linux/skbuff.h:3032 [inline]
+ RIP: 0010:ipv6_gso_segment+0x15e2/0x21e0 net/ipv6/ip6_offload.c:151
+Call Trace:
+ <TASK>
+  skb_mac_gso_segment+0x31c/0x640 net/core/gso.c:53
+  nsh_gso_segment+0x54a/0xe10 net/nsh/nsh.c:110
+  skb_mac_gso_segment+0x31c/0x640 net/core/gso.c:53
+  __skb_gso_segment+0x342/0x510 net/core/gso.c:124
+  skb_gso_segment include/net/gso.h:83 [inline]
+  validate_xmit_skb+0x857/0x11b0 net/core/dev.c:3950
+  validate_xmit_skb_list+0x84/0x120 net/core/dev.c:4000
+  sch_direct_xmit+0xd3/0x4b0 net/sched/sch_generic.c:329
+  __dev_xmit_skb net/core/dev.c:4102 [inline]
+  __dev_queue_xmit+0x17b6/0x3a70 net/core/dev.c:4679
+
+Fixes: d1da932ed4ec ("ipv6: Separate ipv6 offload support")
+Reported-by: syzbot+af43e647fd835acc02df@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/688a1a05.050a0220.5d226.0008.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250730131738.3385939-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skbuff.h | 23 +++++++++++++++++++++++
+ net/ipv6/ip6_offload.c |  4 +++-
+ 2 files changed, 26 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index 39f1d16f3628..a726a698aac4 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2991,6 +2991,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
+       skb->transport_header = skb->data - skb->head;
+ }
++/**
++ * skb_reset_transport_header_careful - conditionally reset transport header
++ * @skb: buffer to alter
++ *
++ * Hardened version of skb_reset_transport_header().
++ *
++ * Returns: true if the operation was a success.
++ */
++static inline bool __must_check
++skb_reset_transport_header_careful(struct sk_buff *skb)
++{
++      long offset = skb->data - skb->head;
++
++      if (unlikely(offset != (typeof(skb->transport_header))offset))
++              return false;
++
++      if (unlikely(offset == (typeof(skb->transport_header))~0U))
++              return false;
++
++      skb->transport_header = offset;
++      return true;
++}
++
+ static inline void skb_set_transport_header(struct sk_buff *skb,
+                                           const int offset)
+ {
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 9822163428b0..fce91183797a 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -148,7 +148,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+       ops = rcu_dereference(inet6_offloads[proto]);
+       if (likely(ops && ops->callbacks.gso_segment)) {
+-              skb_reset_transport_header(skb);
++              if (!skb_reset_transport_header_careful(skb))
++                      goto out;
++
+               segs = ops->callbacks.gso_segment(skb, features);
+               if (!segs)
+                       skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+-- 
+2.39.5
+
diff --git a/queue-6.12/irqchip-build-imx_mu_msi-only-on-arm.patch b/queue-6.12/irqchip-build-imx_mu_msi-only-on-arm.patch
new file mode 100644 (file)
index 0000000..1c4251a
--- /dev/null
@@ -0,0 +1,48 @@
+From 0c237a641eaaa2acf006ce84fd9faaa19b91a55d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 18:09:49 +0200
+Subject: irqchip: Build IMX_MU_MSI only on ARM
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 3b6a18f0da8720d612d8a682ea5c55870da068e0 ]
+
+Compile-testing IMX_MU_MSI on x86 without PCI_MSI support results in a
+build failure:
+
+drivers/gpio/gpio-sprd.c:8:
+include/linux/gpio/driver.h:41:33: error: field 'msiinfo' has incomplete type
+drivers/iommu/iommufd/viommu.c:4:
+include/linux/msi.h:528:33: error: field 'alloc_info' has incomplete type
+
+Tighten the dependency further to only allow compile testing on Arm.
+This could be refined further to allow certain x86 configs.
+
+This was submitted before to address a different build failure, which was
+fixed differently, but the problem has now returned in a different form.
+
+Fixes: 70afdab904d2d1e6 ("irqchip: Add IMX MU MSI controller driver")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20250805160952.4006075-1-arnd@kernel.org
+Link: https://lore.kernel.org/all/20221215164109.761427-1-arnd@kernel.org/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index a799a89195c5..5d5b3cf381b9 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -506,6 +506,7 @@ config IMX_MU_MSI
+       tristate "i.MX MU used as MSI controller"
+       depends on OF && HAS_IOMEM
+       depends on ARCH_MXC || COMPILE_TEST
++      depends on ARM || ARM64
+       default m if ARCH_MXC
+       select IRQ_DOMAIN
+       select IRQ_DOMAIN_HIERARCHY
+-- 
+2.39.5
+
diff --git a/queue-6.12/md-md-cluster-handle-remove-message-earlier.patch b/queue-6.12/md-md-cluster-handle-remove-message-earlier.patch
new file mode 100644 (file)
index 0000000..22e2e97
--- /dev/null
@@ -0,0 +1,86 @@
+From 153da971b186d172daca27de9310dc8b3ce318ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 12:21:40 +0800
+Subject: md/md-cluster: handle REMOVE message earlier
+
+From: Heming Zhao <heming.zhao@suse.com>
+
+[ Upstream commit 948b1fe12005d39e2b49087b50e5ee55c9a8f76f ]
+
+Commit a1fd37f97808 ("md: Don't wait for MD_RECOVERY_NEEDED for
+HOT_REMOVE_DISK ioctl") introduced a regression in the md_cluster
+module. (Failed cases 02r1_Manage_re-add & 02r10_Manage_re-add)
+
+Consider a 2-node cluster:
+- node1 set faulty & remove command on a disk.
+- node2 must correctly update the array metadata.
+
+Before a1fd37f97808, on node1, the delay between msg:METADATA_UPDATED
+(triggered by faulty) and msg:REMOVE was sufficient for node2 to
+reload the disk info (written by node1).
+After a1fd37f97808, node1 no longer waits between faulty and remove,
+causing it to send msg:REMOVE while node2 is still reloading disk info.
+This often results in node2 failing to remove the faulty disk.
+
+== how to trigger ==
+
+set up a 2-node cluster (node1 & node2) with disks vdc & vdd.
+
+on node1:
+mdadm -CR /dev/md0 -l1 -b clustered -n2 /dev/vdc /dev/vdd --assume-clean
+ssh node2-ip mdadm -A /dev/md0 /dev/vdc /dev/vdd
+mdadm --manage /dev/md0 --fail /dev/vdc --remove /dev/vdc
+
+check array status on both nodes with "mdadm -D /dev/md0".
+node1 output:
+    Number   Major   Minor   RaidDevice State
+       -       0        0        0      removed
+       1     254       48        1      active sync   /dev/vdd
+node2 output:
+    Number   Major   Minor   RaidDevice State
+       -       0        0        0      removed
+       1     254       48        1      active sync   /dev/vdd
+
+       0     254       32        -      faulty   /dev/vdc
+
+Fixes: a1fd37f97808 ("md: Don't wait for MD_RECOVERY_NEEDED for HOT_REMOVE_DISK ioctl")
+Signed-off-by: Heming Zhao <heming.zhao@suse.com>
+Reviewed-by: Su Yue <glass.su@suse.com>
+Link: https://lore.kernel.org/linux-raid/20250728042145.9989-1-heming.zhao@suse.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index 7809b951e09a..4b3291723670 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -9702,8 +9702,8 @@ void md_check_recovery(struct mddev *mddev)
+                        * remove disk.
+                        */
+                       rdev_for_each_safe(rdev, tmp, mddev) {
+-                              if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
+-                                              rdev->raid_disk < 0)
++                              if (rdev->raid_disk < 0 &&
++                                  test_and_clear_bit(ClusterRemove, &rdev->flags))
+                                       md_kick_rdev_from_array(rdev);
+                       }
+               }
+@@ -10000,8 +10000,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+       /* Check for change of roles in the active devices */
+       rdev_for_each_safe(rdev2, tmp, mddev) {
+-              if (test_bit(Faulty, &rdev2->flags))
++              if (test_bit(Faulty, &rdev2->flags)) {
++                      if (test_bit(ClusterRemove, &rdev2->flags))
++                              set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+                       continue;
++              }
+               /* Check if the roles changed */
+               role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-drop-ufo-packets-in-udp_rcv_segment.patch b/queue-6.12/net-drop-ufo-packets-in-udp_rcv_segment.patch
new file mode 100644 (file)
index 0000000..4127c0a
--- /dev/null
@@ -0,0 +1,122 @@
+From 4ec4f7343f137476f1d680f7e8b6de3a68ecaddf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 18:14:58 +0800
+Subject: net: drop UFO packets in udp_rcv_segment()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d46e51f1c78b9ab9323610feb14238d06d46d519 ]
+
+When sending a packet with virtio_net_hdr to tun device, if the gso_type
+in virtio_net_hdr is SKB_GSO_UDP and the gso_size is less than udphdr
+size, below crash may happen.
+
+  ------------[ cut here ]------------
+  kernel BUG at net/core/skbuff.c:4572!
+  Oops: invalid opcode: 0000 [#1] SMP NOPTI
+  CPU: 0 UID: 0 PID: 62 Comm: mytest Not tainted 6.16.0-rc7 #203 PREEMPT(voluntary)
+  Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+  RIP: 0010:skb_pull_rcsum+0x8e/0xa0
+  Code: 00 00 5b c3 cc cc cc cc 8b 93 88 00 00 00 f7 da e8 37 44 38 00 f7 d8 89 83 88 00 00 00 48 8b 83 c8 00 00 00 5b c3 cc cc cc cc <0f> 0b 0f 0b 66 66 2e 0f 1f 84 00 000
+  RSP: 0018:ffffc900001fba38 EFLAGS: 00000297
+  RAX: 0000000000000004 RBX: ffff8880040c1000 RCX: ffffc900001fb948
+  RDX: ffff888003e6d700 RSI: 0000000000000008 RDI: ffff88800411a062
+  RBP: ffff8880040c1000 R08: 0000000000000000 R09: 0000000000000001
+  R10: ffff888003606c00 R11: 0000000000000001 R12: 0000000000000000
+  R13: ffff888004060900 R14: ffff888004050000 R15: ffff888004060900
+  FS:  000000002406d3c0(0000) GS:ffff888084a19000(0000) knlGS:0000000000000000
+  CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+  CR2: 0000000020000040 CR3: 0000000004007000 CR4: 00000000000006f0
+  Call Trace:
+   <TASK>
+   udp_queue_rcv_one_skb+0x176/0x4b0 net/ipv4/udp.c:2445
+   udp_queue_rcv_skb+0x155/0x1f0 net/ipv4/udp.c:2475
+   udp_unicast_rcv_skb+0x71/0x90 net/ipv4/udp.c:2626
+   __udp4_lib_rcv+0x433/0xb00 net/ipv4/udp.c:2690
+   ip_protocol_deliver_rcu+0xa6/0x160 net/ipv4/ip_input.c:205
+   ip_local_deliver_finish+0x72/0x90 net/ipv4/ip_input.c:233
+   ip_sublist_rcv_finish+0x5f/0x70 net/ipv4/ip_input.c:579
+   ip_sublist_rcv+0x122/0x1b0 net/ipv4/ip_input.c:636
+   ip_list_rcv+0xf7/0x130 net/ipv4/ip_input.c:670
+   __netif_receive_skb_list_core+0x21d/0x240 net/core/dev.c:6067
+   netif_receive_skb_list_internal+0x186/0x2b0 net/core/dev.c:6210
+   napi_complete_done+0x78/0x180 net/core/dev.c:6580
+   tun_get_user+0xa63/0x1120 drivers/net/tun.c:1909
+   tun_chr_write_iter+0x65/0xb0 drivers/net/tun.c:1984
+   vfs_write+0x300/0x420 fs/read_write.c:593
+   ksys_write+0x60/0xd0 fs/read_write.c:686
+   do_syscall_64+0x50/0x1c0 arch/x86/entry/syscall_64.c:63
+   </TASK>
+
+To trigger gso segment in udp_queue_rcv_skb(), we should also set option
+UDP_ENCAP_ESPINUDP to enable udp_sk(sk)->encap_rcv. When the encap_rcv
+hook return 1 in udp_queue_rcv_one_skb(), udp_csum_pull_header() will try
+to pull udphdr, but the skb size has been segmented to gso size, which
+leads to this crash.
+
+Previous commit cf329aa42b66 ("udp: cope with UDP GRO packet misdirection")
+introduces segmentation in UDP receive path only for GRO, which was never
+intended to be used for UFO, so drop UFO packets in udp_rcv_segment().
+
+Link: https://lore.kernel.org/netdev/20250724083005.3918375-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250729123907.3318425-1-wangliang74@huawei.com/
+Fixes: cf329aa42b66 ("udp: cope with UDP GRO packet misdirection")
+Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250730101458.3470788-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/udp.h | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 61222545ab1c..0b2e3a5e01d8 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -461,6 +461,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ {
+       netdev_features_t features = NETIF_F_SG;
+       struct sk_buff *segs;
++      int drop_count;
++
++      /*
++       * Segmentation in UDP receive path is only for UDP GRO, drop udp
++       * fragmentation offload (UFO) packets.
++       */
++      if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
++              drop_count = 1;
++              goto drop;
++      }
+       /* Avoid csum recalculation by skb_segment unless userspace explicitly
+        * asks for the final checksum values
+@@ -484,16 +494,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+        */
+       segs = __skb_gso_segment(skb, features, false);
+       if (IS_ERR_OR_NULL(segs)) {
+-              int segs_nr = skb_shinfo(skb)->gso_segs;
+-
+-              atomic_add(segs_nr, &sk->sk_drops);
+-              SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+-              kfree_skb(skb);
+-              return NULL;
++              drop_count = skb_shinfo(skb)->gso_segs;
++              goto drop;
+       }
+       consume_skb(skb);
+       return segs;
++
++drop:
++      atomic_add(drop_count, &sk->sk_drops);
++      SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
++      kfree_skb(skb);
++      return NULL;
+ }
+ static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-ipa-add-ipa-v5.1-and-v5.5-to-ipa_version_string.patch b/queue-6.12/net-ipa-add-ipa-v5.1-and-v5.5-to-ipa_version_string.patch
new file mode 100644 (file)
index 0000000..9e998d3
--- /dev/null
@@ -0,0 +1,46 @@
+From e9cb1a2f2193e8a4c44580c6bc84189893404c72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 10:35:24 +0200
+Subject: net: ipa: add IPA v5.1 and v5.5 to ipa_version_string()
+
+From: Luca Weiss <luca.weiss@fairphone.com>
+
+[ Upstream commit f2aa00e4f65efcf25ff6bc8198e21f031e7b9b1b ]
+
+Handle the case for v5.1 and v5.5 instead of returning "0.0".
+
+Also reword the comment below since I don't see any evidence of such a
+check happening, and - since 5.5 has been missing - can happen.
+
+Fixes: 3aac8ec1c028 ("net: ipa: add some new IPA versions")
+Signed-off-by: Luca Weiss <luca.weiss@fairphone.com>
+Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Reviewed-by: Alex Elder <elder@riscstar.com>
+Link: https://patch.msgid.link/20250728-ipa-5-1-5-5-version_string-v1-1-d7a5623d7ece@fairphone.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_sysfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
+index a59bd215494c..a53e9e6f6cdf 100644
+--- a/drivers/net/ipa/ipa_sysfs.c
++++ b/drivers/net/ipa/ipa_sysfs.c
+@@ -37,8 +37,12 @@ static const char *ipa_version_string(struct ipa *ipa)
+               return "4.11";
+       case IPA_VERSION_5_0:
+               return "5.0";
++      case IPA_VERSION_5_1:
++              return "5.1";
++      case IPA_VERSION_5_5:
++              return "5.5";
+       default:
+-              return "0.0";   /* Won't happen (checked at probe time) */
++              return "0.0";   /* Should not happen */
+       }
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-mdio-mdio-bcm-unimac-correct-rate-fallback-logic.patch b/queue-6.12/net-mdio-mdio-bcm-unimac-correct-rate-fallback-logic.patch
new file mode 100644 (file)
index 0000000..dd58aba
--- /dev/null
@@ -0,0 +1,54 @@
+From 2ae5312a50b6a181ee488ce8bce734453fbdf076 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 13:25:33 -0700
+Subject: net: mdio: mdio-bcm-unimac: Correct rate fallback logic
+
+From: Florian Fainelli <florian.fainelli@broadcom.com>
+
+[ Upstream commit a81649a4efd382497bf3d34a623360263adc6993 ]
+
+When the parent clock is a gated clock which has multiple parents, the
+clock provider (clk-scmi typically) might return a rate of 0 since there
+is not one of those particular parent clocks that should be chosen for
+returning a rate. Prior to ee975351cf0c ("net: mdio: mdio-bcm-unimac:
+Manage clock around I/O accesses"), we would not always be passing a
+clock reference depending upon how mdio-bcm-unimac was instantiated. In
+that case, we would take the fallback path where the rate is hard coded
+to 250MHz.
+
+Make sure that we still fallback to using a fixed rate for the divider
+calculation, otherwise we simply ignore the desired MDIO bus clock
+frequency which can prevent us from interfacing with Ethernet PHYs
+properly.
+
+Fixes: ee975351cf0c ("net: mdio: mdio-bcm-unimac: Manage clock around I/O accesses")
+Signed-off-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://patch.msgid.link/20250730202533.3463529-1-florian.fainelli@broadcom.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/mdio/mdio-bcm-unimac.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/mdio/mdio-bcm-unimac.c b/drivers/net/mdio/mdio-bcm-unimac.c
+index b7bc70586ee0..369540b43ada 100644
+--- a/drivers/net/mdio/mdio-bcm-unimac.c
++++ b/drivers/net/mdio/mdio-bcm-unimac.c
+@@ -209,10 +209,9 @@ static int unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
+       if (ret)
+               return ret;
+-      if (!priv->clk)
++      rate = clk_get_rate(priv->clk);
++      if (!rate)
+               rate = 250000000;
+-      else
+-              rate = clk_get_rate(priv->clk);
+       div = (rate / (2 * priv->clk_freq)) - 1;
+       if (div & ~MDIO_CLK_DIV_MASK) {
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-mlx5-correctly-set-gso_segs-when-lro-is-used.patch b/queue-6.12/net-mlx5-correctly-set-gso_segs-when-lro-is-used.patch
new file mode 100644 (file)
index 0000000..ecdaaf5
--- /dev/null
@@ -0,0 +1,59 @@
+From 9ac58bf42178bdda8d76c60ea89ffda74ca9d905 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 11:34:00 -0700
+Subject: net/mlx5: Correctly set gso_segs when LRO is used
+
+From: Christoph Paasch <cpaasch@openai.com>
+
+[ Upstream commit 77bf1c55b2acc7fa3734b14f4561e3d75aea1a90 ]
+
+When gso_segs is left at 0, a number of assumptions will end up being
+incorrect throughout the stack.
+
+For example, in the GRO-path, we set NAPI_GRO_CB()->count to gso_segs.
+So, if a non-LRO'ed packet followed by an LRO'ed packet is being
+processed in GRO, the first one will have NAPI_GRO_CB()->count set to 1 and
+the next one to 0 (in dev_gro_receive()).
+Since commit 531d0d32de3e
+("net/mlx5: Correctly set gso_size when LRO is used")
+these packets will get merged (as their gso_size now matches).
+So, we end up in gro_complete() with NAPI_GRO_CB()->count == 1 and thus
+don't call inet_gro_complete(). Meaning, checksum-validation in
+tcp_checksum_complete() will fail with a "hw csum failure".
+
+Even before the above mentioned commit, incorrect gso_segs means that other
+things like TCP's accounting of incoming packets (tp->segs_in,
+data_segs_in, rcv_ooopack) will be incorrect. Which means that if one
+does bytes_received/data_segs_in, the result will be bigger than the
+MTU.
+
+Fix this by initializing gso_segs correctly when LRO is used.
+
+Fixes: e586b3b0baee ("net/mlx5: Ethernet Datapath files")
+Reported-by: Gal Pressman <gal@nvidia.com>
+Closes: https://lore.kernel.org/netdev/6583783f-f0fb-4fb1-a415-feec8155bc69@nvidia.com/
+Signed-off-by: Christoph Paasch <cpaasch@openai.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250729-mlx5_gso_segs-v1-1-b48c480c1c12@openai.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index 8ed47e7a7515..673043d9ed11 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1569,6 +1569,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+               unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+               skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
++              skb_shinfo(skb)->gso_segs = lro_num_seg;
+               /* Subtract one since we already counted this as one
+                * "regular" packet in mlx5e_complete_rx_cqe()
+                */
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-sched-mqprio-fix-stack-out-of-bounds-write-in-tc.patch b/queue-6.12/net-sched-mqprio-fix-stack-out-of-bounds-write-in-tc.patch
new file mode 100644 (file)
index 0000000..28e2864
--- /dev/null
@@ -0,0 +1,46 @@
+From 33631c59d055b86b4ae37462df8604fb6f8fd31d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 17:18:57 -0700
+Subject: net/sched: mqprio: fix stack out-of-bounds write in tc entry parsing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maher Azzouzi <maherazz04@gmail.com>
+
+[ Upstream commit ffd2dc4c6c49ff4f1e5d34e454a6a55608104c17 ]
+
+TCA_MQPRIO_TC_ENTRY_INDEX is validated using
+NLA_POLICY_MAX(NLA_U32, TC_QOPT_MAX_QUEUE), which allows the value
+TC_QOPT_MAX_QUEUE (16). This leads to a 4-byte out-of-bounds stack
+write in the fp[] array, which only has room for 16 elements (0–15).
+
+Fix this by changing the policy to allow only up to TC_QOPT_MAX_QUEUE - 1.
+
+Fixes: f62af20bed2d ("net/sched: mqprio: allow per-TC user input of FP adminStatus")
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Maher Azzouzi <maherazz04@gmail.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250802001857.2702497-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_mqprio.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 51d4013b6121..f3e5ef9a9592 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
+ static const struct
+ nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
+       [TCA_MQPRIO_TC_ENTRY_INDEX]     = NLA_POLICY_MAX(NLA_U32,
+-                                                       TC_QOPT_MAX_QUEUE),
++                                                       TC_QOPT_MAX_QUEUE - 1),
+       [TCA_MQPRIO_TC_ENTRY_FP]        = NLA_POLICY_RANGE(NLA_U32,
+                                                          TC_FP_EXPRESS,
+                                                          TC_FP_PREEMPTIBLE),
+-- 
+2.39.5
+
diff --git a/queue-6.12/net-sched-taprio-enforce-minimum-value-for-picos_per.patch b/queue-6.12/net-sched-taprio-enforce-minimum-value-for-picos_per.patch
new file mode 100644 (file)
index 0000000..7ab685a
--- /dev/null
@@ -0,0 +1,103 @@
+From 9c4129d8e40dad393f2cd747e3886ef9f9d23b32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 02:31:49 +0900
+Subject: net/sched: taprio: enforce minimum value for picos_per_byte
+
+From: Takamitsu Iwai <takamitz@amazon.co.jp>
+
+[ Upstream commit ae8508b25def57982493c48694ef135973bfabe0 ]
+
+Syzbot reported a WARNING in taprio_get_start_time().
+
+When link speed is 470,589 or greater, q->picos_per_byte becomes too
+small, causing length_to_duration(q, ETH_ZLEN) to return zero.
+
+This zero value leads to validation failures in fill_sched_entry() and
+parse_taprio_schedule(), allowing arbitrary values to be assigned to
+entry->interval and cycle_time. As a result, sched->cycle can become zero.
+
+Since SPEED_800000 is the largest defined speed in
+include/uapi/linux/ethtool.h, this issue can occur in realistic scenarios.
+
+To ensure length_to_duration() returns a non-zero value for minimum-sized
+Ethernet frames (ETH_ZLEN = 60), picos_per_byte must be at least 17
+(60 * 17 > PSEC_PER_NSEC which is 1000).
+
+This patch enforces a minimum value of 17 for picos_per_byte when the
+calculated value would be lower, and adds a warning message to inform
+users that scheduling accuracy may be affected at very high link speeds.
+
+Fixes: fb66df20a720 ("net/sched: taprio: extend minimum interval restriction to entire cycle too")
+Reported-by: syzbot+398e1ee4ca2cac05fddb@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=398e1ee4ca2cac05fddb
+Signed-off-by: Takamitsu Iwai <takamitz@amazon.co.jp>
+Link: https://patch.msgid.link/20250728173149.45585-1-takamitz@amazon.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index 3142715d7e41..1620f0fd78ce 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -43,6 +43,11 @@ static struct static_key_false taprio_have_working_mqprio;
+ #define TAPRIO_SUPPORTED_FLAGS \
+       (TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST | TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+ #define TAPRIO_FLAGS_INVALID U32_MAX
++/* Minimum value for picos_per_byte to ensure non-zero duration
++ * for minimum-sized Ethernet frames (ETH_ZLEN = 60).
++ * 60 * 17 > PSEC_PER_NSEC (1000)
++ */
++#define TAPRIO_PICOS_PER_BYTE_MIN 17
+ struct sched_entry {
+       /* Durations between this GCL entry and the GCL entry where the
+@@ -1284,7 +1289,8 @@ static void taprio_start_sched(struct Qdisc *sch,
+ }
+ static void taprio_set_picos_per_byte(struct net_device *dev,
+-                                    struct taprio_sched *q)
++                                    struct taprio_sched *q,
++                                    struct netlink_ext_ack *extack)
+ {
+       struct ethtool_link_ksettings ecmd;
+       int speed = SPEED_10;
+@@ -1300,6 +1306,15 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
+ skip:
+       picos_per_byte = (USEC_PER_SEC * 8) / speed;
++      if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN) {
++              if (!extack)
++                      pr_warn("Link speed %d is too high. Schedule may be inaccurate.\n",
++                              speed);
++              NL_SET_ERR_MSG_FMT_MOD(extack,
++                                     "Link speed %d is too high. Schedule may be inaccurate.",
++                                     speed);
++              picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;
++      }
+       atomic64_set(&q->picos_per_byte, picos_per_byte);
+       netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+@@ -1324,7 +1339,7 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
+               if (dev != qdisc_dev(q->root))
+                       continue;
+-              taprio_set_picos_per_byte(dev, q);
++              taprio_set_picos_per_byte(dev, q, NULL);
+               stab = rtnl_dereference(q->root->stab);
+@@ -1848,7 +1863,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+       q->flags = taprio_flags;
+       /* Needed for length_to_duration() during netlink attribute parsing */
+-      taprio_set_picos_per_byte(dev, q);
++      taprio_set_picos_per_byte(dev, q, extack);
+       err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
+       if (err < 0)
+-- 
+2.39.5
+
diff --git a/queue-6.12/netlink-specs-ethtool-fix-module-eeprom-input-output.patch b/queue-6.12/netlink-specs-ethtool-fix-module-eeprom-input-output.patch
new file mode 100644 (file)
index 0000000..46e40ce
--- /dev/null
@@ -0,0 +1,50 @@
+From d3908238ae1b5d00147186e68c31821a9be285db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 10:21:37 -0700
+Subject: netlink: specs: ethtool: fix module EEPROM input/output arguments
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 01051012887329ea78eaca19b1d2eac4c9f601b5 ]
+
+Module (SFP) eeprom GET has a lot of input params, they are all
+mistakenly listed as output in the spec. Looks like kernel doesn't
+output them at all. Correct what are the inputs and what the outputs.
+
+Reported-by: Duo Yi <duo@meta.com>
+Fixes: a353318ebf24 ("tools: ynl: populate most of the ethtool spec")
+Acked-by: Stanislav Fomichev <sdf@fomichev.me>
+Link: https://patch.msgid.link/20250730172137.1322351-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/netlink/specs/ethtool.yaml | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
+index f6c5d8214c7e..4936aa5855b1 100644
+--- a/Documentation/netlink/specs/ethtool.yaml
++++ b/Documentation/netlink/specs/ethtool.yaml
+@@ -1682,9 +1682,6 @@ operations:
+       do: &module-eeprom-get-op
+         request:
+-          attributes:
+-            - header
+-        reply:
+           attributes:
+             - header
+             - offset
+@@ -1692,6 +1689,9 @@ operations:
+             - page
+             - bank
+             - i2c-address
++        reply:
++          attributes:
++            - header
+             - data
+       dump: *module-eeprom-get-op
+     -
+-- 
+2.39.5
+
diff --git a/queue-6.12/netpoll-prevent-hanging-napi-when-netcons-gets-enabl.patch b/queue-6.12/netpoll-prevent-hanging-napi-when-netcons-gets-enabl.patch
new file mode 100644 (file)
index 0000000..427726c
--- /dev/null
@@ -0,0 +1,95 @@
+From ffb21b8339e6fe784efdb608c072fe28e3a0f76c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 18:08:46 -0700
+Subject: netpoll: prevent hanging NAPI when netcons gets enabled
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2da4def0f487f24bbb0cece3bb2bcdcb918a0b72 ]
+
+Paolo spotted hangs in NIPA running driver tests against virtio.
+The tests hang in virtnet_close() -> virtnet_napi_tx_disable().
+
+The problem is only reproducible if running multiple of our tests
+in sequence (I used TEST_PROGS="xdp.py ping.py netcons_basic.sh \
+netpoll_basic.py stats.py"). Initial suspicion was that this is
+a simple case of double-disable of NAPI, but instrumenting the
+code reveals:
+
+ Deadlocked on NAPI ffff888007cd82c0 (virtnet_poll_tx):
+   state: 0x37, disabled: false, owner: 0, listed: false, weight: 64
+
+The NAPI was not in fact disabled, owner is 0 (rather than -1),
+so the NAPI "thinks" it's scheduled for CPU 0 but it's not listed
+(!list_empty(&n->poll_list) => false). It seems odd that normal NAPI
+processing would wedge itself like this.
+
+Better suspicion is that netpoll gets enabled while NAPI is polling,
+and also grabs the NAPI instance. This confuses napi_complete_done():
+
+  [netpoll]                                   [normal NAPI]
+                                        napi_poll()
+                                          have = netpoll_poll_lock()
+                                            rcu_access_pointer(dev->npinfo)
+                                              return NULL # no netpoll
+                                          __napi_poll()
+                                           ->poll(->weight)
+  poll_napi()
+    cmpxchg(->poll_owner, -1, cpu)
+      poll_one_napi()
+        set_bit(NAPI_STATE_NPSVC, ->state)
+                                              napi_complete_done()
+                                                if (NAPIF_STATE_NPSVC)
+                                                  return false
+                                           # exit without clearing SCHED
+
+This feels very unlikely, but perhaps virtio has some interactions
+with the hypervisor in the NAPI ->poll that makes the race window
+larger?
+
+Best I could to to prove the theory was to add and trigger this
+warning in napi_poll (just before netpoll_poll_unlock()):
+
+      WARN_ONCE(!have && rcu_access_pointer(n->dev->npinfo) &&
+                napi_is_scheduled(n) && list_empty(&n->poll_list),
+                "NAPI race with netpoll %px", n);
+
+If this warning hits the next virtio_close() will hang.
+
+This patch survived 30 test iterations without a hang (without it
+the longest clean run was around 10). Credit for triggering this
+goes to Breno's recent netconsole tests.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Paolo Abeni <pabeni@redhat.com>
+Link: https://lore.kernel.org/c5a93ed1-9abe-4880-a3bb-8d1678018b1d@redhat.com
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Link: https://patch.msgid.link/20250726010846.1105875-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/netpoll.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index e95c2933756d..87182a4272bf 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -784,6 +784,13 @@ int netpoll_setup(struct netpoll *np)
+       if (err)
+               goto put;
+       rtnl_unlock();
++
++      /* Make sure all NAPI polls which started before dev->npinfo
++       * was visible have exited before we start calling NAPI poll.
++       * NAPI skips locking if dev->npinfo is NULL.
++       */
++      synchronize_rcu();
++
+       return 0;
+ put:
+-- 
+2.39.5
+
diff --git a/queue-6.12/nfs-fix-filehandle-bounds-checking-in-nfs_fh_to_dent.patch b/queue-6.12/nfs-fix-filehandle-bounds-checking-in-nfs_fh_to_dent.patch
new file mode 100644 (file)
index 0000000..bcbb539
--- /dev/null
@@ -0,0 +1,51 @@
+From 1ea7f279a7be22d6644ab2d9538ba4298fa60a03 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Jul 2025 09:24:58 -0400
+Subject: NFS: Fix filehandle bounds checking in nfs_fh_to_dentry()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit ef93a685e01a281b5e2a25ce4e3428cf9371a205 ]
+
+The function needs to check the minimal filehandle length before it can
+access the embedded filehandle.
+
+Reported-by: zhangjian <zhangjian496@huawei.com>
+Fixes: 20fa19027286 ("nfs: add export operations")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/export.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfs/export.c b/fs/nfs/export.c
+index be686b8e0c54..aeb17adcb2b6 100644
+--- a/fs/nfs/export.c
++++ b/fs/nfs/export.c
+@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ {
+       struct nfs_fattr *fattr = NULL;
+       struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
+-      size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
++      size_t fh_size = offsetof(struct nfs_fh, data);
+       const struct nfs_rpc_ops *rpc_ops;
+       struct dentry *dentry;
+       struct inode *inode;
+-      int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
++      int len = EMBED_FH_OFF;
+       u32 *p = fid->raw;
+       int ret;
++      /* Initial check of bounds */
++      if (fh_len < len + XDR_QUADLEN(fh_size) ||
++          fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
++              return NULL;
++      /* Calculate embedded filehandle size */
++      fh_size += server_fh->size;
++      len += XDR_QUADLEN(fh_size);
+       /* NULL translates to ESTALE */
+       if (fh_len < len || fh_type != len)
+               return NULL;
+-- 
+2.39.5
+
diff --git a/queue-6.12/nfs-fix-wakeup-of-__nfs_lookup_revalidate-in-unblock.patch b/queue-6.12/nfs-fix-wakeup-of-__nfs_lookup_revalidate-in-unblock.patch
new file mode 100644 (file)
index 0000000..25e4be4
--- /dev/null
@@ -0,0 +1,43 @@
+From 25f7bb448b7d1e9994ce89eace2efbd1a92ecb0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Jul 2025 16:15:27 -0700
+Subject: NFS: Fix wakeup of __nfs_lookup_revalidate() in unblock_revalidate()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 1db3a48e83bb64a70bf27263b7002585574a9c2d ]
+
+Use store_release_wake_up() to add the appropriate memory barrier before
+calling wake_up_var(&dentry->d_fsdata).
+
+Reported-by: Lukáš Hejtmánek<xhejtman@ics.muni.cz>
+Suggested-by: Santosh Pradhan <santosh.pradhan@gmail.com>
+Link: https://lore.kernel.org/all/18945D18-3EDB-4771-B019-0335CE671077@ics.muni.cz/
+Fixes: 99bc9f2eb3f7 ("NFS: add barriers when testing for NFS_FSDATA_BLOCKED")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/dir.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index f9f4a92f63e9..bbc625e742aa 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1837,9 +1837,7 @@ static void block_revalidate(struct dentry *dentry)
+ static void unblock_revalidate(struct dentry *dentry)
+ {
+-      /* store_release ensures wait_var_event() sees the update */
+-      smp_store_release(&dentry->d_fsdata, NULL);
+-      wake_up_var(&dentry->d_fsdata);
++      store_release_wake_up(&dentry->d_fsdata, NULL);
+ }
+ /*
+-- 
+2.39.5
+
diff --git a/queue-6.12/nfs-fixup-allocation-flags-for-nfsiod-s-__gfp_noretr.patch b/queue-6.12/nfs-fixup-allocation-flags-for-nfsiod-s-__gfp_noretr.patch
new file mode 100644 (file)
index 0000000..da7cbb2
--- /dev/null
@@ -0,0 +1,55 @@
+From 014b39941085b6a8021c35b01320d3b24a14be34 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 21:47:43 -0400
+Subject: NFS: Fixup allocation flags for nfsiod's __GFP_NORETRY
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+[ Upstream commit 99765233ab42bf7a4950377ad7894dce8a5c0e60 ]
+
+If the NFS client is doing writeback from a workqueue context, avoid using
+__GFP_NORETRY for allocations if the task has set PF_MEMALLOC_NOIO or
+PF_MEMALLOC_NOFS.  The combination of these flags makes memory allocation
+failures much more likely.
+
+We've seen those allocation failures show up when the loopback driver is
+doing writeback from a workqueue to a file on NFS, where memory allocation
+failure results in errors or corruption within the loopback device's
+filesystem.
+
+Suggested-by: Trond Myklebust <trondmy@kernel.org>
+Fixes: 0bae835b63c5 ("NFS: Avoid writeback threads getting stuck in mempool_alloc()")
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Reviewed-by: Laurence Oberman <loberman@redhat.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Link: https://lore.kernel.org/r/f83ac1155a4bc670f2663959a7a068571e06afd9.1752111622.git.bcodding@redhat.com
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/internal.h | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index 1be4be3d4a2b..9840b779f0df 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -668,9 +668,12 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
+ static inline gfp_t nfs_io_gfp_mask(void)
+ {
+-      if (current->flags & PF_WQ_WORKER)
+-              return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+-      return GFP_KERNEL;
++      gfp_t ret = current_gfp_context(GFP_KERNEL);
++
++      /* For workers __GFP_NORETRY only with __GFP_IO or __GFP_FS */
++      if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL)
++              ret |= __GFP_NORETRY | __GFP_NOWARN;
++      return ret;
+ }
+ /*
+-- 
+2.39.5
+
diff --git a/queue-6.12/nfsv4.2-another-fix-for-listxattr.patch b/queue-6.12/nfsv4.2-another-fix-for-listxattr.patch
new file mode 100644 (file)
index 0000000..20a58ff
--- /dev/null
@@ -0,0 +1,53 @@
+From 16435926470785f3d8725e8bdbcb2f596e9bbc80 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Jul 2025 16:56:41 -0400
+Subject: NFSv4.2: another fix for listxattr
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+[ Upstream commit 9acb237deff7667b0f6b10fe6b1b70c4429ea049 ]
+
+Currently, when the server supports NFS4.1 security labels, the
+security.selinux label is included twice. Instead, only add it
+when the server doesn't possess security label support.
+
+Fixes: 243fea134633 ("NFSv4.2: fix listxattr to return selinux security label")
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Link: https://lore.kernel.org/r/20250722205641.79394-1-okorniev@redhat.com
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 77b239b10d41..e27cd2c7cfd1 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10819,7 +10819,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+-      ssize_t error, error2, error3, error4;
++      ssize_t error, error2, error3, error4 = 0;
+       size_t left = size;
+       error = generic_listxattr(dentry, list, left);
+@@ -10847,9 +10847,11 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+               left -= error3;
+       }
+-      error4 = security_inode_listsecurity(d_inode(dentry), list, left);
+-      if (error4 < 0)
+-              return error4;
++      if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
++              error4 = security_inode_listsecurity(d_inode(dentry), list, left);
++              if (error4 < 0)
++                      return error4;
++      }
+       error += error2 + error3 + error4;
+       if (size && error > size)
+-- 
+2.39.5
+
diff --git a/queue-6.12/nvmet-exit-debugfs-after-discovery-subsystem-exits.patch b/queue-6.12/nvmet-exit-debugfs-after-discovery-subsystem-exits.patch
new file mode 100644 (file)
index 0000000..c18a19f
--- /dev/null
@@ -0,0 +1,45 @@
+From ad57e62f20247c8930542c8731fd484eb0632994 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 22:35:07 -0700
+Subject: nvmet: exit debugfs after discovery subsystem exits
+
+From: Mohamed Khalfella <mkhalfella@purestorage.com>
+
+[ Upstream commit 80f21806b8e34ae1e24c0fc6a0f0dfd9b055e130 ]
+
+Commit 528589947c180 ("nvmet: initialize discovery subsys after debugfs
+is initialized") changed nvmet_init() to initialize nvme discovery after
+"nvmet" debugfs directory is initialized. The change broke nvmet_exit()
+because discovery subsystem now depends on debugfs. Debugfs should be
+destroyed after discovery subsystem. Fix nvmet_exit() to do that.
+
+Reported-by: Yi Zhang <yi.zhang@redhat.com>
+Closes: https://lore.kernel.org/all/CAHj4cs96AfFQpyDKF_MdfJsnOEo=2V7dQgqjFv+k3t7H-=yGhA@mail.gmail.com/
+Fixes: 528589947c180 ("nvmet: initialize discovery subsys after debugfs is initialized")
+Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Reviewed-by: Daniel Wagner <dwagner@suse.de>
+Link: https://lore.kernel.org/r/20250807053507.2794335-1-mkhalfella@purestorage.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index cfde5b018048..710e74d3ec3e 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1746,8 +1746,8 @@ static int __init nvmet_init(void)
+ static void __exit nvmet_exit(void)
+ {
+       nvmet_exit_configfs();
+-      nvmet_exit_debugfs();
+       nvmet_exit_discovery();
++      nvmet_exit_debugfs();
+       ida_destroy(&cntlid_ida);
+       destroy_workqueue(nvmet_wq);
+       destroy_workqueue(buffered_io_wq);
+-- 
+2.39.5
+
diff --git a/queue-6.12/nvmet-initialize-discovery-subsys-after-debugfs-is-i.patch b/queue-6.12/nvmet-initialize-discovery-subsys-after-debugfs-is-i.patch
new file mode 100644 (file)
index 0000000..f1715f6
--- /dev/null
@@ -0,0 +1,72 @@
+From d4960c51fedc18e2412b206fe9ee248c3537a719 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 13:50:05 -0700
+Subject: nvmet: initialize discovery subsys after debugfs is initialized
+
+From: Mohamed Khalfella <mkhalfella@purestorage.com>
+
+[ Upstream commit 528589947c1802b9357c2a9b96d88cc4a11cd88b ]
+
+During nvme target initialization discovery subsystem is initialized
+before "nvmet" debugfs directory is created. This results in discovery
+subsystem debugfs directory to be created in debugfs root directory.
+
+nvmet_init() ->
+  nvmet_init_discovery() ->
+    nvmet_subsys_alloc() ->
+      nvmet_debugfs_subsys_setup()
+
+In other words, the codepath above is executed before nvmet_debugfs is
+created. We get /sys/kernel/debug/nqn.2014-08.org.nvmexpress.discovery
+instead of /sys/kernel/debug/nvmet/nqn.2014-08.org.nvmexpress.discovery.
+Move nvmet_init_discovery() call after nvmet_init_debugfs() to fix it.
+
+Fixes: 649fd41420a8 ("nvmet: add debugfs support")
+Signed-off-by: Mohamed Khalfella <mkhalfella@purestorage.com>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Reviewed-by: Hannes Reinecke <hare@kernel.org>
+Reviewed-by: Daniel Wagner <dwagner@suse.de>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/core.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 4606c8813666..cfde5b018048 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1714,24 +1714,24 @@ static int __init nvmet_init(void)
+       if (!nvmet_wq)
+               goto out_free_buffered_work_queue;
+-      error = nvmet_init_discovery();
++      error = nvmet_init_debugfs();
+       if (error)
+               goto out_free_nvmet_work_queue;
+-      error = nvmet_init_debugfs();
++      error = nvmet_init_discovery();
+       if (error)
+-              goto out_exit_discovery;
++              goto out_exit_debugfs;
+       error = nvmet_init_configfs();
+       if (error)
+-              goto out_exit_debugfs;
++              goto out_exit_discovery;
+       return 0;
+-out_exit_debugfs:
+-      nvmet_exit_debugfs();
+ out_exit_discovery:
+       nvmet_exit_discovery();
++out_exit_debugfs:
++      nvmet_exit_debugfs();
+ out_free_nvmet_work_queue:
+       destroy_workqueue(nvmet_wq);
+ out_free_buffered_work_queue:
+-- 
+2.39.5
+
diff --git a/queue-6.12/phy-mscc-fix-parsing-of-unicast-frames.patch b/queue-6.12/phy-mscc-fix-parsing-of-unicast-frames.patch
new file mode 100644 (file)
index 0000000..b1cfce0
--- /dev/null
@@ -0,0 +1,53 @@
+From 2dd9cb2163e6399f25d89cd98f0ffac8e40e5dac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Jul 2025 16:03:07 +0200
+Subject: phy: mscc: Fix parsing of unicast frames
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit 6fb5ff63b35b7e849cc8510957f25753f87f63d2 ]
+
+According to the 1588 standard, it is possible to use both unicast and
+multicast frames to send the PTP information. It was noticed that if the
+frames were unicast they were not processed by the analyzer meaning that
+they were not timestamped. Therefore fix this to match also these
+unicast frames.
+
+Fixes: ab2bf9339357 ("net: phy: mscc: 1588 block initialization")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250726140307.3039694-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc_ptp.c | 1 +
+ drivers/net/phy/mscc/mscc_ptp.h | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index ce49f3ac6939..bce6cc5b04ee 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -897,6 +897,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
+                                    get_unaligned_be32(ptp_multicast));
+       } else {
+               val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
++              val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
+               vsc85xx_ts_write_csr(phydev, blk,
+                                    MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
+               vsc85xx_ts_write_csr(phydev, blk,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
+index da3465360e90..ae9ad925bfa8 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.h
++++ b/drivers/net/phy/mscc/mscc_ptp.h
+@@ -98,6 +98,7 @@
+ #define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK   GENMASK(22, 20)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST       0x400000
++#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR   0x100000
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK       GENMASK(17, 16)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST    0x020000
+-- 
+2.39.5
+
diff --git a/queue-6.12/pnfs-flexfiles-don-t-attempt-pnfs-on-fatal-ds-errors.patch b/queue-6.12/pnfs-flexfiles-don-t-attempt-pnfs-on-fatal-ds-errors.patch
new file mode 100644 (file)
index 0000000..1bc54f0
--- /dev/null
@@ -0,0 +1,176 @@
+From 2eae1bff668074df3cfcc4f1fb2d603a96ac404d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jun 2025 09:17:51 +0200
+Subject: pNFS/flexfiles: don't attempt pnfs on fatal DS errors
+
+From: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+
+[ Upstream commit f06bedfa62d57f7b67d44aacd6badad2e13a803f ]
+
+When an application gets killed (SIGTERM/SIGINT) while the pNFS client performs a connection
+to a DS, the client ends up in an infinite loop of connect-disconnect. The
+source of the issue is that flexfilelayoutdev#nfs4_ff_layout_prepare_ds gets an error
+on nfs4_pnfs_ds_connect with status ERESTARTSYS, which is set by rpc_signal_task, but
+the error is treated as transient, and thus retried.
+
+The issue is reproducible by pressing Ctrl+C during the following script (there should
+be ~1000 files in a directory, and the client must not have any connections to DSes):
+
+```
+echo 3 > /proc/sys/vm/drop_caches
+
+for i in *
+do
+   head -1 $i
+done
+```
+
+The change aims to propagate the nfs4_ff_layout_prepare_ds error state
+to the caller, which can decide whether this is a retryable error or not.
+
+Signed-off-by: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+Link: https://lore.kernel.org/r/20250627071751.189663-1-tigran.mkrtchyan@desy.de
+Fixes: 260f32adb88d ("pNFS/flexfiles: Check the result of nfs4_pnfs_ds_connect")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/flexfilelayout/flexfilelayout.c    | 26 ++++++++++++++---------
+ fs/nfs/flexfilelayout/flexfilelayoutdev.c |  6 +++---
+ 2 files changed, 19 insertions(+), 13 deletions(-)
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index bf96f7a8900c..b685e763ef11 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -761,14 +761,14 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ {
+       struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+       struct nfs4_ff_layout_mirror *mirror;
+-      struct nfs4_pnfs_ds *ds;
++      struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
+       u32 idx;
+       /* mirrors are initially sorted by efficiency */
+       for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
+               mirror = FF_LAYOUT_COMP(lseg, idx);
+               ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
+-              if (!ds)
++              if (IS_ERR(ds))
+                       continue;
+               if (check_device &&
+@@ -776,10 +776,10 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+                       continue;
+               *best_idx = idx;
+-              return ds;
++              break;
+       }
+-      return NULL;
++      return ds;
+ }
+ static struct nfs4_pnfs_ds *
+@@ -941,7 +941,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+       for (i = 0; i < pgio->pg_mirror_count; i++) {
+               mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+               ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
+-              if (!ds) {
++              if (IS_ERR(ds)) {
+                       if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+                               goto out_mds;
+                       pnfs_generic_pg_cleanup(pgio);
+@@ -1848,6 +1848,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+       u32 idx = hdr->pgio_mirror_idx;
+       int vers;
+       struct nfs_fh *fh;
++      bool ds_fatal_error = false;
+       dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
+               __func__, hdr->inode->i_ino,
+@@ -1855,8 +1856,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+       mirror = FF_LAYOUT_COMP(lseg, idx);
+       ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
+-      if (!ds)
++      if (IS_ERR(ds)) {
++              ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
+               goto out_failed;
++      }
+       ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+                                                  hdr->inode);
+@@ -1904,7 +1907,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+       return PNFS_ATTEMPTED;
+ out_failed:
+-      if (ff_layout_avoid_mds_available_ds(lseg))
++      if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
+               return PNFS_TRY_AGAIN;
+       trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
+                       hdr->args.offset, hdr->args.count,
+@@ -1926,11 +1929,14 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+       int vers;
+       struct nfs_fh *fh;
+       u32 idx = hdr->pgio_mirror_idx;
++      bool ds_fatal_error = false;
+       mirror = FF_LAYOUT_COMP(lseg, idx);
+       ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
+-      if (!ds)
++      if (IS_ERR(ds)) {
++              ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
+               goto out_failed;
++      }
+       ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+                                                  hdr->inode);
+@@ -1981,7 +1987,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+       return PNFS_ATTEMPTED;
+ out_failed:
+-      if (ff_layout_avoid_mds_available_ds(lseg))
++      if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
+               return PNFS_TRY_AGAIN;
+       trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
+                       hdr->args.offset, hdr->args.count,
+@@ -2024,7 +2030,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
+       idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+       mirror = FF_LAYOUT_COMP(lseg, idx);
+       ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
+-      if (!ds)
++      if (IS_ERR(ds))
+               goto out_err;
+       ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index 4a304cf17c4b..ef535baeefb6 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -370,11 +370,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+                         struct nfs4_ff_layout_mirror *mirror,
+                         bool fail_return)
+ {
+-      struct nfs4_pnfs_ds *ds = NULL;
++      struct nfs4_pnfs_ds *ds;
+       struct inode *ino = lseg->pls_layout->plh_inode;
+       struct nfs_server *s = NFS_SERVER(ino);
+       unsigned int max_payload;
+-      int status;
++      int status = -EAGAIN;
+       if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+               goto noconnect;
+@@ -418,7 +418,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+       ff_layout_send_layouterror(lseg);
+       if (fail_return || !ff_layout_has_available_ds(lseg))
+               pnfs_error_mark_layout_for_return(ino, lseg);
+-      ds = NULL;
++      ds = ERR_PTR(status);
+ out:
+       return ds;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/pptp-ensure-minimal-skb-length-in-pptp_xmit.patch b/queue-6.12/pptp-ensure-minimal-skb-length-in-pptp_xmit.patch
new file mode 100644 (file)
index 0000000..ee5461b
--- /dev/null
@@ -0,0 +1,92 @@
+From 55b228332bd87d9d6c665622bc131887028da143 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 08:02:07 +0000
+Subject: pptp: ensure minimal skb length in pptp_xmit()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit de9c4861fb42f0cd72da844c3c34f692d5895b7b ]
+
+Commit aabc6596ffb3 ("net: ppp: Add bound checking for skb data
+on ppp_sync_txmung") fixed ppp_sync_txmunge()
+
+We need a similar fix in pptp_xmit(), otherwise we might
+read uninit data as reported by syzbot.
+
+BUG: KMSAN: uninit-value in pptp_xmit+0xc34/0x2720 drivers/net/ppp/pptp.c:193
+  pptp_xmit+0xc34/0x2720 drivers/net/ppp/pptp.c:193
+  ppp_channel_bridge_input drivers/net/ppp/ppp_generic.c:2290 [inline]
+  ppp_input+0x1d6/0xe60 drivers/net/ppp/ppp_generic.c:2314
+  pppoe_rcv_core+0x1e8/0x760 drivers/net/ppp/pppoe.c:379
+  sk_backlog_rcv+0x142/0x420 include/net/sock.h:1148
+  __release_sock+0x1d3/0x330 net/core/sock.c:3213
+  release_sock+0x6b/0x270 net/core/sock.c:3767
+  pppoe_sendmsg+0x15d/0xcb0 drivers/net/ppp/pppoe.c:904
+  sock_sendmsg_nosec net/socket.c:712 [inline]
+  __sock_sendmsg+0x330/0x3d0 net/socket.c:727
+  ____sys_sendmsg+0x893/0xd80 net/socket.c:2566
+  ___sys_sendmsg+0x271/0x3b0 net/socket.c:2620
+  __sys_sendmmsg+0x2d9/0x7c0 net/socket.c:2709
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+afad90ffc8645324afe5@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/68887d86.a00a0220.b12ec.00cd.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Link: https://patch.msgid.link/20250729080207.1863408-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/pptp.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 689687bd2574..06d50e7de151 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -159,9 +159,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+       int len;
+       unsigned char *data;
+       __u32 seq_recv;
+-
+-
+-      struct rtable *rt;
++      struct rtable *rt = NULL;
+       struct net_device *tdev;
+       struct iphdr  *iph;
+       int    max_headroom;
+@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+       if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+               struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+-              if (!new_skb) {
+-                      ip_rt_put(rt);
++
++              if (!new_skb)
+                       goto tx_error;
+-              }
++
+               if (skb->sk)
+                       skb_set_owner_w(new_skb, skb->sk);
+               consume_skb(skb);
+               skb = new_skb;
+       }
++      /* Ensure we can safely access protocol field and LCP code */
++      if (!pskb_may_pull(skb, 3))
++              goto tx_error;
++
+       data = skb->data;
+       islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+@@ -262,6 +264,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+       return 1;
+ tx_error:
++      ip_rt_put(rt);
+       kfree_skb(skb);
+       return 1;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/pptp-fix-pptp_xmit-error-path.patch b/queue-6.12/pptp-fix-pptp_xmit-error-path.patch
new file mode 100644 (file)
index 0000000..2a97bb4
--- /dev/null
@@ -0,0 +1,84 @@
+From 83c3fd88c7794b3f52a7a099ffb645f97936634b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 14:21:46 +0000
+Subject: pptp: fix pptp_xmit() error path
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ae633388cae349886f1a3cfb27aa092854b24c1b ]
+
+I accidentally added a bug in pptp_xmit() that syzbot caught for us.
+
+Only call ip_rt_put() if a route has been allocated.
+
+BUG: unable to handle page fault for address: ffffffffffffffdb
+PGD df3b067 P4D df3b067 PUD df3d067 PMD 0
+Oops: Oops: 0002 [#1] SMP KASAN PTI
+CPU: 1 UID: 0 PID: 6346 Comm: syz.0.336 Not tainted 6.16.0-next-20250804-syzkaller #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
+RIP: 0010:arch_atomic_add_return arch/x86/include/asm/atomic.h:85 [inline]
+RIP: 0010:raw_atomic_sub_return_release include/linux/atomic/atomic-arch-fallback.h:846 [inline]
+RIP: 0010:atomic_sub_return_release include/linux/atomic/atomic-instrumented.h:327 [inline]
+RIP: 0010:__rcuref_put include/linux/rcuref.h:109 [inline]
+RIP: 0010:rcuref_put+0x172/0x210 include/linux/rcuref.h:173
+Call Trace:
+ <TASK>
+ dst_release+0x24/0x1b0 net/core/dst.c:167
+ ip_rt_put include/net/route.h:285 [inline]
+ pptp_xmit+0x14b/0x1a90 drivers/net/ppp/pptp.c:267
+ __ppp_channel_push+0xf2/0x1c0 drivers/net/ppp/ppp_generic.c:2166
+ ppp_channel_push+0x123/0x660 drivers/net/ppp/ppp_generic.c:2198
+ ppp_write+0x2b0/0x400 drivers/net/ppp/ppp_generic.c:544
+ vfs_write+0x27b/0xb30 fs/read_write.c:684
+ ksys_write+0x145/0x250 fs/read_write.c:738
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Fixes: de9c4861fb42 ("pptp: ensure minimal skb length in pptp_xmit()")
+Reported-by: syzbot+27d7cfbc93457e472e00@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/689095a5.050a0220.1fc43d.0009.GAE@google.com/
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250807142146.2877060-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/pptp.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 06d50e7de151..cec3bb22471b 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -159,17 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+       int len;
+       unsigned char *data;
+       __u32 seq_recv;
+-      struct rtable *rt = NULL;
++      struct rtable *rt;
+       struct net_device *tdev;
+       struct iphdr  *iph;
+       int    max_headroom;
+       if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+-              goto tx_error;
++              goto tx_drop;
+       rt = pptp_route_output(po, &fl4);
+       if (IS_ERR(rt))
+-              goto tx_error;
++              goto tx_drop;
+       tdev = rt->dst.dev;
+@@ -265,6 +265,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ tx_error:
+       ip_rt_put(rt);
++tx_drop:
+       kfree_skb(skb);
+       return 1;
+ }
+-- 
+2.39.5
+
diff --git a/queue-6.12/s390-ap-unmask-slcf-bit-in-card-and-queue-ap-functio.patch b/queue-6.12/s390-ap-unmask-slcf-bit-in-card-and-queue-ap-functio.patch
new file mode 100644 (file)
index 0000000..2ef94a4
--- /dev/null
@@ -0,0 +1,55 @@
+From 568fee17e7f2bdaf186520b4a5688789013ed31a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Jul 2025 15:39:12 +0200
+Subject: s390/ap: Unmask SLCF bit in card and queue ap functions sysfs
+
+From: Harald Freudenberger <freude@linux.ibm.com>
+
+[ Upstream commit 123b7c7c2ba725daf3bfa5ce421d65b92cb5c075 ]
+
+The SLCF bit ("stateless command filtering") introduced with
+CEX8 cards was because of the function mask's default value
+suppressed when user space read the ap function for an AP
+card or queue. Unmask this bit so that user space applications
+like lszcrypt can evaluate and list this feature.
+
+Fixes: d4c53ae8e494 ("s390/ap: store TAPQ hwinfo in struct ap_card")
+Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
+Reviewed-by: Holger Dengler <dengler@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/include/asm/ap.h   | 2 +-
+ drivers/s390/crypto/ap_bus.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/s390/include/asm/ap.h b/arch/s390/include/asm/ap.h
+index 395b02d6a133..352108727d7e 100644
+--- a/arch/s390/include/asm/ap.h
++++ b/arch/s390/include/asm/ap.h
+@@ -103,7 +103,7 @@ struct ap_tapq_hwinfo {
+                       unsigned int accel :  1; /* A */
+                       unsigned int ep11  :  1; /* X */
+                       unsigned int apxa  :  1; /* APXA */
+-                      unsigned int       :  1;
++                      unsigned int slcf  :  1; /* Cmd filtering avail. */
+                       unsigned int class :  8;
+                       unsigned int bs    :  2; /* SE bind/assoc */
+                       unsigned int       : 14;
+diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
+index f4622ee4d894..6111913c858c 100644
+--- a/drivers/s390/crypto/ap_bus.h
++++ b/drivers/s390/crypto/ap_bus.h
+@@ -180,7 +180,7 @@ struct ap_card {
+       atomic64_t total_request_count; /* # requests ever for this AP device.*/
+ };
+-#define TAPQ_CARD_HWINFO_MASK 0xFEFF0000FFFF0F0FUL
++#define TAPQ_CARD_HWINFO_MASK 0xFFFF0000FFFF0F0FUL
+ #define ASSOC_IDX_INVALID 0x10000
+ #define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
+-- 
+2.39.5
+
diff --git a/queue-6.12/s390-mm-allocate-page-table-with-page_size-granulari.patch b/queue-6.12/s390-mm-allocate-page-table-with-page_size-granulari.patch
new file mode 100644 (file)
index 0000000..a924a19
--- /dev/null
@@ -0,0 +1,47 @@
+From b66775dd3dbf7d5752005a9ba988f5ea69e6f702 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 11:57:03 +0200
+Subject: s390/mm: Allocate page table with PAGE_SIZE granularity
+
+From: Sumanth Korikkar <sumanthk@linux.ibm.com>
+
+[ Upstream commit daa8af80d283ee9a7d42dd6f164a65036665b9d4 ]
+
+Make vmem_pte_alloc() consistent by always allocating page table of
+PAGE_SIZE granularity, regardless of whether page_table_alloc() (with
+slab) or memblock_alloc() is used. This ensures page table can be fully
+freed when the corresponding page table entries are removed.
+
+Fixes: d08d4e7cd6bf ("s390/mm: use full 4KB page for 2KB PTE")
+Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
+Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/mm/vmem.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
+index 665b8228afeb..dd971826652a 100644
+--- a/arch/s390/mm/vmem.c
++++ b/arch/s390/mm/vmem.c
+@@ -63,13 +63,12 @@ void *vmem_crst_alloc(unsigned long val)
+ pte_t __ref *vmem_pte_alloc(void)
+ {
+-      unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
+       pte_t *pte;
+       if (slab_is_available())
+-              pte = (pte_t *) page_table_alloc(&init_mm);
++              pte = (pte_t *)page_table_alloc(&init_mm);
+       else
+-              pte = (pte_t *) memblock_alloc(size, size);
++              pte = (pte_t *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+       if (!pte)
+               return NULL;
+       memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
+-- 
+2.39.5
+
diff --git a/queue-6.12/sched-add-test_and_clear_wake_up_bit-and-atomic_dec_.patch b/queue-6.12/sched-add-test_and_clear_wake_up_bit-and-atomic_dec_.patch
new file mode 100644 (file)
index 0000000..4175f68
--- /dev/null
@@ -0,0 +1,104 @@
+From e4afe12e0274d20b932be173baeb8ddb497cad87 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2024 15:31:41 +1000
+Subject: sched: Add test_and_clear_wake_up_bit() and atomic_dec_and_wake_up()
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit 52d633def56c10fe3e82a2c5d88c3ecb3f4e4852 ]
+
+There are common patterns in the kernel of using test_and_clear_bit()
+before wake_up_bit(), and atomic_dec_and_test() before wake_up_var().
+
+These combinations don't need extra barriers but sometimes include them
+unnecessarily.
+
+To help avoid the unnecessary barriers and to help discourage the
+general use of wake_up_bit/var (which is a fragile interface) introduce
+two combined functions which implement these patterns.
+
+Also add store_release_wake_up() which supports the task of simply
+setting a non-atomic variable and sending a wakeup.  This pattern
+requires barriers which are often omitted.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240925053405.3960701-5-neilb@suse.de
+Stable-dep-of: 1db3a48e83bb ("NFS: Fix wakeup of __nfs_lookup_revalidate() in unblock_revalidate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/wait_bit.h | 60 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
+index 7725b7579b78..2209c227e859 100644
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -335,4 +335,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word)
+       wake_up_bit(word, bit);
+ }
++/**
++ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
++ * @bit: the bit of the word being waited on
++ * @word: the address of memory containing that bit
++ *
++ * If the bit is set and can be atomically cleared, any tasks waiting in
++ * wait_on_bit() or similar will be woken.  This call has the same
++ * complete ordering semantics as test_and_clear_bit().  Any changes to
++ * memory made before this call are guaranteed to be visible after the
++ * corresponding wait_on_bit() completes.
++ *
++ * Returns %true if the bit was successfully set and the wake up was sent.
++ */
++static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
++{
++      if (!test_and_clear_bit(bit, word))
++              return false;
++      /* no extra barrier required */
++      wake_up_bit(word, bit);
++      return true;
++}
++
++/**
++ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
++ * @var: the variable to dec and test
++ *
++ * Decrements the atomic variable and if it reaches zero, send a wake_up to any
++ * processes waiting on the variable.
++ *
++ * This function has the same complete ordering semantics as atomic_dec_and_test.
++ *
++ * Returns %true is the variable reaches zero and the wake up was sent.
++ */
++
++static inline bool atomic_dec_and_wake_up(atomic_t *var)
++{
++      if (!atomic_dec_and_test(var))
++              return false;
++      /* No extra barrier required */
++      wake_up_var(var);
++      return true;
++}
++
++/**
++ * store_release_wake_up - update a variable and send a wake_up
++ * @var: the address of the variable to be updated and woken
++ * @val: the value to store in the variable.
++ *
++ * Store the given value in the variable send a wake up to any tasks
++ * waiting on the variable.  All necessary barriers are included to ensure
++ * the task calling wait_var_event() sees the new value and all values
++ * written to memory before this call.
++ */
++#define store_release_wake_up(var, val)                                       \
++do {                                                                  \
++      smp_store_release(var, val);                                    \
++      smp_mb();                                                       \
++      wake_up_var(var);                                               \
++} while (0)
++
+ #endif /* _LINUX_WAIT_BIT_H */
+-- 
+2.39.5
+
index 62f31dec42d0640820b2efb335eac7bbc840dbd7..db4c2ea2e26ec99780abe2b58ddb37ff2b802142 100644 (file)
@@ -278,3 +278,46 @@ pci-pnv_php-work-around-switches-with-broken-presenc.patch
 powerpc-eeh-export-eeh_unfreeze_pe.patch
 powerpc-eeh-make-eeh-driver-device-hotplug-safe.patch
 pci-pnv_php-fix-surprise-plug-detection-and-recovery.patch
+pnfs-flexfiles-don-t-attempt-pnfs-on-fatal-ds-errors.patch
+sched-add-test_and_clear_wake_up_bit-and-atomic_dec_.patch
+nfs-fix-wakeup-of-__nfs_lookup_revalidate-in-unblock.patch
+nfs-fix-filehandle-bounds-checking-in-nfs_fh_to_dent.patch
+nfsv4.2-another-fix-for-listxattr.patch
+nfs-fixup-allocation-flags-for-nfsiod-s-__gfp_noretr.patch
+md-md-cluster-handle-remove-message-earlier.patch
+netpoll-prevent-hanging-napi-when-netcons-gets-enabl.patch
+phy-mscc-fix-parsing-of-unicast-frames.patch
+net-ipa-add-ipa-v5.1-and-v5.5-to-ipa_version_string.patch
+pptp-ensure-minimal-skb-length-in-pptp_xmit.patch
+nvmet-initialize-discovery-subsys-after-debugfs-is-i.patch
+s390-ap-unmask-slcf-bit-in-card-and-queue-ap-functio.patch
+netlink-specs-ethtool-fix-module-eeprom-input-output.patch
+block-fix-default-io-priority-if-there-is-no-io-cont.patch
+block-ensure-discard_granularity-is-zero-when-discar.patch
+asoc-tas2781-fix-the-wrong-step-for-tlv-on-tas2781.patch
+spi-cs42l43-property-entry-should-be-a-null-terminat.patch
+net-mlx5-correctly-set-gso_segs-when-lro-is-used.patch
+ipv6-reject-malicious-packets-in-ipv6_gso_segment.patch
+net-mdio-mdio-bcm-unimac-correct-rate-fallback-logic.patch
+net-drop-ufo-packets-in-udp_rcv_segment.patch
+net-sched-taprio-enforce-minimum-value-for-picos_per.patch
+sunrpc-fix-client-side-handling-of-tls-alerts.patch
+x86-irq-plug-vector-setup-race.patch
+benet-fix-bug-when-creating-vfs.patch
+net-sched-mqprio-fix-stack-out-of-bounds-write-in-tc.patch
+s390-mm-allocate-page-table-with-page_size-granulari.patch
+eth-fbnic-remove-the-debugging-trick-of-super-high-p.patch
+irqchip-build-imx_mu_msi-only-on-arm.patch
+alsa-hda-ca0132-fix-missing-error-handling-in-ca0132.patch
+smb-server-remove-separate-empty_recvmsg_queue.patch
+smb-server-make-sure-we-call-ib_dma_unmap_single-onl.patch
+smb-server-let-recv_done-consistently-call-put_recvm.patch
+smb-server-let-recv_done-avoid-touching-data_transfe.patch
+smb-client-let-send_done-cleanup-before-calling-smbd.patch
+smb-client-remove-separate-empty_packet_queue.patch
+smb-client-make-sure-we-call-ib_dma_unmap_single-onl.patch
+smb-client-let-recv_done-cleanup-before-notifying-th.patch
+smb-client-let-recv_done-avoid-touching-data_transfe.patch
+nvmet-exit-debugfs-after-discovery-subsystem-exits.patch
+pptp-fix-pptp_xmit-error-path.patch
+smb-client-return-an-error-if-rdma_connect-does-not-.patch
diff --git a/queue-6.12/smb-client-let-recv_done-avoid-touching-data_transfe.patch b/queue-6.12/smb-client-let-recv_done-avoid-touching-data_transfe.patch
new file mode 100644 (file)
index 0000000..0cadc20
--- /dev/null
@@ -0,0 +1,81 @@
+From df32db7dca2e4d67c33767e16639d1f1b676a652 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:16 +0200
+Subject: smb: client: let recv_done() avoid touching data_transfer after
+ cleanup/move
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 24eff17887cb45c25a427e662dda352973c5c171 ]
+
+Calling enqueue_reassembly() and wake_up_interruptible(&info->wait_reassembly_queue)
+or put_receive_buffer() means the response/data_transfer pointer might
+get re-used by another thread, which means these should be
+the last operations before calling return.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 25 +++++++++++--------------
+ 1 file changed, 11 insertions(+), 14 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index d26b8cef82d6..47f2a6cc1c0c 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -479,10 +479,6 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               data_transfer = smbd_response_payload(response);
+               data_length = le32_to_cpu(data_transfer->data_length);
+-              /*
+-               * If this is a packet with data playload place the data in
+-               * reassembly queue and wake up the reading thread
+-               */
+               if (data_length) {
+                       if (info->full_packet_received)
+                               response->first_segment = true;
+@@ -491,16 +487,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                               info->full_packet_received = false;
+                       else
+                               info->full_packet_received = true;
+-
+-                      enqueue_reassembly(
+-                              info,
+-                              response,
+-                              data_length);
+-              } else
+-                      put_receive_buffer(info, response);
+-
+-              if (data_length)
+-                      wake_up_interruptible(&info->wait_reassembly_queue);
++              }
+               atomic_dec(&info->receive_credits);
+               info->receive_credit_target =
+@@ -528,6 +515,16 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                       info->keep_alive_requested = KEEP_ALIVE_PENDING;
+               }
++              /*
++               * If this is a packet with data playload place the data in
++               * reassembly queue and wake up the reading thread
++               */
++              if (data_length) {
++                      enqueue_reassembly(info, response, data_length);
++                      wake_up_interruptible(&info->wait_reassembly_queue);
++              } else
++                      put_receive_buffer(info, response);
++
+               return;
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-client-let-recv_done-cleanup-before-notifying-th.patch b/queue-6.12/smb-client-let-recv_done-cleanup-before-notifying-th.patch
new file mode 100644 (file)
index 0000000..945f5c6
--- /dev/null
@@ -0,0 +1,79 @@
+From 8a03b9df67ac7e59ef60dede2ba5e9be5e068122 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:15 +0200
+Subject: smb: client: let recv_done() cleanup before notifying the callers.
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit bdd7afc6dca5e0ebbb75583484aa6ea9e03fbb13 ]
+
+We should call put_receive_buffer() before waking up the callers.
+
+For the internal error case of response->type being unexpected,
+we now also call smbd_disconnect_rdma_connection() instead
+of not waking up the callers at all.
+
+Note that the SMBD_TRANSFER_DATA case still has problems,
+which will be addressed in the next commit in order to make
+it easier to review this one.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 5690e8b3d101..d26b8cef82d6 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -454,7 +454,6 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+       if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+               log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
+                       wc->status, wc->opcode);
+-              smbd_disconnect_rdma_connection(info);
+               goto error;
+       }
+@@ -471,8 +470,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               info->full_packet_received = true;
+               info->negotiate_done =
+                       process_negotiation_response(response, wc->byte_len);
++              put_receive_buffer(info, response);
+               complete(&info->negotiate_completion);
+-              break;
++              return;
+       /* SMBD data transfer packet */
+       case SMBD_TRANSFER_DATA:
+@@ -529,14 +529,16 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               }
+               return;
+-
+-      default:
+-              log_rdma_recv(ERR,
+-                      "unexpected response type=%d\n", response->type);
+       }
++      /*
++       * This is an internal error!
++       */
++      log_rdma_recv(ERR, "unexpected response type=%d\n", response->type);
++      WARN_ON_ONCE(response->type != SMBD_TRANSFER_DATA);
+ error:
+       put_receive_buffer(info, response);
++      smbd_disconnect_rdma_connection(info);
+ }
+ static struct rdma_cm_id *smbd_create_id(
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-client-let-send_done-cleanup-before-calling-smbd.patch b/queue-6.12/smb-client-let-send_done-cleanup-before-calling-smbd.patch
new file mode 100644 (file)
index 0000000..937a724
--- /dev/null
@@ -0,0 +1,63 @@
+From fb6ca0a1de44a2bec67cc2a61503ab631b2103ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:12 +0200
+Subject: smb: client: let send_done() cleanup before calling
+ smbd_disconnect_rdma_connection()
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 5349ae5e05fa37409fd48a1eb483b199c32c889b ]
+
+We should call ib_dma_unmap_single() and mempool_free() before calling
+smbd_disconnect_rdma_connection().
+
+And smbd_disconnect_rdma_connection() needs to be the last function to
+call as all other state might already be gone after it returns.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 754e94a0e07f..e99e783f1b0e 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -281,18 +281,20 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+       log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+               request, wc->status);
+-      if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+-              log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+-                      wc->status, wc->opcode);
+-              smbd_disconnect_rdma_connection(request->info);
+-      }
+-
+       for (i = 0; i < request->num_sge; i++)
+               ib_dma_unmap_single(sc->ib.dev,
+                       request->sge[i].addr,
+                       request->sge[i].length,
+                       DMA_TO_DEVICE);
++      if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
++              log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
++                      wc->status, wc->opcode);
++              mempool_free(request, info->request_mempool);
++              smbd_disconnect_rdma_connection(info);
++              return;
++      }
++
+       if (atomic_dec_and_test(&request->info->send_pending))
+               wake_up(&request->info->wait_send_pending);
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-client-make-sure-we-call-ib_dma_unmap_single-onl.patch b/queue-6.12/smb-client-make-sure-we-call-ib_dma_unmap_single-onl.patch
new file mode 100644 (file)
index 0000000..a9bcde7
--- /dev/null
@@ -0,0 +1,68 @@
+From 768b0c9b76e76cfd57fa06caba02a0fc5422f724 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:14 +0200
+Subject: smb: client: make sure we call ib_dma_unmap_single() only if we
+ called ib_dma_map_single already
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 047682c370b6f18fec818b57b0ed8b501bdb79f8 ]
+
+In case of failures either ib_dma_map_single() might not be called yet
+or ib_dma_unmap_single() was already called.
+
+We should make sure put_receive_buffer() only calls
+ib_dma_unmap_single() if needed.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 0ab490c0a9b0..5690e8b3d101 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1057,6 +1057,7 @@ static int smbd_post_recv(
+       if (rc) {
+               ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+                                   response->sge.length, DMA_FROM_DEVICE);
++              response->sge.length = 0;
+               smbd_disconnect_rdma_connection(info);
+               log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+       }
+@@ -1186,8 +1187,13 @@ static void put_receive_buffer(
+       struct smbdirect_socket *sc = &info->socket;
+       unsigned long flags;
+-      ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+-              response->sge.length, DMA_FROM_DEVICE);
++      if (likely(response->sge.length != 0)) {
++              ib_dma_unmap_single(sc->ib.dev,
++                                  response->sge.addr,
++                                  response->sge.length,
++                                  DMA_FROM_DEVICE);
++              response->sge.length = 0;
++      }
+       spin_lock_irqsave(&info->receive_queue_lock, flags);
+       list_add_tail(&response->list, &info->receive_queue);
+@@ -1221,6 +1227,7 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+                       goto allocate_failed;
+               response->info = info;
++              response->sge.length = 0;
+               list_add_tail(&response->list, &info->receive_queue);
+               info->count_receive_queue++;
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-client-remove-separate-empty_packet_queue.patch b/queue-6.12/smb-client-remove-separate-empty_packet_queue.patch
new file mode 100644 (file)
index 0000000..ceb8942
--- /dev/null
@@ -0,0 +1,202 @@
+From a8366484d43774ac7aab58beec07093c3e82b2c3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:13 +0200
+Subject: smb: client: remove separate empty_packet_queue
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 24b6afc36db748467e853e166a385df07e443859 ]
+
+There's no need to maintain two lists, we can just
+have a single list of receive buffers, which are free to use.
+
+It just added unneeded complexity and resulted in
+ib_dma_unmap_single() not being called from recv_done()
+for empty keepalive packets.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c |  6 ++--
+ fs/smb/client/smbdirect.c  | 62 +++-----------------------------------
+ fs/smb/client/smbdirect.h  |  4 ---
+ 3 files changed, 7 insertions(+), 65 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index c0196be0e65f..9092051776fc 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -432,10 +432,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+                       server->smbd_conn->receive_credit_target);
+               seq_printf(m, "\nPending send_pending: %x ",
+                       atomic_read(&server->smbd_conn->send_pending));
+-              seq_printf(m, "\nReceive buffers count_receive_queue: %x "
+-                      "count_empty_packet_queue: %x",
+-                      server->smbd_conn->count_receive_queue,
+-                      server->smbd_conn->count_empty_packet_queue);
++              seq_printf(m, "\nReceive buffers count_receive_queue: %x ",
++                      server->smbd_conn->count_receive_queue);
+               seq_printf(m, "\nMR responder_resources: %x "
+                       "max_frmr_depth: %x mr_type: %x",
+                       server->smbd_conn->responder_resources,
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index e99e783f1b0e..0ab490c0a9b0 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -13,8 +13,6 @@
+ #include "cifsproto.h"
+ #include "smb2proto.h"
+-static struct smbd_response *get_empty_queue_buffer(
+-              struct smbd_connection *info);
+ static struct smbd_response *get_receive_buffer(
+               struct smbd_connection *info);
+ static void put_receive_buffer(
+@@ -23,8 +21,6 @@ static void put_receive_buffer(
+ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf);
+ static void destroy_receive_buffers(struct smbd_connection *info);
+-static void put_empty_packet(
+-              struct smbd_connection *info, struct smbd_response *response);
+ static void enqueue_reassembly(
+               struct smbd_connection *info,
+               struct smbd_response *response, int data_length);
+@@ -393,7 +389,6 @@ static bool process_negotiation_response(
+ static void smbd_post_send_credits(struct work_struct *work)
+ {
+       int ret = 0;
+-      int use_receive_queue = 1;
+       int rc;
+       struct smbd_response *response;
+       struct smbd_connection *info =
+@@ -409,18 +404,9 @@ static void smbd_post_send_credits(struct work_struct *work)
+       if (info->receive_credit_target >
+               atomic_read(&info->receive_credits)) {
+               while (true) {
+-                      if (use_receive_queue)
+-                              response = get_receive_buffer(info);
+-                      else
+-                              response = get_empty_queue_buffer(info);
+-                      if (!response) {
+-                              /* now switch to empty packet queue */
+-                              if (use_receive_queue) {
+-                                      use_receive_queue = 0;
+-                                      continue;
+-                              } else
+-                                      break;
+-                      }
++                      response = get_receive_buffer(info);
++                      if (!response)
++                              break;
+                       response->type = SMBD_TRANSFER_DATA;
+                       response->first_segment = false;
+@@ -511,7 +497,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                               response,
+                               data_length);
+               } else
+-                      put_empty_packet(info, response);
++                      put_receive_buffer(info, response);
+               if (data_length)
+                       wake_up_interruptible(&info->wait_reassembly_queue);
+@@ -1115,17 +1101,6 @@ static int smbd_negotiate(struct smbd_connection *info)
+       return rc;
+ }
+-static void put_empty_packet(
+-              struct smbd_connection *info, struct smbd_response *response)
+-{
+-      spin_lock(&info->empty_packet_queue_lock);
+-      list_add_tail(&response->list, &info->empty_packet_queue);
+-      info->count_empty_packet_queue++;
+-      spin_unlock(&info->empty_packet_queue_lock);
+-
+-      queue_work(info->workqueue, &info->post_send_credits_work);
+-}
+-
+ /*
+  * Implement Connection.FragmentReassemblyBuffer defined in [MS-SMBD] 3.1.1.1
+  * This is a queue for reassembling upper layer payload and present to upper
+@@ -1174,25 +1149,6 @@ static struct smbd_response *_get_first_reassembly(struct smbd_connection *info)
+       return ret;
+ }
+-static struct smbd_response *get_empty_queue_buffer(
+-              struct smbd_connection *info)
+-{
+-      struct smbd_response *ret = NULL;
+-      unsigned long flags;
+-
+-      spin_lock_irqsave(&info->empty_packet_queue_lock, flags);
+-      if (!list_empty(&info->empty_packet_queue)) {
+-              ret = list_first_entry(
+-                      &info->empty_packet_queue,
+-                      struct smbd_response, list);
+-              list_del(&ret->list);
+-              info->count_empty_packet_queue--;
+-      }
+-      spin_unlock_irqrestore(&info->empty_packet_queue_lock, flags);
+-
+-      return ret;
+-}
+-
+ /*
+  * Get a receive buffer
+  * For each remote send, we need to post a receive. The receive buffers are
+@@ -1257,10 +1213,6 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+       spin_lock_init(&info->receive_queue_lock);
+       info->count_receive_queue = 0;
+-      INIT_LIST_HEAD(&info->empty_packet_queue);
+-      spin_lock_init(&info->empty_packet_queue_lock);
+-      info->count_empty_packet_queue = 0;
+-
+       init_waitqueue_head(&info->wait_receive_queues);
+       for (i = 0; i < num_buf; i++) {
+@@ -1294,9 +1246,6 @@ static void destroy_receive_buffers(struct smbd_connection *info)
+       while ((response = get_receive_buffer(info)))
+               mempool_free(response, info->response_mempool);
+-
+-      while ((response = get_empty_queue_buffer(info)))
+-              mempool_free(response, info->response_mempool);
+ }
+ /* Implement idle connection timer [MS-SMBD] 3.1.6.2 */
+@@ -1383,8 +1332,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+       log_rdma_event(INFO, "free receive buffers\n");
+       wait_event(info->wait_receive_queues,
+-              info->count_receive_queue + info->count_empty_packet_queue
+-                      == sp->recv_credit_max);
++              info->count_receive_queue == sp->recv_credit_max);
+       destroy_receive_buffers(info);
+       /*
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index 3d552ab27e0f..fb8db71735f3 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -110,10 +110,6 @@ struct smbd_connection {
+       int count_receive_queue;
+       spinlock_t receive_queue_lock;
+-      struct list_head empty_packet_queue;
+-      int count_empty_packet_queue;
+-      spinlock_t empty_packet_queue_lock;
+-
+       wait_queue_head_t wait_receive_queues;
+       /* Reassembly queue */
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-client-return-an-error-if-rdma_connect-does-not-.patch b/queue-6.12/smb-client-return-an-error-if-rdma_connect-does-not-.patch
new file mode 100644 (file)
index 0000000..9e6fd11
--- /dev/null
@@ -0,0 +1,45 @@
+From 490178103e394c85ea14bd94976c6b5ebde743db Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 18:12:11 +0200
+Subject: smb: client: return an error if rdma_connect does not return within 5
+ seconds
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 03537826f77f1c829d0593d211b38b9c876c1722 ]
+
+This matches the timeout for tcp connections.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 47f2a6cc1c0c..60b160219f0a 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1636,8 +1636,10 @@ static struct smbd_connection *_smbd_get_connection(
+               goto rdma_connect_failed;
+       }
+-      wait_event_interruptible(
+-              info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
++      wait_event_interruptible_timeout(
++              info->conn_wait,
++              sc->status != SMBDIRECT_SOCKET_CONNECTING,
++              msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+       if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+               log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-server-let-recv_done-avoid-touching-data_transfe.patch b/queue-6.12/smb-server-let-recv_done-avoid-touching-data_transfe.patch
new file mode 100644 (file)
index 0000000..9841c50
--- /dev/null
@@ -0,0 +1,67 @@
+From b2b359b374ebc7a351d062a576e3733bda831f72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:53 +0200
+Subject: smb: server: let recv_done() avoid touching data_transfer after
+ cleanup/move
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit a6c015b7ac2d8c5233337e5793f50d04fac17669 ]
+
+Calling enqueue_reassembly() and wake_up_interruptible(&t->wait_reassembly_queue)
+or put_receive_buffer() means the recvmsg/data_transfer pointer might
+get re-used by another thread, which means these should be
+the last operations before calling return.
+
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index b5e9fd9369e9..805c20f619b0 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -580,16 +580,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                       else
+                               t->full_packet_received = true;
+-                      enqueue_reassembly(t, recvmsg, (int)data_length);
+-                      wake_up_interruptible(&t->wait_reassembly_queue);
+-
+                       spin_lock(&t->receive_credit_lock);
+                       receive_credits = --(t->recv_credits);
+                       avail_recvmsg_count = t->count_avail_recvmsg;
+                       spin_unlock(&t->receive_credit_lock);
+               } else {
+-                      put_recvmsg(t, recvmsg);
+-
+                       spin_lock(&t->receive_credit_lock);
+                       receive_credits = --(t->recv_credits);
+                       avail_recvmsg_count = ++(t->count_avail_recvmsg);
+@@ -611,6 +606,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+                       mod_delayed_work(smb_direct_wq,
+                                        &t->post_recv_credits_work, 0);
++
++              if (data_length) {
++                      enqueue_reassembly(t, recvmsg, (int)data_length);
++                      wake_up_interruptible(&t->wait_reassembly_queue);
++              } else
++                      put_recvmsg(t, recvmsg);
++
+               return;
+       }
+       }
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-server-let-recv_done-consistently-call-put_recvm.patch b/queue-6.12/smb-server-let-recv_done-consistently-call-put_recvm.patch
new file mode 100644 (file)
index 0000000..259e61c
--- /dev/null
@@ -0,0 +1,105 @@
+From c06c7c338d2c1d08dfd5cb659f14ab042d076dac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:52 +0200
+Subject: smb: server: let recv_done() consistently call
+ put_recvmsg/smb_direct_disconnect_rdma_connection
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit cfe76fdbb9729c650f3505d9cfb2f70ddda2dbdc ]
+
+We should call put_recvmsg() before smb_direct_disconnect_rdma_connection()
+in order to call it before waking up the callers.
+
+In all error cases we should call smb_direct_disconnect_rdma_connection()
+in order to avoid stale connections.
+
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 3bb6cb9dceae..b5e9fd9369e9 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -520,13 +520,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+       t = recvmsg->transport;
+       if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
++              put_recvmsg(t, recvmsg);
+               if (wc->status != IB_WC_WR_FLUSH_ERR) {
+                       pr_err("Recv error. status='%s (%d)' opcode=%d\n",
+                              ib_wc_status_msg(wc->status), wc->status,
+                              wc->opcode);
+                       smb_direct_disconnect_rdma_connection(t);
+               }
+-              put_recvmsg(t, recvmsg);
+               return;
+       }
+@@ -541,6 +541,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+       case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+               if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+                       put_recvmsg(t, recvmsg);
++                      smb_direct_disconnect_rdma_connection(t);
+                       return;
+               }
+               t->negotiation_requested = true;
+@@ -548,7 +549,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               t->status = SMB_DIRECT_CS_CONNECTED;
+               enqueue_reassembly(t, recvmsg, 0);
+               wake_up_interruptible(&t->wait_status);
+-              break;
++              return;
+       case SMB_DIRECT_MSG_DATA_TRANSFER: {
+               struct smb_direct_data_transfer *data_transfer =
+                       (struct smb_direct_data_transfer *)recvmsg->packet;
+@@ -558,6 +559,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               if (wc->byte_len <
+                   offsetof(struct smb_direct_data_transfer, padding)) {
+                       put_recvmsg(t, recvmsg);
++                      smb_direct_disconnect_rdma_connection(t);
+                       return;
+               }
+@@ -566,6 +568,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                       if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+                           (u64)data_length) {
+                               put_recvmsg(t, recvmsg);
++                              smb_direct_disconnect_rdma_connection(t);
+                               return;
+                       }
+@@ -608,11 +611,16 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+                       mod_delayed_work(smb_direct_wq,
+                                        &t->post_recv_credits_work, 0);
+-              break;
++              return;
+       }
+-      default:
+-              break;
+       }
++
++      /*
++       * This is an internal error!
++       */
++      WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER);
++      put_recvmsg(t, recvmsg);
++      smb_direct_disconnect_rdma_connection(t);
+ }
+ static int smb_direct_post_recv(struct smb_direct_transport *t,
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-server-make-sure-we-call-ib_dma_unmap_single-onl.patch b/queue-6.12/smb-server-make-sure-we-call-ib_dma_unmap_single-onl.patch
new file mode 100644 (file)
index 0000000..bb5e8f7
--- /dev/null
@@ -0,0 +1,68 @@
+From 361e25d53f91046b560ff3091084b2cd45ebad30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:51 +0200
+Subject: smb: server: make sure we call ib_dma_unmap_single() only if we
+ called ib_dma_map_single already
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit afb4108c92898350e66b9a009692230bcdd2ac73 ]
+
+In case of failures either ib_dma_map_single() might not be called yet
+or ib_dma_unmap_single() was already called.
+
+We should make sure put_recvmsg() only calls ib_dma_unmap_single() if needed.
+
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index fd78d24e96f4..3bb6cb9dceae 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -264,8 +264,13 @@ smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
+ static void put_recvmsg(struct smb_direct_transport *t,
+                       struct smb_direct_recvmsg *recvmsg)
+ {
+-      ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+-                          recvmsg->sge.length, DMA_FROM_DEVICE);
++      if (likely(recvmsg->sge.length != 0)) {
++              ib_dma_unmap_single(t->cm_id->device,
++                                  recvmsg->sge.addr,
++                                  recvmsg->sge.length,
++                                  DMA_FROM_DEVICE);
++              recvmsg->sge.length = 0;
++      }
+       spin_lock(&t->recvmsg_queue_lock);
+       list_add(&recvmsg->list, &t->recvmsg_queue);
+@@ -637,6 +642,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
+               ib_dma_unmap_single(t->cm_id->device,
+                                   recvmsg->sge.addr, recvmsg->sge.length,
+                                   DMA_FROM_DEVICE);
++              recvmsg->sge.length = 0;
+               smb_direct_disconnect_rdma_connection(t);
+               return ret;
+       }
+@@ -1818,6 +1824,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
+               if (!recvmsg)
+                       goto err;
+               recvmsg->transport = t;
++              recvmsg->sge.length = 0;
+               list_add(&recvmsg->list, &t->recvmsg_queue);
+       }
+       t->count_avail_recvmsg = t->recv_credit_max;
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-server-remove-separate-empty_recvmsg_queue.patch b/queue-6.12/smb-server-remove-separate-empty_recvmsg_queue.patch
new file mode 100644 (file)
index 0000000..60698ec
--- /dev/null
@@ -0,0 +1,169 @@
+From a5f04a07ad7fd17ba4267cf4c293df812ba812df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:50 +0200
+Subject: smb: server: remove separate empty_recvmsg_queue
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 01027a62b508c48c762096f347de925eedcbd008 ]
+
+There's no need to maintain two lists, we can just
+have a single list of receive buffers, which are free to use.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 60 +++++-----------------------------
+ 1 file changed, 8 insertions(+), 52 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 3ab8c04f72e4..fd78d24e96f4 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -128,9 +128,6 @@ struct smb_direct_transport {
+       spinlock_t              recvmsg_queue_lock;
+       struct list_head        recvmsg_queue;
+-      spinlock_t              empty_recvmsg_queue_lock;
+-      struct list_head        empty_recvmsg_queue;
+-
+       int                     send_credit_target;
+       atomic_t                send_credits;
+       spinlock_t              lock_new_recv_credits;
+@@ -275,32 +272,6 @@ static void put_recvmsg(struct smb_direct_transport *t,
+       spin_unlock(&t->recvmsg_queue_lock);
+ }
+-static struct
+-smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
+-{
+-      struct smb_direct_recvmsg *recvmsg = NULL;
+-
+-      spin_lock(&t->empty_recvmsg_queue_lock);
+-      if (!list_empty(&t->empty_recvmsg_queue)) {
+-              recvmsg = list_first_entry(&t->empty_recvmsg_queue,
+-                                         struct smb_direct_recvmsg, list);
+-              list_del(&recvmsg->list);
+-      }
+-      spin_unlock(&t->empty_recvmsg_queue_lock);
+-      return recvmsg;
+-}
+-
+-static void put_empty_recvmsg(struct smb_direct_transport *t,
+-                            struct smb_direct_recvmsg *recvmsg)
+-{
+-      ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+-                          recvmsg->sge.length, DMA_FROM_DEVICE);
+-
+-      spin_lock(&t->empty_recvmsg_queue_lock);
+-      list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
+-      spin_unlock(&t->empty_recvmsg_queue_lock);
+-}
+-
+ static void enqueue_reassembly(struct smb_direct_transport *t,
+                              struct smb_direct_recvmsg *recvmsg,
+                              int data_length)
+@@ -385,9 +356,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+       spin_lock_init(&t->recvmsg_queue_lock);
+       INIT_LIST_HEAD(&t->recvmsg_queue);
+-      spin_lock_init(&t->empty_recvmsg_queue_lock);
+-      INIT_LIST_HEAD(&t->empty_recvmsg_queue);
+-
+       init_waitqueue_head(&t->wait_send_pending);
+       atomic_set(&t->send_pending, 0);
+@@ -553,7 +521,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                              wc->opcode);
+                       smb_direct_disconnect_rdma_connection(t);
+               }
+-              put_empty_recvmsg(t, recvmsg);
++              put_recvmsg(t, recvmsg);
+               return;
+       }
+@@ -567,7 +535,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+       switch (recvmsg->type) {
+       case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+               if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+-                      put_empty_recvmsg(t, recvmsg);
++                      put_recvmsg(t, recvmsg);
+                       return;
+               }
+               t->negotiation_requested = true;
+@@ -584,7 +552,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               if (wc->byte_len <
+                   offsetof(struct smb_direct_data_transfer, padding)) {
+-                      put_empty_recvmsg(t, recvmsg);
++                      put_recvmsg(t, recvmsg);
+                       return;
+               }
+@@ -592,7 +560,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               if (data_length) {
+                       if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+                           (u64)data_length) {
+-                              put_empty_recvmsg(t, recvmsg);
++                              put_recvmsg(t, recvmsg);
+                               return;
+                       }
+@@ -612,7 +580,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+                       avail_recvmsg_count = t->count_avail_recvmsg;
+                       spin_unlock(&t->receive_credit_lock);
+               } else {
+-                      put_empty_recvmsg(t, recvmsg);
++                      put_recvmsg(t, recvmsg);
+                       spin_lock(&t->receive_credit_lock);
+                       receive_credits = --(t->recv_credits);
+@@ -810,7 +778,6 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
+       struct smb_direct_recvmsg *recvmsg;
+       int receive_credits, credits = 0;
+       int ret;
+-      int use_free = 1;
+       spin_lock(&t->receive_credit_lock);
+       receive_credits = t->recv_credits;
+@@ -818,18 +785,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
+       if (receive_credits < t->recv_credit_target) {
+               while (true) {
+-                      if (use_free)
+-                              recvmsg = get_free_recvmsg(t);
+-                      else
+-                              recvmsg = get_empty_recvmsg(t);
+-                      if (!recvmsg) {
+-                              if (use_free) {
+-                                      use_free = 0;
+-                                      continue;
+-                              } else {
+-                                      break;
+-                              }
+-                      }
++                      recvmsg = get_free_recvmsg(t);
++                      if (!recvmsg)
++                              break;
+                       recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
+                       recvmsg->first_segment = false;
+@@ -1805,8 +1763,6 @@ static void smb_direct_destroy_pools(struct smb_direct_transport *t)
+       while ((recvmsg = get_free_recvmsg(t)))
+               mempool_free(recvmsg, t->recvmsg_mempool);
+-      while ((recvmsg = get_empty_recvmsg(t)))
+-              mempool_free(recvmsg, t->recvmsg_mempool);
+       mempool_destroy(t->recvmsg_mempool);
+       t->recvmsg_mempool = NULL;
+-- 
+2.39.5
+
diff --git a/queue-6.12/spi-cs42l43-property-entry-should-be-a-null-terminat.patch b/queue-6.12/spi-cs42l43-property-entry-should-be-a-null-terminat.patch
new file mode 100644 (file)
index 0000000..416285e
--- /dev/null
@@ -0,0 +1,42 @@
+From ce443e38359a3c9c05f57231786b5d62b5574e0f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 16:01:09 +0000
+Subject: spi: cs42l43: Property entry should be a null-terminated array
+
+From: Simon Trimmer <simont@opensource.cirrus.com>
+
+[ Upstream commit ffcfd071eec7973e58c4ffff7da4cb0e9ca7b667 ]
+
+The software node does not specify a count of property entries, so the
+array must be null-terminated.
+
+When unterminated, this can lead to a fault in the downstream cs35l56
+amplifier driver, because the node parse walks off the end of the
+array into unknown memory.
+
+Fixes: 0ca645ab5b15 ("spi: cs42l43: Add speaker id support to the bridge configuration")
+Closes: https://bugzilla.kernel.org/show_bug.cgi?id=220371
+Signed-off-by: Simon Trimmer <simont@opensource.cirrus.com>
+Link: https://patch.msgid.link/20250731160109.1547131-1-simont@opensource.cirrus.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-cs42l43.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-cs42l43.c b/drivers/spi/spi-cs42l43.c
+index 5b8ed65f8094..7a02fb42a88b 100644
+--- a/drivers/spi/spi-cs42l43.c
++++ b/drivers/spi/spi-cs42l43.c
+@@ -265,7 +265,7 @@ static struct spi_board_info *cs42l43_create_bridge_amp(struct cs42l43_spi *priv
+       struct spi_board_info *info;
+       if (spkid >= 0) {
+-              props = devm_kmalloc(priv->dev, sizeof(*props), GFP_KERNEL);
++              props = devm_kcalloc(priv->dev, 2, sizeof(*props), GFP_KERNEL);
+               if (!props)
+                       return NULL;
+-- 
+2.39.5
+
diff --git a/queue-6.12/sunrpc-fix-client-side-handling-of-tls-alerts.patch b/queue-6.12/sunrpc-fix-client-side-handling-of-tls-alerts.patch
new file mode 100644 (file)
index 0000000..5a2879f
--- /dev/null
@@ -0,0 +1,124 @@
+From 072b2559d5262c8ea0037e0d26a86994effef7ec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 14:00:56 -0400
+Subject: sunrpc: fix client side handling of tls alerts
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+[ Upstream commit cc5d59081fa26506d02de2127ab822f40d88bc5a ]
+
+A security exploit was discovered in NFS over TLS in tls_alert_recv
+due to its assumption that there is valid data in the msghdr's
+iterator's kvec.
+
+Instead, this patch proposes the rework how control messages are
+setup and used by sock_recvmsg().
+
+If no control message structure is setup, kTLS layer will read and
+process TLS data record types. As soon as it encounters a TLS control
+message, it would return an error. At that point, NFS can setup a kvec
+backed control buffer and read in the control message such as a TLS
+alert. Scott found that a msg iterator can advance the kvec pointer
+as a part of the copy process thus we need to revert the iterator
+before calling into the tls_alert_recv.
+
+Fixes: dea034b963c8 ("SUNRPC: Capture CMSG metadata on client-side receive")
+Suggested-by: Trond Myklebust <trondmy@hammerspace.com>
+Suggested-by: Scott Mayhew <smayhew@redhat.com>
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Link: https://lore.kernel.org/r/20250731180058.4669-3-okorniev@redhat.com
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtsock.c | 40 ++++++++++++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 10 deletions(-)
+
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 67d099c7c662..1397bb48cdde 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -358,7 +358,7 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
+ static int
+ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+-                   struct cmsghdr *cmsg, int ret)
++                   unsigned int *msg_flags, struct cmsghdr *cmsg, int ret)
+ {
+       u8 content_type = tls_get_record_type(sock->sk, cmsg);
+       u8 level, description;
+@@ -371,7 +371,7 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+                * record, even though there might be more frames
+                * waiting to be decrypted.
+                */
+-              msg->msg_flags &= ~MSG_EOR;
++              *msg_flags &= ~MSG_EOR;
+               break;
+       case TLS_RECORD_TYPE_ALERT:
+               tls_alert_recv(sock->sk, msg, &level, &description);
+@@ -386,19 +386,33 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ }
+ static int
+-xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
++xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
+ {
+       union {
+               struct cmsghdr  cmsg;
+               u8              buf[CMSG_SPACE(sizeof(u8))];
+       } u;
++      u8 alert[2];
++      struct kvec alert_kvec = {
++              .iov_base = alert,
++              .iov_len = sizeof(alert),
++      };
++      struct msghdr msg = {
++              .msg_flags = *msg_flags,
++              .msg_control = &u,
++              .msg_controllen = sizeof(u),
++      };
+       int ret;
+-      msg->msg_control = &u;
+-      msg->msg_controllen = sizeof(u);
+-      ret = sock_recvmsg(sock, msg, flags);
+-      if (msg->msg_controllen != sizeof(u))
+-              ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
++      iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
++                    alert_kvec.iov_len);
++      ret = sock_recvmsg(sock, &msg, flags);
++      if (ret > 0 &&
++          tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
++              iov_iter_revert(&msg.msg_iter, ret);
++              ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
++                                         -EAGAIN);
++      }
+       return ret;
+ }
+@@ -408,7 +422,13 @@ xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
+       ssize_t ret;
+       if (seek != 0)
+               iov_iter_advance(&msg->msg_iter, seek);
+-      ret = xs_sock_recv_cmsg(sock, msg, flags);
++      ret = sock_recvmsg(sock, msg, flags);
++      /* Handle TLS inband control message lazily */
++      if (msg->msg_flags & MSG_CTRUNC) {
++              msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
++              if (ret == 0 || ret == -EIO)
++                      ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags);
++      }
+       return ret > 0 ? ret + seek : ret;
+ }
+@@ -434,7 +454,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
+               size_t count)
+ {
+       iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
+-      return xs_sock_recv_cmsg(sock, msg, flags);
++      return xs_sock_recvmsg(sock, msg, flags, 0);
+ }
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+-- 
+2.39.5
+
diff --git a/queue-6.12/x86-irq-plug-vector-setup-race.patch b/queue-6.12/x86-irq-plug-vector-setup-race.patch
new file mode 100644 (file)
index 0000000..0f63da8
--- /dev/null
@@ -0,0 +1,184 @@
+From d21c7bff335933aeedc854138b13c3859e00e8bf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 24 Jul 2025 12:49:30 +0200
+Subject: x86/irq: Plug vector setup race
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit ce0b5eedcb753697d43f61dd2e27d68eb5d3150f ]
+
+Hogan reported a vector setup race, which overwrites the interrupt
+descriptor in the per CPU vector array resulting in a disfunctional device.
+
+CPU0                           CPU1
+                               interrupt is raised in APIC IRR
+                               but not handled
+  free_irq()
+    per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
+
+  request_irq()                        common_interrupt()
+                                 d = this_cpu_read(vector_irq[vector]);
+
+    per_cpu(vector_irq, CPU1)[vector] = desc;
+
+                                 if (d == VECTOR_SHUTDOWN)
+                                   this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+
+free_irq() cannot observe the pending vector in the CPU1 APIC as there is
+no way to query the remote CPUs APIC IRR.
+
+This requires that request_irq() uses the same vector/CPU as the one which
+was freed, but this also can be triggered by a spurious interrupt.
+
+Interestingly enough this problem managed to be hidden for more than a
+decade.
+
+Prevent this by reevaluating vector_irq under the vector lock, which is
+held by the interrupt activation code when vector_irq is updated.
+
+To avoid ifdeffery or IS_ENABLED() nonsense, move the
+[un]lock_vector_lock() declarations out under the
+CONFIG_IRQ_DOMAIN_HIERARCHY guard as it's only provided when
+CONFIG_X86_LOCAL_APIC=y.
+
+The current CONFIG_IRQ_DOMAIN_HIERARCHY guard is selected by
+CONFIG_X86_LOCAL_APIC, but can also be selected by other parts of the
+Kconfig system, which makes 32-bit UP builds with CONFIG_X86_LOCAL_APIC=n
+fail.
+
+Can we just get rid of this !APIC nonsense once and forever?
+
+Fixes: 9345005f4eed ("x86/irq: Fix do_IRQ() interrupt warning for cpu hotplug retriggered irqs")
+Reported-by: Hogan Wang <hogan.wang@huawei.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Tested-by: Hogan Wang <hogan.wang@huawei.com>
+Link: https://lore.kernel.org/all/draft-87ikjhrhhh.ffs@tglx
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/hw_irq.h | 12 ++++---
+ arch/x86/kernel/irq.c         | 63 ++++++++++++++++++++++++++---------
+ 2 files changed, 55 insertions(+), 20 deletions(-)
+
+diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
+index edebf1020e04..6bb3d9a86abe 100644
+--- a/arch/x86/include/asm/hw_irq.h
++++ b/arch/x86/include/asm/hw_irq.h
+@@ -92,8 +92,6 @@ struct irq_cfg {
+ extern struct irq_cfg *irq_cfg(unsigned int irq);
+ extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
+-extern void lock_vector_lock(void);
+-extern void unlock_vector_lock(void);
+ #ifdef CONFIG_SMP
+ extern void vector_schedule_cleanup(struct irq_cfg *);
+ extern void irq_complete_move(struct irq_cfg *cfg);
+@@ -101,12 +99,16 @@ extern void irq_complete_move(struct irq_cfg *cfg);
+ static inline void vector_schedule_cleanup(struct irq_cfg *c) { }
+ static inline void irq_complete_move(struct irq_cfg *c) { }
+ #endif
+-
+ extern void apic_ack_edge(struct irq_data *data);
+-#else /*  CONFIG_IRQ_DOMAIN_HIERARCHY */
++#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++extern void lock_vector_lock(void);
++extern void unlock_vector_lock(void);
++#else
+ static inline void lock_vector_lock(void) {}
+ static inline void unlock_vector_lock(void) {}
+-#endif        /* CONFIG_IRQ_DOMAIN_HIERARCHY */
++#endif
+ /* Statistics */
+ extern atomic_t irq_err_count;
+diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
+index 85fa2db38dc4..9400730e538e 100644
+--- a/arch/x86/kernel/irq.c
++++ b/arch/x86/kernel/irq.c
+@@ -251,26 +251,59 @@ static __always_inline void handle_irq(struct irq_desc *desc,
+               __handle_irq(desc, regs);
+ }
+-static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
++static struct irq_desc *reevaluate_vector(int vector)
+ {
+-      struct irq_desc *desc;
+-      int ret = 0;
++      struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
++
++      if (!IS_ERR_OR_NULL(desc))
++              return desc;
++
++      if (desc == VECTOR_UNUSED)
++              pr_emerg_ratelimited("No irq handler for %d.%u\n", smp_processor_id(), vector);
++      else
++              __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++      return NULL;
++}
++
++static __always_inline bool call_irq_handler(int vector, struct pt_regs *regs)
++{
++      struct irq_desc *desc = __this_cpu_read(vector_irq[vector]);
+-      desc = __this_cpu_read(vector_irq[vector]);
+       if (likely(!IS_ERR_OR_NULL(desc))) {
+               handle_irq(desc, regs);
+-      } else {
+-              ret = -EINVAL;
+-              if (desc == VECTOR_UNUSED) {
+-                      pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
+-                                           __func__, smp_processor_id(),
+-                                           vector);
+-              } else {
+-                      __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+-              }
++              return true;
+       }
+-      return ret;
++      /*
++       * Reevaluate with vector_lock held to prevent a race against
++       * request_irq() setting up the vector:
++       *
++       * CPU0                         CPU1
++       *                              interrupt is raised in APIC IRR
++       *                              but not handled
++       * free_irq()
++       *   per_cpu(vector_irq, CPU1)[vector] = VECTOR_SHUTDOWN;
++       *
++       * request_irq()                common_interrupt()
++       *                                d = this_cpu_read(vector_irq[vector]);
++       *
++       * per_cpu(vector_irq, CPU1)[vector] = desc;
++       *
++       *                                if (d == VECTOR_SHUTDOWN)
++       *                                  this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
++       *
++       * This requires that the same vector on the same target CPU is
++       * handed out or that a spurious interrupt hits that CPU/vector.
++       */
++      lock_vector_lock();
++      desc = reevaluate_vector(vector);
++      unlock_vector_lock();
++
++      if (!desc)
++              return false;
++
++      handle_irq(desc, regs);
++      return true;
+ }
+ /*
+@@ -284,7 +317,7 @@ DEFINE_IDTENTRY_IRQ(common_interrupt)
+       /* entry code tells RCU that we're not quiescent.  Check it. */
+       RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
+-      if (unlikely(call_irq_handler(vector, regs)))
++      if (unlikely(!call_irq_handler(vector, regs)))
+               apic_eoi();
+       set_irq_regs(old_regs);
+-- 
+2.39.5
+