--- /dev/null
+From 0012c804c31cac416042932a4a6bb45d9b16da4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Aug 2025 11:44:22 +0200
+Subject: ALSA: hda/ca0132: Fix missing error handling in
+ ca0132_alt_select_out()
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 9f320dfb0ffc555aa2eac8331dee0c2c16f67633 ]
+
+There are a couple of cases where the error is ignored or the error
+code isn't propagated in ca0132_alt_select_out(). Fix those.
+
+Fixes: def3f0a5c700 ("ALSA: hda/ca0132 - Add quirk output selection structures.")
+Link: https://patch.msgid.link/20250806094423.8843-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_ca0132.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
+index 27e48fdbbf3a..94b452595f30 100644
+--- a/sound/pci/hda/patch_ca0132.c
++++ b/sound/pci/hda/patch_ca0132.c
+@@ -4803,7 +4803,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
+ if (err < 0)
+ goto exit;
+
+- if (ca0132_alt_select_out_quirk_set(codec) < 0)
++ err = ca0132_alt_select_out_quirk_set(codec);
++ if (err < 0)
+ goto exit;
+
+ switch (spec->cur_out_type) {
+@@ -4893,6 +4894,8 @@ static int ca0132_alt_select_out(struct hda_codec *codec)
+ spec->bass_redirection_val);
+ else
+ err = ca0132_alt_surround_set_bass_redirection(codec, 0);
++ if (err < 0)
++ goto exit;
+
+ /* Unmute DSP now that we're done with output selection. */
+ err = dspio_set_uint_param(codec, 0x96,
+--
+2.39.5
+
--- /dev/null
+From 81b6ec7a38ed02f8ef622c25b5258f4ba02895ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 12:13:37 +0200
+Subject: benet: fix BUG when creating VFs
+
+From: Michal Schmidt <mschmidt@redhat.com>
+
+[ Upstream commit 5a40f8af2ba1b9bdf46e2db10e8c9710538fbc63 ]
+
+benet crashes as soon as SRIOV VFs are created:
+
+ kernel BUG at mm/vmalloc.c:3457!
+ Oops: invalid opcode: 0000 [#1] SMP KASAN NOPTI
+ CPU: 4 UID: 0 PID: 7408 Comm: test.sh Kdump: loaded Not tainted 6.16.0+ #1 PREEMPT(voluntary)
+ [...]
+ RIP: 0010:vunmap+0x5f/0x70
+ [...]
+ Call Trace:
+ <TASK>
+ __iommu_dma_free+0xe8/0x1c0
+ be_cmd_set_mac_list+0x3fe/0x640 [be2net]
+ be_cmd_set_mac+0xaf/0x110 [be2net]
+ be_vf_eth_addr_config+0x19f/0x330 [be2net]
+ be_vf_setup+0x4f7/0x990 [be2net]
+ be_pci_sriov_configure+0x3a1/0x470 [be2net]
+ sriov_numvfs_store+0x20b/0x380
+ kernfs_fop_write_iter+0x354/0x530
+ vfs_write+0x9b9/0xf60
+ ksys_write+0xf3/0x1d0
+ do_syscall_64+0x8c/0x3d0
+
+be_cmd_set_mac_list() calls dma_free_coherent() under a spin_lock_bh.
+Fix it by freeing only after the lock has been released.
+
+Fixes: 1a82d19ca2d6 ("be2net: fix sleeping while atomic bugs in be_ndo_bridge_getlink")
+Signed-off-by: Michal Schmidt <mschmidt@redhat.com>
+Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
+Link: https://patch.msgid.link/20250801101338.72502-1-mschmidt@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/emulex/benet/be_cmds.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
+index a89aa4ac0a06..779f1324bb5f 100644
+--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
++++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
+@@ -3852,8 +3852,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
+ status = be_mcc_notify_wait(adapter);
+
+ err:
+- dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ spin_unlock_bh(&adapter->mcc_lock);
++ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
+ return status;
+ }
+
+--
+2.39.5
+
--- /dev/null
+From ffd801332d1578e3ff94323f2a0c681b61562dc0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 13:17:38 +0000
+Subject: ipv6: reject malicious packets in ipv6_gso_segment()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d45cf1e7d7180256e17c9ce88e32e8061a7887fe ]
+
+syzbot was able to craft a packet with very long IPv6 extension headers
+leading to an overflow of skb->transport_header.
+
+This 16bit field has a limited range.
+
+Add skb_reset_transport_header_careful() helper and use it
+from ipv6_gso_segment()
+
+WARNING: CPU: 0 PID: 5871 at ./include/linux/skbuff.h:3032 skb_reset_transport_header include/linux/skbuff.h:3032 [inline]
+WARNING: CPU: 0 PID: 5871 at ./include/linux/skbuff.h:3032 ipv6_gso_segment+0x15e2/0x21e0 net/ipv6/ip6_offload.c:151
+Modules linked in:
+CPU: 0 UID: 0 PID: 5871 Comm: syz-executor211 Not tainted 6.16.0-rc6-syzkaller-g7abc678e3084 #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
+ RIP: 0010:skb_reset_transport_header include/linux/skbuff.h:3032 [inline]
+ RIP: 0010:ipv6_gso_segment+0x15e2/0x21e0 net/ipv6/ip6_offload.c:151
+Call Trace:
+ <TASK>
+ skb_mac_gso_segment+0x31c/0x640 net/core/gso.c:53
+ nsh_gso_segment+0x54a/0xe10 net/nsh/nsh.c:110
+ skb_mac_gso_segment+0x31c/0x640 net/core/gso.c:53
+ __skb_gso_segment+0x342/0x510 net/core/gso.c:124
+ skb_gso_segment include/net/gso.h:83 [inline]
+ validate_xmit_skb+0x857/0x11b0 net/core/dev.c:3950
+ validate_xmit_skb_list+0x84/0x120 net/core/dev.c:4000
+ sch_direct_xmit+0xd3/0x4b0 net/sched/sch_generic.c:329
+ __dev_xmit_skb net/core/dev.c:4102 [inline]
+ __dev_queue_xmit+0x17b6/0x3a70 net/core/dev.c:4679
+
+Fixes: d1da932ed4ec ("ipv6: Separate ipv6 offload support")
+Reported-by: syzbot+af43e647fd835acc02df@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/688a1a05.050a0220.5d226.0008.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250730131738.3385939-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skbuff.h | 23 +++++++++++++++++++++++
+ net/ipv6/ip6_offload.c | 4 +++-
+ 2 files changed, 26 insertions(+), 1 deletion(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index f7d392d849be..7b7222b4f611 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -2877,6 +2877,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb)
+ skb->transport_header = skb->data - skb->head;
+ }
+
++/**
++ * skb_reset_transport_header_careful - conditionally reset transport header
++ * @skb: buffer to alter
++ *
++ * Hardened version of skb_reset_transport_header().
++ *
++ * Returns: true if the operation was a success.
++ */
++static inline bool __must_check
++skb_reset_transport_header_careful(struct sk_buff *skb)
++{
++ long offset = skb->data - skb->head;
++
++ if (unlikely(offset != (typeof(skb->transport_header))offset))
++ return false;
++
++ if (unlikely(offset == (typeof(skb->transport_header))~0U))
++ return false;
++
++ skb->transport_header = offset;
++ return true;
++}
++
+ static inline void skb_set_transport_header(struct sk_buff *skb,
+ const int offset)
+ {
+diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
+index 7f014a8969fb..84b17eeaa57c 100644
+--- a/net/ipv6/ip6_offload.c
++++ b/net/ipv6/ip6_offload.c
+@@ -150,7 +150,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
+
+ ops = rcu_dereference(inet6_offloads[proto]);
+ if (likely(ops && ops->callbacks.gso_segment)) {
+- skb_reset_transport_header(skb);
++ if (!skb_reset_transport_header_careful(skb))
++ goto out;
++
+ segs = ops->callbacks.gso_segment(skb, features);
+ if (!segs)
+ skb->network_header = skb_mac_header(skb) + nhoff - skb->head;
+--
+2.39.5
+
--- /dev/null
+From a3a58c56c7fa9db6dacfb7a6769ea4195f99a24f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Aug 2025 18:09:49 +0200
+Subject: irqchip: Build IMX_MU_MSI only on ARM
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 3b6a18f0da8720d612d8a682ea5c55870da068e0 ]
+
+Compile-testing IMX_MU_MSI on x86 without PCI_MSI support results in a
+build failure:
+
+drivers/gpio/gpio-sprd.c:8:
+include/linux/gpio/driver.h:41:33: error: field 'msiinfo' has incomplete type
+drivers/iommu/iommufd/viommu.c:4:
+include/linux/msi.h:528:33: error: field 'alloc_info' has incomplete type
+
+Tighten the dependency further to only allow compile testing on Arm.
+This could be refined further to allow certain x86 configs.
+
+This was submitted before to address a different build failure, which was
+fixed differently, but the problem has now returned in a different form.
+
+Fixes: 70afdab904d2d1e6 ("irqchip: Add IMX MU MSI controller driver")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lore.kernel.org/all/20250805160952.4006075-1-arnd@kernel.org
+Link: https://lore.kernel.org/all/20221215164109.761427-1-arnd@kernel.org/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
+index e7b736800dd0..4ff91df76947 100644
+--- a/drivers/irqchip/Kconfig
++++ b/drivers/irqchip/Kconfig
+@@ -483,6 +483,7 @@ config IMX_MU_MSI
+ tristate "i.MX MU used as MSI controller"
+ depends on OF && HAS_IOMEM
+ depends on ARCH_MXC || COMPILE_TEST
++ depends on ARM || ARM64
+ default m if ARCH_MXC
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+--
+2.39.5
+
--- /dev/null
+From 3daf2fa3844e846998f274e039ed23add4277aec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 12:21:40 +0800
+Subject: md/md-cluster: handle REMOVE message earlier
+
+From: Heming Zhao <heming.zhao@suse.com>
+
+[ Upstream commit 948b1fe12005d39e2b49087b50e5ee55c9a8f76f ]
+
+Commit a1fd37f97808 ("md: Don't wait for MD_RECOVERY_NEEDED for
+HOT_REMOVE_DISK ioctl") introduced a regression in the md_cluster
+module. (Failed cases 02r1_Manage_re-add & 02r10_Manage_re-add)
+
+Consider a 2-node cluster:
+- node1 set faulty & remove command on a disk.
+- node2 must correctly update the array metadata.
+
+Before a1fd37f97808, on node1, the delay between msg:METADATA_UPDATED
+(triggered by faulty) and msg:REMOVE was sufficient for node2 to
+reload the disk info (written by node1).
+After a1fd37f97808, node1 no longer waits between faulty and remove,
+causing it to send msg:REMOVE while node2 is still reloading disk info.
+This often results in node2 failing to remove the faulty disk.
+
+== how to trigger ==
+
+set up a 2-node cluster (node1 & node2) with disks vdc & vdd.
+
+on node1:
+mdadm -CR /dev/md0 -l1 -b clustered -n2 /dev/vdc /dev/vdd --assume-clean
+ssh node2-ip mdadm -A /dev/md0 /dev/vdc /dev/vdd
+mdadm --manage /dev/md0 --fail /dev/vdc --remove /dev/vdc
+
+check array status on both nodes with "mdadm -D /dev/md0".
+node1 output:
+ Number Major Minor RaidDevice State
+ - 0 0 0 removed
+ 1 254 48 1 active sync /dev/vdd
+node2 output:
+ Number Major Minor RaidDevice State
+ - 0 0 0 removed
+ 1 254 48 1 active sync /dev/vdd
+
+ 0 254 32 - faulty /dev/vdc
+
+Fixes: a1fd37f97808 ("md: Don't wait for MD_RECOVERY_NEEDED for HOT_REMOVE_DISK ioctl")
+Signed-off-by: Heming Zhao <heming.zhao@suse.com>
+Reviewed-by: Su Yue <glass.su@suse.com>
+Link: https://lore.kernel.org/linux-raid/20250728042145.9989-1-heming.zhao@suse.com
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/md.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/md/md.c b/drivers/md/md.c
+index ca7ae3aad265..b086cbf24086 100644
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -9465,8 +9465,8 @@ void md_check_recovery(struct mddev *mddev)
+ * remove disk.
+ */
+ rdev_for_each_safe(rdev, tmp, mddev) {
+- if (test_and_clear_bit(ClusterRemove, &rdev->flags) &&
+- rdev->raid_disk < 0)
++ if (rdev->raid_disk < 0 &&
++ test_and_clear_bit(ClusterRemove, &rdev->flags))
+ md_kick_rdev_from_array(rdev);
+ }
+ }
+@@ -9813,8 +9813,11 @@ static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev)
+
+ /* Check for change of roles in the active devices */
+ rdev_for_each_safe(rdev2, tmp, mddev) {
+- if (test_bit(Faulty, &rdev2->flags))
++ if (test_bit(Faulty, &rdev2->flags)) {
++ if (test_bit(ClusterRemove, &rdev2->flags))
++ set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+ continue;
++ }
+
+ /* Check if the roles changed */
+ role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]);
+--
+2.39.5
+
--- /dev/null
+From 50374c525e1b277c868034dc130d67d73272cc8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 18:14:58 +0800
+Subject: net: drop UFO packets in udp_rcv_segment()
+
+From: Wang Liang <wangliang74@huawei.com>
+
+[ Upstream commit d46e51f1c78b9ab9323610feb14238d06d46d519 ]
+
+When sending a packet with virtio_net_hdr to tun device, if the gso_type
+in virtio_net_hdr is SKB_GSO_UDP and the gso_size is less than udphdr
+size, below crash may happen.
+
+ ------------[ cut here ]------------
+ kernel BUG at net/core/skbuff.c:4572!
+ Oops: invalid opcode: 0000 [#1] SMP NOPTI
+ CPU: 0 UID: 0 PID: 62 Comm: mytest Not tainted 6.16.0-rc7 #203 PREEMPT(voluntary)
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+ RIP: 0010:skb_pull_rcsum+0x8e/0xa0
+ Code: 00 00 5b c3 cc cc cc cc 8b 93 88 00 00 00 f7 da e8 37 44 38 00 f7 d8 89 83 88 00 00 00 48 8b 83 c8 00 00 00 5b c3 cc cc cc cc <0f> 0b 0f 0b 66 66 2e 0f 1f 84 00 000
+ RSP: 0018:ffffc900001fba38 EFLAGS: 00000297
+ RAX: 0000000000000004 RBX: ffff8880040c1000 RCX: ffffc900001fb948
+ RDX: ffff888003e6d700 RSI: 0000000000000008 RDI: ffff88800411a062
+ RBP: ffff8880040c1000 R08: 0000000000000000 R09: 0000000000000001
+ R10: ffff888003606c00 R11: 0000000000000001 R12: 0000000000000000
+ R13: ffff888004060900 R14: ffff888004050000 R15: ffff888004060900
+ FS: 000000002406d3c0(0000) GS:ffff888084a19000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 0000000020000040 CR3: 0000000004007000 CR4: 00000000000006f0
+ Call Trace:
+ <TASK>
+ udp_queue_rcv_one_skb+0x176/0x4b0 net/ipv4/udp.c:2445
+ udp_queue_rcv_skb+0x155/0x1f0 net/ipv4/udp.c:2475
+ udp_unicast_rcv_skb+0x71/0x90 net/ipv4/udp.c:2626
+ __udp4_lib_rcv+0x433/0xb00 net/ipv4/udp.c:2690
+ ip_protocol_deliver_rcu+0xa6/0x160 net/ipv4/ip_input.c:205
+ ip_local_deliver_finish+0x72/0x90 net/ipv4/ip_input.c:233
+ ip_sublist_rcv_finish+0x5f/0x70 net/ipv4/ip_input.c:579
+ ip_sublist_rcv+0x122/0x1b0 net/ipv4/ip_input.c:636
+ ip_list_rcv+0xf7/0x130 net/ipv4/ip_input.c:670
+ __netif_receive_skb_list_core+0x21d/0x240 net/core/dev.c:6067
+ netif_receive_skb_list_internal+0x186/0x2b0 net/core/dev.c:6210
+ napi_complete_done+0x78/0x180 net/core/dev.c:6580
+ tun_get_user+0xa63/0x1120 drivers/net/tun.c:1909
+ tun_chr_write_iter+0x65/0xb0 drivers/net/tun.c:1984
+ vfs_write+0x300/0x420 fs/read_write.c:593
+ ksys_write+0x60/0xd0 fs/read_write.c:686
+ do_syscall_64+0x50/0x1c0 arch/x86/entry/syscall_64.c:63
+ </TASK>
+
+To trigger gso segment in udp_queue_rcv_skb(), we should also set option
+UDP_ENCAP_ESPINUDP to enable udp_sk(sk)->encap_rcv. When the encap_rcv
+hook return 1 in udp_queue_rcv_one_skb(), udp_csum_pull_header() will try
+to pull udphdr, but the skb size has been segmented to gso size, which
+leads to this crash.
+
+Previous commit cf329aa42b66 ("udp: cope with UDP GRO packet misdirection")
+introduces segmentation in UDP receive path only for GRO, which was never
+intended to be used for UFO, so drop UFO packets in udp_rcv_segment().
+
+Link: https://lore.kernel.org/netdev/20250724083005.3918375-1-wangliang74@huawei.com/
+Link: https://lore.kernel.org/netdev/20250729123907.3318425-1-wangliang74@huawei.com/
+Fixes: cf329aa42b66 ("udp: cope with UDP GRO packet misdirection")
+Suggested-by: Willem de Bruijn <willemdebruijn.kernel@gmail.com>
+Signed-off-by: Wang Liang <wangliang74@huawei.com>
+Reviewed-by: Willem de Bruijn <willemb@google.com>
+Link: https://patch.msgid.link/20250730101458.3470788-1-wangliang74@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/udp.h | 24 ++++++++++++++++++------
+ 1 file changed, 18 insertions(+), 6 deletions(-)
+
+diff --git a/include/net/udp.h b/include/net/udp.h
+index 488a6d2babcc..89eeb187667b 100644
+--- a/include/net/udp.h
++++ b/include/net/udp.h
+@@ -466,6 +466,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ {
+ netdev_features_t features = NETIF_F_SG;
+ struct sk_buff *segs;
++ int drop_count;
++
++ /*
++ * Segmentation in UDP receive path is only for UDP GRO, drop udp
++ * fragmentation offload (UFO) packets.
++ */
++ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) {
++ drop_count = 1;
++ goto drop;
++ }
+
+ /* Avoid csum recalculation by skb_segment unless userspace explicitly
+ * asks for the final checksum values
+@@ -489,16 +499,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
+ */
+ segs = __skb_gso_segment(skb, features, false);
+ if (IS_ERR_OR_NULL(segs)) {
+- int segs_nr = skb_shinfo(skb)->gso_segs;
+-
+- atomic_add(segs_nr, &sk->sk_drops);
+- SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr);
+- kfree_skb(skb);
+- return NULL;
++ drop_count = skb_shinfo(skb)->gso_segs;
++ goto drop;
+ }
+
+ consume_skb(skb);
+ return segs;
++
++drop:
++ atomic_add(drop_count, &sk->sk_drops);
++ SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count);
++ kfree_skb(skb);
++ return NULL;
+ }
+
+ static inline void udp_post_segment_fix_csum(struct sk_buff *skb)
+--
+2.39.5
+
--- /dev/null
+From 8d35eb3b78a736fa9686b61f19e813c0ec9bb64b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 28 Jul 2025 10:35:24 +0200
+Subject: net: ipa: add IPA v5.1 and v5.5 to ipa_version_string()
+
+From: Luca Weiss <luca.weiss@fairphone.com>
+
+[ Upstream commit f2aa00e4f65efcf25ff6bc8198e21f031e7b9b1b ]
+
+Handle the case for v5.1 and v5.5 instead of returning "0.0".
+
+Also reword the comment below since I don't see any evidence of such a
+check happening, and - since 5.5 has been missing - can happen.
+
+Fixes: 3aac8ec1c028 ("net: ipa: add some new IPA versions")
+Signed-off-by: Luca Weiss <luca.weiss@fairphone.com>
+Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Reviewed-by: Alex Elder <elder@riscstar.com>
+Link: https://patch.msgid.link/20250728-ipa-5-1-5-5-version_string-v1-1-d7a5623d7ece@fairphone.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipa/ipa_sysfs.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
+index 2ff09ce343b7..2e676b9d4042 100644
+--- a/drivers/net/ipa/ipa_sysfs.c
++++ b/drivers/net/ipa/ipa_sysfs.c
+@@ -38,8 +38,12 @@ static const char *ipa_version_string(struct ipa *ipa)
+ return "4.11";
+ case IPA_VERSION_5_0:
+ return "5.0";
++ case IPA_VERSION_5_1:
++ return "5.1";
++ case IPA_VERSION_5_5:
++ return "5.5";
+ default:
+- return "0.0"; /* Won't happen (checked at probe time) */
++ return "0.0"; /* Should not happen */
+ }
+ }
+
+--
+2.39.5
+
--- /dev/null
+From 3d89508946832d73f8fb7881d8a1917c6aeb7146 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 11:34:00 -0700
+Subject: net/mlx5: Correctly set gso_segs when LRO is used
+
+From: Christoph Paasch <cpaasch@openai.com>
+
+[ Upstream commit 77bf1c55b2acc7fa3734b14f4561e3d75aea1a90 ]
+
+When gso_segs is left at 0, a number of assumptions will end up being
+incorrect throughout the stack.
+
+For example, in the GRO-path, we set NAPI_GRO_CB()->count to gso_segs.
+So, if a non-LRO'ed packet followed by an LRO'ed packet is being
+processed in GRO, the first one will have NAPI_GRO_CB()->count set to 1 and
+the next one to 0 (in dev_gro_receive()).
+Since commit 531d0d32de3e
+("net/mlx5: Correctly set gso_size when LRO is used")
+these packets will get merged (as their gso_size now matches).
+So, we end up in gro_complete() with NAPI_GRO_CB()->count == 1 and thus
+don't call inet_gro_complete(). Meaning, checksum-validation in
+tcp_checksum_complete() will fail with a "hw csum failure".
+
+Even before the above mentioned commit, incorrect gso_segs means that other
+things like TCP's accounting of incoming packets (tp->segs_in,
+data_segs_in, rcv_ooopack) will be incorrect. Which means that if one
+does bytes_received/data_segs_in, the result will be bigger than the
+MTU.
+
+Fix this by initializing gso_segs correctly when LRO is used.
+
+Fixes: e586b3b0baee ("net/mlx5: Ethernet Datapath files")
+Reported-by: Gal Pressman <gal@nvidia.com>
+Closes: https://lore.kernel.org/netdev/6583783f-f0fb-4fb1-a415-feec8155bc69@nvidia.com/
+Signed-off-by: Christoph Paasch <cpaasch@openai.com>
+Reviewed-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250729-mlx5_gso_segs-v1-1-b48c480c1c12@openai.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+index d5731f7be04f..8278395ee20a 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+@@ -1573,6 +1573,7 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
+
+ skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
++ skb_shinfo(skb)->gso_segs = lro_num_seg;
+ /* Subtract one since we already counted this as one
+ * "regular" packet in mlx5e_complete_rx_cqe()
+ */
+--
+2.39.5
+
--- /dev/null
+From bfc92687bb1c5a4cab6673f38d1c0e194009d7a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 Aug 2025 17:18:57 -0700
+Subject: net/sched: mqprio: fix stack out-of-bounds write in tc entry parsing
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maher Azzouzi <maherazz04@gmail.com>
+
+[ Upstream commit ffd2dc4c6c49ff4f1e5d34e454a6a55608104c17 ]
+
+TCA_MQPRIO_TC_ENTRY_INDEX is validated using
+NLA_POLICY_MAX(NLA_U32, TC_QOPT_MAX_QUEUE), which allows the value
+TC_QOPT_MAX_QUEUE (16). This leads to a 4-byte out-of-bounds stack
+write in the fp[] array, which only has room for 16 elements (0–15).
+
+Fix this by changing the policy to allow only up to TC_QOPT_MAX_QUEUE - 1.
+
+Fixes: f62af20bed2d ("net/sched: mqprio: allow per-TC user input of FP adminStatus")
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Maher Azzouzi <maherazz04@gmail.com>
+Reviewed-by: Vladimir Oltean <vladimir.oltean@nxp.com>
+Link: https://patch.msgid.link/20250802001857.2702497-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_mqprio.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c
+index 793009f445c0..a0e3f3bae536 100644
+--- a/net/sched/sch_mqprio.c
++++ b/net/sched/sch_mqprio.c
+@@ -152,7 +152,7 @@ static int mqprio_parse_opt(struct net_device *dev, struct tc_mqprio_qopt *qopt,
+ static const struct
+ nla_policy mqprio_tc_entry_policy[TCA_MQPRIO_TC_ENTRY_MAX + 1] = {
+ [TCA_MQPRIO_TC_ENTRY_INDEX] = NLA_POLICY_MAX(NLA_U32,
+- TC_QOPT_MAX_QUEUE),
++ TC_QOPT_MAX_QUEUE - 1),
+ [TCA_MQPRIO_TC_ENTRY_FP] = NLA_POLICY_RANGE(NLA_U32,
+ TC_FP_EXPRESS,
+ TC_FP_PREEMPTIBLE),
+--
+2.39.5
+
--- /dev/null
+From 6a63b8ee170de6a2bc815380d7804c8b8f7b0e64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 02:31:49 +0900
+Subject: net/sched: taprio: enforce minimum value for picos_per_byte
+
+From: Takamitsu Iwai <takamitz@amazon.co.jp>
+
+[ Upstream commit ae8508b25def57982493c48694ef135973bfabe0 ]
+
+Syzbot reported a WARNING in taprio_get_start_time().
+
+When link speed is 470,589 or greater, q->picos_per_byte becomes too
+small, causing length_to_duration(q, ETH_ZLEN) to return zero.
+
+This zero value leads to validation failures in fill_sched_entry() and
+parse_taprio_schedule(), allowing arbitrary values to be assigned to
+entry->interval and cycle_time. As a result, sched->cycle can become zero.
+
+Since SPEED_800000 is the largest defined speed in
+include/uapi/linux/ethtool.h, this issue can occur in realistic scenarios.
+
+To ensure length_to_duration() returns a non-zero value for minimum-sized
+Ethernet frames (ETH_ZLEN = 60), picos_per_byte must be at least 17
+(60 * 17 > PSEC_PER_NSEC which is 1000).
+
+This patch enforces a minimum value of 17 for picos_per_byte when the
+calculated value would be lower, and adds a warning message to inform
+users that scheduling accuracy may be affected at very high link speeds.
+
+Fixes: fb66df20a720 ("net/sched: taprio: extend minimum interval restriction to entire cycle too")
+Reported-by: syzbot+398e1ee4ca2cac05fddb@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=398e1ee4ca2cac05fddb
+Signed-off-by: Takamitsu Iwai <takamitz@amazon.co.jp>
+Link: https://patch.msgid.link/20250728173149.45585-1-takamitz@amazon.co.jp
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_taprio.c | 21 ++++++++++++++++++---
+ 1 file changed, 18 insertions(+), 3 deletions(-)
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index d162e2dd8602..a01d17d03bf5 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -41,6 +41,11 @@ static struct static_key_false taprio_have_working_mqprio;
+ #define TXTIME_ASSIST_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_TXTIME_ASSIST)
+ #define FULL_OFFLOAD_IS_ENABLED(flags) ((flags) & TCA_TAPRIO_ATTR_FLAG_FULL_OFFLOAD)
+ #define TAPRIO_FLAGS_INVALID U32_MAX
++/* Minimum value for picos_per_byte to ensure non-zero duration
++ * for minimum-sized Ethernet frames (ETH_ZLEN = 60).
++ * 60 * 17 > PSEC_PER_NSEC (1000)
++ */
++#define TAPRIO_PICOS_PER_BYTE_MIN 17
+
+ struct sched_entry {
+ /* Durations between this GCL entry and the GCL entry where the
+@@ -1294,7 +1299,8 @@ static void taprio_start_sched(struct Qdisc *sch,
+ }
+
+ static void taprio_set_picos_per_byte(struct net_device *dev,
+- struct taprio_sched *q)
++ struct taprio_sched *q,
++ struct netlink_ext_ack *extack)
+ {
+ struct ethtool_link_ksettings ecmd;
+ int speed = SPEED_10;
+@@ -1310,6 +1316,15 @@ static void taprio_set_picos_per_byte(struct net_device *dev,
+
+ skip:
+ picos_per_byte = (USEC_PER_SEC * 8) / speed;
++ if (picos_per_byte < TAPRIO_PICOS_PER_BYTE_MIN) {
++ if (!extack)
++ pr_warn("Link speed %d is too high. Schedule may be inaccurate.\n",
++ speed);
++ NL_SET_ERR_MSG_FMT_MOD(extack,
++ "Link speed %d is too high. Schedule may be inaccurate.",
++ speed);
++ picos_per_byte = TAPRIO_PICOS_PER_BYTE_MIN;
++ }
+
+ atomic64_set(&q->picos_per_byte, picos_per_byte);
+ netdev_dbg(dev, "taprio: set %s's picos_per_byte to: %lld, linkspeed: %d\n",
+@@ -1334,7 +1349,7 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
+ if (dev != qdisc_dev(q->root))
+ continue;
+
+- taprio_set_picos_per_byte(dev, q);
++ taprio_set_picos_per_byte(dev, q, NULL);
+
+ stab = rtnl_dereference(q->root->stab);
+
+@@ -1871,7 +1886,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt,
+ q->flags = err;
+
+ /* Needed for length_to_duration() during netlink attribute parsing */
+- taprio_set_picos_per_byte(dev, q);
++ taprio_set_picos_per_byte(dev, q, extack);
+
+ err = taprio_parse_mqprio_opt(dev, mqprio, extack, q->flags);
+ if (err < 0)
+--
+2.39.5
+
--- /dev/null
+From 2a2e66c8b22804a57d0ba353c9d3493ad416a735 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 30 Jul 2025 10:21:37 -0700
+Subject: netlink: specs: ethtool: fix module EEPROM input/output arguments
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 01051012887329ea78eaca19b1d2eac4c9f601b5 ]
+
+Module (SFP) eeprom GET has a lot of input params, they are all
+mistakenly listed as output in the spec. Looks like kernel doesn't
+output them at all. Correct what are the inputs and what the outputs.
+
+Reported-by: Duo Yi <duo@meta.com>
+Fixes: a353318ebf24 ("tools: ynl: populate most of the ethtool spec")
+Acked-by: Stanislav Fomichev <sdf@fomichev.me>
+Link: https://patch.msgid.link/20250730172137.1322351-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/netlink/specs/ethtool.yaml | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml
+index 3e38f6956793..b463949736c5 100644
+--- a/Documentation/netlink/specs/ethtool.yaml
++++ b/Documentation/netlink/specs/ethtool.yaml
+@@ -1489,9 +1489,6 @@ operations:
+
+ do: &module-eeprom-get-op
+ request:
+- attributes:
+- - header
+- reply:
+ attributes:
+ - header
+ - offset
+@@ -1499,6 +1496,9 @@ operations:
+ - page
+ - bank
+ - i2c-address
++ reply:
++ attributes:
++ - header
+ - data
+ dump: *module-eeprom-get-op
+ -
+--
+2.39.5
+
--- /dev/null
+From 75d0871ba6ced8bc20884057e22fdcfff4d0d2fd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 25 Jul 2025 18:08:46 -0700
+Subject: netpoll: prevent hanging NAPI when netcons gets enabled
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 2da4def0f487f24bbb0cece3bb2bcdcb918a0b72 ]
+
+Paolo spotted hangs in NIPA running driver tests against virtio.
+The tests hang in virtnet_close() -> virtnet_napi_tx_disable().
+
+The problem is only reproducible if running multiple of our tests
+in sequence (I used TEST_PROGS="xdp.py ping.py netcons_basic.sh \
+netpoll_basic.py stats.py"). Initial suspicion was that this is
+a simple case of double-disable of NAPI, but instrumenting the
+code reveals:
+
+ Deadlocked on NAPI ffff888007cd82c0 (virtnet_poll_tx):
+ state: 0x37, disabled: false, owner: 0, listed: false, weight: 64
+
+The NAPI was not in fact disabled, owner is 0 (rather than -1),
+so the NAPI "thinks" it's scheduled for CPU 0 but it's not listed
+(!list_empty(&n->poll_list) => false). It seems odd that normal NAPI
+processing would wedge itself like this.
+
+Better suspicion is that netpoll gets enabled while NAPI is polling,
+and also grabs the NAPI instance. This confuses napi_complete_done():
+
+ [netpoll] [normal NAPI]
+ napi_poll()
+ have = netpoll_poll_lock()
+ rcu_access_pointer(dev->npinfo)
+ return NULL # no netpoll
+ __napi_poll()
+ ->poll(->weight)
+ poll_napi()
+ cmpxchg(->poll_owner, -1, cpu)
+ poll_one_napi()
+ set_bit(NAPI_STATE_NPSVC, ->state)
+ napi_complete_done()
+ if (NAPIF_STATE_NPSVC)
+ return false
+ # exit without clearing SCHED
+
+This feels very unlikely, but perhaps virtio has some interactions
+with the hypervisor in the NAPI ->poll that makes the race window
+larger?
+
+Best I could do to prove the theory was to add and trigger this
+warning in napi_poll (just before netpoll_poll_unlock()):
+
+ WARN_ONCE(!have && rcu_access_pointer(n->dev->npinfo) &&
+ napi_is_scheduled(n) && list_empty(&n->poll_list),
+ "NAPI race with netpoll %px", n);
+
+If this warning hits the next virtio_close() will hang.
+
+This patch survived 30 test iterations without a hang (without it
+the longest clean run was around 10). Credit for triggering this
+goes to Breno's recent netconsole tests.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: Paolo Abeni <pabeni@redhat.com>
+Link: https://lore.kernel.org/c5a93ed1-9abe-4880-a3bb-8d1678018b1d@redhat.com
+Acked-by: Jason Wang <jasowang@redhat.com>
+Reviewed-by: Xuan Zhuo <xuanzhuo@linux.alibaba.com>
+Link: https://patch.msgid.link/20250726010846.1105875-1-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/netpoll.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/net/core/netpoll.c b/net/core/netpoll.c
+index 1a4d2a61b060..2bdb1e84c6c8 100644
+--- a/net/core/netpoll.c
++++ b/net/core/netpoll.c
+@@ -791,6 +791,13 @@ int netpoll_setup(struct netpoll *np)
+ if (err)
+ goto put;
+ rtnl_unlock();
++
++ /* Make sure all NAPI polls which started before dev->npinfo
++ * was visible have exited before we start calling NAPI poll.
++ * NAPI skips locking if dev->npinfo is NULL.
++ */
++ synchronize_rcu();
++
+ return 0;
+
+ put:
+--
+2.39.5
+
--- /dev/null
+From 23a93b7c2db76c9d3e2d5414a0858b7bca8c7456 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Jul 2025 09:24:58 -0400
+Subject: NFS: Fix filehandle bounds checking in nfs_fh_to_dentry()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit ef93a685e01a281b5e2a25ce4e3428cf9371a205 ]
+
+The function needs to check the minimal filehandle length before it can
+access the embedded filehandle.
+
+Reported-by: zhangjian <zhangjian496@huawei.com>
+Fixes: 20fa19027286 ("nfs: add export operations")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/export.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/nfs/export.c b/fs/nfs/export.c
+index be686b8e0c54..aeb17adcb2b6 100644
+--- a/fs/nfs/export.c
++++ b/fs/nfs/export.c
+@@ -66,14 +66,21 @@ nfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
+ {
+ struct nfs_fattr *fattr = NULL;
+ struct nfs_fh *server_fh = nfs_exp_embedfh(fid->raw);
+- size_t fh_size = offsetof(struct nfs_fh, data) + server_fh->size;
++ size_t fh_size = offsetof(struct nfs_fh, data);
+ const struct nfs_rpc_ops *rpc_ops;
+ struct dentry *dentry;
+ struct inode *inode;
+- int len = EMBED_FH_OFF + XDR_QUADLEN(fh_size);
++ int len = EMBED_FH_OFF;
+ u32 *p = fid->raw;
+ int ret;
+
++ /* Initial check of bounds */
++ if (fh_len < len + XDR_QUADLEN(fh_size) ||
++ fh_len > XDR_QUADLEN(NFS_MAXFHSIZE))
++ return NULL;
++ /* Calculate embedded filehandle size */
++ fh_size += server_fh->size;
++ len += XDR_QUADLEN(fh_size);
+ /* NULL translates to ESTALE */
+ if (fh_len < len || fh_type != len)
+ return NULL;
+--
+2.39.5
+
--- /dev/null
+From 5d10804815d1af8e4536714e4e700be6cf915efe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 18 Jul 2025 16:15:27 -0700
+Subject: NFS: Fix wakeup of __nfs_lookup_revalidate() in unblock_revalidate()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 1db3a48e83bb64a70bf27263b7002585574a9c2d ]
+
+Use store_release_wake_up() to add the appropriate memory barrier before
+calling wake_up_var(&dentry->d_fsdata).
+
+Reported-by: Lukáš Hejtmánek<xhejtman@ics.muni.cz>
+Suggested-by: Santosh Pradhan <santosh.pradhan@gmail.com>
+Link: https://lore.kernel.org/all/18945D18-3EDB-4771-B019-0335CE671077@ics.muni.cz/
+Fixes: 99bc9f2eb3f7 ("NFS: add barriers when testing for NFS_FSDATA_BLOCKED")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/dir.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
+index 389186384235..385baf871800 100644
+--- a/fs/nfs/dir.c
++++ b/fs/nfs/dir.c
+@@ -1835,9 +1835,7 @@ static void block_revalidate(struct dentry *dentry)
+
+ static void unblock_revalidate(struct dentry *dentry)
+ {
+- /* store_release ensures wait_var_event() sees the update */
+- smp_store_release(&dentry->d_fsdata, NULL);
+- wake_up_var(&dentry->d_fsdata);
++ store_release_wake_up(&dentry->d_fsdata, NULL);
+ }
+
+ /*
+--
+2.39.5
+
--- /dev/null
+From c4d599a09c2c9ebbd4114b6db63f021494668745 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Jul 2025 21:47:43 -0400
+Subject: NFS: Fixup allocation flags for nfsiod's __GFP_NORETRY
+
+From: Benjamin Coddington <bcodding@redhat.com>
+
+[ Upstream commit 99765233ab42bf7a4950377ad7894dce8a5c0e60 ]
+
+If the NFS client is doing writeback from a workqueue context, avoid using
+__GFP_NORETRY for allocations if the task has set PF_MEMALLOC_NOIO or
+PF_MEMALLOC_NOFS. The combination of these flags makes memory allocation
+failures much more likely.
+
+We've seen those allocation failures show up when the loopback driver is
+doing writeback from a workqueue to a file on NFS, where memory allocation
+failure results in errors or corruption within the loopback device's
+filesystem.
+
+Suggested-by: Trond Myklebust <trondmy@kernel.org>
+Fixes: 0bae835b63c5 ("NFS: Avoid writeback threads getting stuck in mempool_alloc()")
+Signed-off-by: Benjamin Coddington <bcodding@redhat.com>
+Reviewed-by: Laurence Oberman <loberman@redhat.com>
+Tested-by: Laurence Oberman <loberman@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Link: https://lore.kernel.org/r/f83ac1155a4bc670f2663959a7a068571e06afd9.1752111622.git.bcodding@redhat.com
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/internal.h | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
+index c29ad2e1d416..8870c72416ac 100644
+--- a/fs/nfs/internal.h
++++ b/fs/nfs/internal.h
+@@ -613,9 +613,12 @@ nfs_write_match_verf(const struct nfs_writeverf *verf,
+
+ static inline gfp_t nfs_io_gfp_mask(void)
+ {
+- if (current->flags & PF_WQ_WORKER)
+- return GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN;
+- return GFP_KERNEL;
++ gfp_t ret = current_gfp_context(GFP_KERNEL);
++
++ /* For workers __GFP_NORETRY only with __GFP_IO or __GFP_FS */
++ if ((current->flags & PF_WQ_WORKER) && ret == GFP_KERNEL)
++ ret |= __GFP_NORETRY | __GFP_NOWARN;
++ return ret;
+ }
+
+ /*
+--
+2.39.5
+
--- /dev/null
+From ab4493e814116f22cc193fb3af6b70668241459e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 22 Jul 2025 16:56:41 -0400
+Subject: NFSv4.2: another fix for listxattr
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+[ Upstream commit 9acb237deff7667b0f6b10fe6b1b70c4429ea049 ]
+
+Currently, when the server supports NFS4.1 security labels, the
+security.selinux label is included twice. Instead, only add it
+when the server doesn't possess security label support.
+
+Fixes: 243fea134633 ("NFSv4.2: fix listxattr to return selinux security label")
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Link: https://lore.kernel.org/r/20250722205641.79394-1-okorniev@redhat.com
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4proc.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
+index 3085a2faab2d..89d88d37e0cc 100644
+--- a/fs/nfs/nfs4proc.c
++++ b/fs/nfs/nfs4proc.c
+@@ -10630,7 +10630,7 @@ const struct nfs4_minor_version_ops *nfs_v4_minor_ops[] = {
+
+ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ {
+- ssize_t error, error2, error3, error4;
++ ssize_t error, error2, error3, error4 = 0;
+ size_t left = size;
+
+ error = generic_listxattr(dentry, list, left);
+@@ -10658,9 +10658,11 @@ static ssize_t nfs4_listxattr(struct dentry *dentry, char *list, size_t size)
+ left -= error3;
+ }
+
+- error4 = security_inode_listsecurity(d_inode(dentry), list, left);
+- if (error4 < 0)
+- return error4;
++ if (!nfs_server_capable(d_inode(dentry), NFS_CAP_SECURITY_LABEL)) {
++ error4 = security_inode_listsecurity(d_inode(dentry), list, left);
++ if (error4 < 0)
++ return error4;
++ }
+
+ error += error2 + error3 + error4;
+ if (size && error > size)
+--
+2.39.5
+
--- /dev/null
+From 73e6a254423baf1c2f22974766b661b06133f959 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 26 Jul 2025 16:03:07 +0200
+Subject: phy: mscc: Fix parsing of unicast frames
+
+From: Horatiu Vultur <horatiu.vultur@microchip.com>
+
+[ Upstream commit 6fb5ff63b35b7e849cc8510957f25753f87f63d2 ]
+
+According to the 1588 standard, it is possible to use both unicast and
+multicast frames to send the PTP information. It was noticed that if the
+frames were unicast they were not processed by the analyzer meaning that
+they were not timestamped. Therefore fix this to match also these
+unicast frames.
+
+Fixes: ab2bf9339357 ("net: phy: mscc: 1588 block initialization")
+Signed-off-by: Horatiu Vultur <horatiu.vultur@microchip.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://patch.msgid.link/20250726140307.3039694-1-horatiu.vultur@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/mscc/mscc_ptp.c | 1 +
+ drivers/net/phy/mscc/mscc_ptp.h | 1 +
+ 2 files changed, 2 insertions(+)
+
+diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c
+index 7e7ce79eadff..d0bd6ab45ebe 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.c
++++ b/drivers/net/phy/mscc/mscc_ptp.c
+@@ -897,6 +897,7 @@ static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
+ get_unaligned_be32(ptp_multicast));
+ } else {
+ val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
++ val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
+ vsc85xx_ts_write_csr(phydev, blk,
+ MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
+ vsc85xx_ts_write_csr(phydev, blk,
+diff --git a/drivers/net/phy/mscc/mscc_ptp.h b/drivers/net/phy/mscc/mscc_ptp.h
+index da3465360e90..ae9ad925bfa8 100644
+--- a/drivers/net/phy/mscc/mscc_ptp.h
++++ b/drivers/net/phy/mscc/mscc_ptp.h
+@@ -98,6 +98,7 @@
+ #define MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(x) (MSCC_ANA_ETH1_FLOW_ENA(x) + 3)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_MASK_MASK GENMASK(22, 20)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST 0x400000
++#define ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST 0x200000
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR 0x100000
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST_MASK GENMASK(17, 16)
+ #define ANA_ETH1_FLOW_ADDR_MATCH2_SRC_DEST 0x020000
+--
+2.39.5
+
--- /dev/null
+From 98f5ea7642dfddc03ccda3ba82deee1b327e1a1f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Jun 2025 09:17:51 +0200
+Subject: pNFS/flexfiles: don't attempt pnfs on fatal DS errors
+
+From: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+
+[ Upstream commit f06bedfa62d57f7b67d44aacd6badad2e13a803f ]
+
+When an applications get killed (SIGTERM/SIGINT) while pNFS client performs a connection
+to DS, client ends in an infinite loop of connect-disconnect. The
+source of the issue is that flexfilelayoutdev#nfs4_ff_layout_prepare_ds gets an error
+on nfs4_pnfs_ds_connect with status ERESTARTSYS, which is set by rpc_signal_task, but
+the error is treated as transient, thus retried.
+
+The issue is reproducible with Ctrl+C the following script (there should be ~1000 files in
+a directory, client must not have any connections to DSes):
+
+```
+echo 3 > /proc/sys/vm/drop_caches
+
+for i in *
+do
+ head -1 $i
+done
+```
+
+The change aims to propagate the nfs4_ff_layout_prepare_ds error state
+to the caller that can decide whether this is a retryable error or not.
+
+Signed-off-by: Tigran Mkrtchyan <tigran.mkrtchyan@desy.de>
+Link: https://lore.kernel.org/r/20250627071751.189663-1-tigran.mkrtchyan@desy.de
+Fixes: 260f32adb88d ("pNFS/flexfiles: Check the result of nfs4_pnfs_ds_connect")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/flexfilelayout/flexfilelayout.c | 26 ++++++++++++++---------
+ fs/nfs/flexfilelayout/flexfilelayoutdev.c | 6 +++---
+ 2 files changed, 19 insertions(+), 13 deletions(-)
+
+diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
+index 0a26444fe202..7354b6b10478 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayout.c
++++ b/fs/nfs/flexfilelayout/flexfilelayout.c
+@@ -745,14 +745,14 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ {
+ struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg);
+ struct nfs4_ff_layout_mirror *mirror;
+- struct nfs4_pnfs_ds *ds;
++ struct nfs4_pnfs_ds *ds = ERR_PTR(-EAGAIN);
+ u32 idx;
+
+ /* mirrors are initially sorted by efficiency */
+ for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) {
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
+- if (!ds)
++ if (IS_ERR(ds))
+ continue;
+
+ if (check_device &&
+@@ -760,10 +760,10 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg,
+ continue;
+
+ *best_idx = idx;
+- return ds;
++ break;
+ }
+
+- return NULL;
++ return ds;
+ }
+
+ static struct nfs4_pnfs_ds *
+@@ -933,7 +933,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio,
+ for (i = 0; i < pgio->pg_mirror_count; i++) {
+ mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i);
+ ds = nfs4_ff_layout_prepare_ds(pgio->pg_lseg, mirror, true);
+- if (!ds) {
++ if (IS_ERR(ds)) {
+ if (!ff_layout_no_fallback_to_mds(pgio->pg_lseg))
+ goto out_mds;
+ pnfs_generic_pg_cleanup(pgio);
+@@ -1839,6 +1839,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+ u32 idx = hdr->pgio_mirror_idx;
+ int vers;
+ struct nfs_fh *fh;
++ bool ds_fatal_error = false;
+
+ dprintk("--> %s ino %lu pgbase %u req %zu@%llu\n",
+ __func__, hdr->inode->i_ino,
+@@ -1846,8 +1847,10 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, false);
+- if (!ds)
++ if (IS_ERR(ds)) {
++ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
+ goto out_failed;
++ }
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+ hdr->inode);
+@@ -1888,7 +1891,7 @@ ff_layout_read_pagelist(struct nfs_pgio_header *hdr)
+ return PNFS_ATTEMPTED;
+
+ out_failed:
+- if (ff_layout_avoid_mds_available_ds(lseg))
++ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
+ return PNFS_TRY_AGAIN;
+ trace_pnfs_mds_fallback_read_pagelist(hdr->inode,
+ hdr->args.offset, hdr->args.count,
+@@ -1909,11 +1912,14 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+ int vers;
+ struct nfs_fh *fh;
+ u32 idx = hdr->pgio_mirror_idx;
++ bool ds_fatal_error = false;
+
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
+- if (!ds)
++ if (IS_ERR(ds)) {
++ ds_fatal_error = nfs_error_is_fatal(PTR_ERR(ds));
+ goto out_failed;
++ }
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+ hdr->inode);
+@@ -1956,7 +1962,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync)
+ return PNFS_ATTEMPTED;
+
+ out_failed:
+- if (ff_layout_avoid_mds_available_ds(lseg))
++ if (ff_layout_avoid_mds_available_ds(lseg) && !ds_fatal_error)
+ return PNFS_TRY_AGAIN;
+ trace_pnfs_mds_fallback_write_pagelist(hdr->inode,
+ hdr->args.offset, hdr->args.count,
+@@ -1998,7 +2004,7 @@ static int ff_layout_initiate_commit(struct nfs_commit_data *data, int how)
+ idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
+ mirror = FF_LAYOUT_COMP(lseg, idx);
+ ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true);
+- if (!ds)
++ if (IS_ERR(ds))
+ goto out_err;
+
+ ds_clnt = nfs4_ff_find_or_create_ds_client(mirror, ds->ds_clp,
+diff --git a/fs/nfs/flexfilelayout/flexfilelayoutdev.c b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+index d21c5ecfbf1c..95d5dca67145 100644
+--- a/fs/nfs/flexfilelayout/flexfilelayoutdev.c
++++ b/fs/nfs/flexfilelayout/flexfilelayoutdev.c
+@@ -370,11 +370,11 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ struct nfs4_ff_layout_mirror *mirror,
+ bool fail_return)
+ {
+- struct nfs4_pnfs_ds *ds = NULL;
++ struct nfs4_pnfs_ds *ds;
+ struct inode *ino = lseg->pls_layout->plh_inode;
+ struct nfs_server *s = NFS_SERVER(ino);
+ unsigned int max_payload;
+- int status;
++ int status = -EAGAIN;
+
+ if (!ff_layout_init_mirror_ds(lseg->pls_layout, mirror))
+ goto noconnect;
+@@ -412,7 +412,7 @@ nfs4_ff_layout_prepare_ds(struct pnfs_layout_segment *lseg,
+ ff_layout_send_layouterror(lseg);
+ if (fail_return || !ff_layout_has_available_ds(lseg))
+ pnfs_error_mark_layout_for_return(ino, lseg);
+- ds = NULL;
++ ds = ERR_PTR(status);
+ out:
+ return ds;
+ }
+--
+2.39.5
+
--- /dev/null
+From 4d0b1d2cc112dedf978a6a535cba94e397f6e3b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Jul 2025 08:02:07 +0000
+Subject: pptp: ensure minimal skb length in pptp_xmit()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit de9c4861fb42f0cd72da844c3c34f692d5895b7b ]
+
+Commit aabc6596ffb3 ("net: ppp: Add bound checking for skb data
+on ppp_sync_txmung") fixed ppp_sync_txmunge()
+
+We need a similar fix in pptp_xmit(), otherwise we might
+read uninit data as reported by syzbot.
+
+BUG: KMSAN: uninit-value in pptp_xmit+0xc34/0x2720 drivers/net/ppp/pptp.c:193
+ pptp_xmit+0xc34/0x2720 drivers/net/ppp/pptp.c:193
+ ppp_channel_bridge_input drivers/net/ppp/ppp_generic.c:2290 [inline]
+ ppp_input+0x1d6/0xe60 drivers/net/ppp/ppp_generic.c:2314
+ pppoe_rcv_core+0x1e8/0x760 drivers/net/ppp/pppoe.c:379
+ sk_backlog_rcv+0x142/0x420 include/net/sock.h:1148
+ __release_sock+0x1d3/0x330 net/core/sock.c:3213
+ release_sock+0x6b/0x270 net/core/sock.c:3767
+ pppoe_sendmsg+0x15d/0xcb0 drivers/net/ppp/pppoe.c:904
+ sock_sendmsg_nosec net/socket.c:712 [inline]
+ __sock_sendmsg+0x330/0x3d0 net/socket.c:727
+ ____sys_sendmsg+0x893/0xd80 net/socket.c:2566
+ ___sys_sendmsg+0x271/0x3b0 net/socket.c:2620
+ __sys_sendmmsg+0x2d9/0x7c0 net/socket.c:2709
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Reported-by: syzbot+afad90ffc8645324afe5@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/68887d86.a00a0220.b12ec.00cd.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Dawid Osuchowski <dawid.osuchowski@linux.intel.com>
+Link: https://patch.msgid.link/20250729080207.1863408-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/pptp.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 6833ef0c7930..4455d99be767 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -159,9 +159,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ int len;
+ unsigned char *data;
+ __u32 seq_recv;
+-
+-
+- struct rtable *rt;
++ struct rtable *rt = NULL;
+ struct net_device *tdev;
+ struct iphdr *iph;
+ int max_headroom;
+@@ -179,16 +177,20 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+
+ if (skb_headroom(skb) < max_headroom || skb_cloned(skb) || skb_shared(skb)) {
+ struct sk_buff *new_skb = skb_realloc_headroom(skb, max_headroom);
+- if (!new_skb) {
+- ip_rt_put(rt);
++
++ if (!new_skb)
+ goto tx_error;
+- }
++
+ if (skb->sk)
+ skb_set_owner_w(new_skb, skb->sk);
+ consume_skb(skb);
+ skb = new_skb;
+ }
+
++ /* Ensure we can safely access protocol field and LCP code */
++ if (!pskb_may_pull(skb, 3))
++ goto tx_error;
++
+ data = skb->data;
+ islcp = ((data[0] << 8) + data[1]) == PPP_LCP && 1 <= data[2] && data[2] <= 7;
+
+@@ -262,6 +264,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ return 1;
+
+ tx_error:
++ ip_rt_put(rt);
+ kfree_skb(skb);
+ return 1;
+ }
+--
+2.39.5
+
--- /dev/null
+From b7a9473be5e8cc473f94c8c63ac6f13cfb553c8a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 14:21:46 +0000
+Subject: pptp: fix pptp_xmit() error path
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ae633388cae349886f1a3cfb27aa092854b24c1b ]
+
+I accidentally added a bug in pptp_xmit() that syzbot caught for us.
+
+Only call ip_rt_put() if a route has been allocated.
+
+BUG: unable to handle page fault for address: ffffffffffffffdb
+PGD df3b067 P4D df3b067 PUD df3d067 PMD 0
+Oops: Oops: 0002 [#1] SMP KASAN PTI
+CPU: 1 UID: 0 PID: 6346 Comm: syz.0.336 Not tainted 6.16.0-next-20250804-syzkaller #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/12/2025
+RIP: 0010:arch_atomic_add_return arch/x86/include/asm/atomic.h:85 [inline]
+RIP: 0010:raw_atomic_sub_return_release include/linux/atomic/atomic-arch-fallback.h:846 [inline]
+RIP: 0010:atomic_sub_return_release include/linux/atomic/atomic-instrumented.h:327 [inline]
+RIP: 0010:__rcuref_put include/linux/rcuref.h:109 [inline]
+RIP: 0010:rcuref_put+0x172/0x210 include/linux/rcuref.h:173
+Call Trace:
+ <TASK>
+ dst_release+0x24/0x1b0 net/core/dst.c:167
+ ip_rt_put include/net/route.h:285 [inline]
+ pptp_xmit+0x14b/0x1a90 drivers/net/ppp/pptp.c:267
+ __ppp_channel_push+0xf2/0x1c0 drivers/net/ppp/ppp_generic.c:2166
+ ppp_channel_push+0x123/0x660 drivers/net/ppp/ppp_generic.c:2198
+ ppp_write+0x2b0/0x400 drivers/net/ppp/ppp_generic.c:544
+ vfs_write+0x27b/0xb30 fs/read_write.c:684
+ ksys_write+0x145/0x250 fs/read_write.c:738
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Fixes: de9c4861fb42 ("pptp: ensure minimal skb length in pptp_xmit()")
+Reported-by: syzbot+27d7cfbc93457e472e00@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/689095a5.050a0220.1fc43d.0009.GAE@google.com/
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://patch.msgid.link/20250807142146.2877060-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ppp/pptp.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
+index 4455d99be767..3a10303eb756 100644
+--- a/drivers/net/ppp/pptp.c
++++ b/drivers/net/ppp/pptp.c
+@@ -159,17 +159,17 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+ int len;
+ unsigned char *data;
+ __u32 seq_recv;
+- struct rtable *rt = NULL;
++ struct rtable *rt;
+ struct net_device *tdev;
+ struct iphdr *iph;
+ int max_headroom;
+
+ if (sk_pppox(po)->sk_state & PPPOX_DEAD)
+- goto tx_error;
++ goto tx_drop;
+
+ rt = pptp_route_output(po, &fl4);
+ if (IS_ERR(rt))
+- goto tx_error;
++ goto tx_drop;
+
+ tdev = rt->dst.dev;
+
+@@ -265,6 +265,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
+
+ tx_error:
+ ip_rt_put(rt);
++tx_drop:
+ kfree_skb(skb);
+ return 1;
+ }
+--
+2.39.5
+
--- /dev/null
+From ddf11bdbf22fd50cd147400ca2a776fb6a65c42b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2024 15:31:41 +1000
+Subject: sched: Add test_and_clear_wake_up_bit() and atomic_dec_and_wake_up()
+
+From: NeilBrown <neilb@suse.de>
+
+[ Upstream commit 52d633def56c10fe3e82a2c5d88c3ecb3f4e4852 ]
+
+There are common patterns in the kernel of using test_and_clear_bit()
+before wake_up_bit(), and atomic_dec_and_test() before wake_up_var().
+
+These combinations don't need extra barriers but sometimes include them
+unnecessarily.
+
+To help avoid the unnecessary barriers and to help discourage the
+general use of wake_up_bit/var (which is a fragile interface) introduce
+two combined functions which implement these patterns.
+
+Also add store_release_wake_up() which supports the task of simply
+setting a non-atomic variable and sending a wakeup. This pattern
+requires barriers which are often omitted.
+
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Link: https://lore.kernel.org/r/20240925053405.3960701-5-neilb@suse.de
+Stable-dep-of: 1db3a48e83bb ("NFS: Fix wakeup of __nfs_lookup_revalidate() in unblock_revalidate()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/wait_bit.h | 60 ++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 60 insertions(+)
+
+diff --git a/include/linux/wait_bit.h b/include/linux/wait_bit.h
+index 7725b7579b78..2209c227e859 100644
+--- a/include/linux/wait_bit.h
++++ b/include/linux/wait_bit.h
+@@ -335,4 +335,64 @@ static inline void clear_and_wake_up_bit(int bit, void *word)
+ wake_up_bit(word, bit);
+ }
+
++/**
++ * test_and_clear_wake_up_bit - clear a bit if it was set: wake up anyone waiting on that bit
++ * @bit: the bit of the word being waited on
++ * @word: the address of memory containing that bit
++ *
++ * If the bit is set and can be atomically cleared, any tasks waiting in
++ * wait_on_bit() or similar will be woken. This call has the same
++ * complete ordering semantics as test_and_clear_bit(). Any changes to
++ * memory made before this call are guaranteed to be visible after the
++ * corresponding wait_on_bit() completes.
++ *
++ * Returns %true if the bit was successfully set and the wake up was sent.
++ */
++static inline bool test_and_clear_wake_up_bit(int bit, unsigned long *word)
++{
++ if (!test_and_clear_bit(bit, word))
++ return false;
++ /* no extra barrier required */
++ wake_up_bit(word, bit);
++ return true;
++}
++
++/**
++ * atomic_dec_and_wake_up - decrement an atomic_t and if zero, wake up waiters
++ * @var: the variable to dec and test
++ *
++ * Decrements the atomic variable and if it reaches zero, send a wake_up to any
++ * processes waiting on the variable.
++ *
++ * This function has the same complete ordering semantics as atomic_dec_and_test.
++ *
++ * Returns %true is the variable reaches zero and the wake up was sent.
++ */
++
++static inline bool atomic_dec_and_wake_up(atomic_t *var)
++{
++ if (!atomic_dec_and_test(var))
++ return false;
++ /* No extra barrier required */
++ wake_up_var(var);
++ return true;
++}
++
++/**
++ * store_release_wake_up - update a variable and send a wake_up
++ * @var: the address of the variable to be updated and woken
++ * @val: the value to store in the variable.
++ *
++ * Store the given value in the variable send a wake up to any tasks
++ * waiting on the variable. All necessary barriers are included to ensure
++ * the task calling wait_var_event() sees the new value and all values
++ * written to memory before this call.
++ */
++#define store_release_wake_up(var, val) \
++do { \
++ smp_store_release(var, val); \
++ smp_mb(); \
++ wake_up_var(var); \
++} while (0)
++
+ #endif /* _LINUX_WAIT_BIT_H */
+--
+2.39.5
+
powerpc-eeh-export-eeh_unfreeze_pe.patch
powerpc-eeh-make-eeh-driver-device-hotplug-safe.patch
pci-pnv_php-fix-surprise-plug-detection-and-recovery.patch
+pnfs-flexfiles-don-t-attempt-pnfs-on-fatal-ds-errors.patch
+sched-add-test_and_clear_wake_up_bit-and-atomic_dec_.patch
+nfs-fix-wakeup-of-__nfs_lookup_revalidate-in-unblock.patch
+nfs-fix-filehandle-bounds-checking-in-nfs_fh_to_dent.patch
+nfsv4.2-another-fix-for-listxattr.patch
+nfs-fixup-allocation-flags-for-nfsiod-s-__gfp_noretr.patch
+md-md-cluster-handle-remove-message-earlier.patch
+netpoll-prevent-hanging-napi-when-netcons-gets-enabl.patch
+phy-mscc-fix-parsing-of-unicast-frames.patch
+net-ipa-add-ipa-v5.1-and-v5.5-to-ipa_version_string.patch
+pptp-ensure-minimal-skb-length-in-pptp_xmit.patch
+netlink-specs-ethtool-fix-module-eeprom-input-output.patch
+net-mlx5-correctly-set-gso_segs-when-lro-is-used.patch
+ipv6-reject-malicious-packets-in-ipv6_gso_segment.patch
+net-drop-ufo-packets-in-udp_rcv_segment.patch
+net-sched-taprio-enforce-minimum-value-for-picos_per.patch
+sunrpc-fix-client-side-handling-of-tls-alerts.patch
+benet-fix-bug-when-creating-vfs.patch
+net-sched-mqprio-fix-stack-out-of-bounds-write-in-tc.patch
+irqchip-build-imx_mu_msi-only-on-arm.patch
+alsa-hda-ca0132-fix-missing-error-handling-in-ca0132.patch
+smb-server-remove-separate-empty_recvmsg_queue.patch
+smb-server-make-sure-we-call-ib_dma_unmap_single-onl.patch
+smb-server-let-recv_done-consistently-call-put_recvm.patch
+smb-server-let-recv_done-avoid-touching-data_transfe.patch
+smb-client-use-min-macro.patch
+smb-client-correct-typos-in-multiple-comments-across.patch
+smb-smbdirect-add-smbdirect_socket.h.patch
+smb-client-make-use-of-common-smbdirect_socket.patch
+smb-client-let-send_done-cleanup-before-calling-smbd.patch
+smb-client-make-sure-we-call-ib_dma_unmap_single-onl.patch
+smb-client-let-recv_done-cleanup-before-notifying-th.patch
+pptp-fix-pptp_xmit-error-path.patch
+smb-client-return-an-error-if-rdma_connect-does-not-.patch
--- /dev/null
+From b3e6ac9c0094cecfef05e2911290fcaf5ed28ee7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Sep 2024 14:55:43 +0800
+Subject: smb: client: Correct typos in multiple comments across various files
+
+From: Shen Lichuan <shenlichuan@vivo.com>
+
+[ Upstream commit e9f49feefb4b13b36441aae51649a67a8389bd40 ]
+
+Fixed some confusing typos that were currently identified with codespell,
+the details are as follows:
+
+-in the code comments:
+fs/smb/client/cifsacl.h:58: inheritence ==> inheritance
+fs/smb/client/cifsencrypt.c:242: origiginal ==> original
+fs/smb/client/cifsfs.c:164: referece ==> reference
+fs/smb/client/cifsfs.c:292: ned ==> need
+fs/smb/client/cifsglob.h:779: initital ==> initial
+fs/smb/client/cifspdu.h:784: altetnative ==> alternative
+fs/smb/client/cifspdu.h:2409: conrol ==> control
+fs/smb/client/cifssmb.c:1218: Expirement ==> Experiment
+fs/smb/client/cifssmb.c:3021: conver ==> convert
+fs/smb/client/cifssmb.c:3998: asterik ==> asterisk
+fs/smb/client/file.c:2505: useable ==> usable
+fs/smb/client/fs_context.h:263: timemout ==> timeout
+fs/smb/client/misc.c:257: responsbility ==> responsibility
+fs/smb/client/netmisc.c:1006: divisable ==> divisible
+fs/smb/client/readdir.c:556: endianess ==> endianness
+fs/smb/client/readdir.c:818: bu ==> by
+fs/smb/client/smb2ops.c:2180: snaphots ==> snapshots
+fs/smb/client/smb2ops.c:3586: otions ==> options
+fs/smb/client/smb2pdu.c:2979: timestaps ==> timestamps
+fs/smb/client/smb2pdu.c:4574: memmory ==> memory
+fs/smb/client/smb2transport.c:699: origiginal ==> original
+fs/smb/client/smbdirect.c:222: happenes ==> happens
+fs/smb/client/smbdirect.c:1347: registartions ==> registrations
+fs/smb/client/smbdirect.h:114: accoutning ==> accounting
+
+Signed-off-by: Shen Lichuan <shenlichuan@vivo.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 5349ae5e05fa ("smb: client: let send_done() cleanup before calling smbd_disconnect_rdma_connection()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsacl.h | 2 +-
+ fs/smb/client/cifsencrypt.c | 2 +-
+ fs/smb/client/cifsfs.c | 4 ++--
+ fs/smb/client/cifsglob.h | 2 +-
+ fs/smb/client/cifspdu.h | 4 ++--
+ fs/smb/client/cifssmb.c | 6 +++---
+ fs/smb/client/file.c | 2 +-
+ fs/smb/client/fs_context.h | 2 +-
+ fs/smb/client/misc.c | 2 +-
+ fs/smb/client/netmisc.c | 2 +-
+ fs/smb/client/readdir.c | 4 ++--
+ fs/smb/client/smb2ops.c | 4 ++--
+ fs/smb/client/smb2pdu.c | 4 ++--
+ fs/smb/client/smb2transport.c | 2 +-
+ fs/smb/client/smbdirect.c | 4 ++--
+ fs/smb/client/smbdirect.h | 2 +-
+ 16 files changed, 24 insertions(+), 24 deletions(-)
+
+diff --git a/fs/smb/client/cifsacl.h b/fs/smb/client/cifsacl.h
+index cbaed8038e36..05b3650ba0ae 100644
+--- a/fs/smb/client/cifsacl.h
++++ b/fs/smb/client/cifsacl.h
+@@ -144,7 +144,7 @@ struct smb3_sd {
+ #define ACL_CONTROL_SI 0x0800 /* SACL Auto-Inherited */
+ #define ACL_CONTROL_DI 0x0400 /* DACL Auto-Inherited */
+ #define ACL_CONTROL_SC 0x0200 /* SACL computed through inheritance */
+-#define ACL_CONTROL_DC 0x0100 /* DACL computed through inheritence */
++#define ACL_CONTROL_DC 0x0100 /* DACL computed through inheritance */
+ #define ACL_CONTROL_SS 0x0080 /* Create server ACL */
+ #define ACL_CONTROL_DT 0x0040 /* DACL provided by trusted source */
+ #define ACL_CONTROL_SD 0x0020 /* SACL defaulted */
+diff --git a/fs/smb/client/cifsencrypt.c b/fs/smb/client/cifsencrypt.c
+index b0473c2567fe..da3d003cb43d 100644
+--- a/fs/smb/client/cifsencrypt.c
++++ b/fs/smb/client/cifsencrypt.c
+@@ -353,7 +353,7 @@ int cifs_verify_signature(struct smb_rqst *rqst,
+ cifs_dbg(FYI, "dummy signature received for smb command 0x%x\n",
+ cifs_pdu->Command);
+
+- /* save off the origiginal signature so we can modify the smb and check
++ /* save off the original signature so we can modify the smb and check
+ its signature against what the server sent */
+ memcpy(server_response_sig, cifs_pdu->Signature.SecuritySignature, 8);
+
+diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
+index bbb0ef18d7b8..a1ab95f382d5 100644
+--- a/fs/smb/client/cifsfs.c
++++ b/fs/smb/client/cifsfs.c
+@@ -161,7 +161,7 @@ __u32 cifs_lock_secret;
+
+ /*
+ * Bumps refcount for cifs super block.
+- * Note that it should be only called if a referece to VFS super block is
++ * Note that it should be only called if a reference to VFS super block is
+ * already held, e.g. in open-type syscalls context. Otherwise it can race with
+ * atomic_dec_and_test in deactivate_locked_super.
+ */
+@@ -289,7 +289,7 @@ static void cifs_kill_sb(struct super_block *sb)
+ struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+
+ /*
+- * We ned to release all dentries for the cached directories
++ * We need to release all dentries for the cached directories
+ * before we kill the sb.
+ */
+ if (cifs_sb->root) {
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index c9b37f2ebde8..4bafb1adfb22 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -785,7 +785,7 @@ struct TCP_Server_Info {
+ } compression;
+ __u16 signing_algorithm;
+ __le16 cipher_type;
+- /* save initital negprot hash */
++ /* save initial negprot hash */
+ __u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
+ bool signing_negotiated; /* true if valid signing context rcvd from server */
+ bool posix_ext_supported;
+diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h
+index 763178b77454..f4cfb082dfd1 100644
+--- a/fs/smb/client/cifspdu.h
++++ b/fs/smb/client/cifspdu.h
+@@ -781,7 +781,7 @@ typedef struct smb_com_logoff_andx_rsp {
+ __u16 ByteCount;
+ } __attribute__((packed)) LOGOFF_ANDX_RSP;
+
+-typedef union smb_com_tree_disconnect { /* as an altetnative can use flag on
++typedef union smb_com_tree_disconnect { /* as an alternative can use flag on
+ tree_connect PDU to effect disconnect */
+ /* tdis is probably simplest SMB PDU */
+ struct {
+@@ -2405,7 +2405,7 @@ struct cifs_posix_ace { /* access control entry (ACE) */
+ __le64 cifs_uid; /* or gid */
+ } __attribute__((packed));
+
+-struct cifs_posix_acl { /* access conrol list (ACL) */
++struct cifs_posix_acl { /* access control list (ACL) */
+ __le16 version;
+ __le16 access_entry_count; /* access ACL - count of entries */
+ __le16 default_entry_count; /* default ACL - count of entries */
+diff --git a/fs/smb/client/cifssmb.c b/fs/smb/client/cifssmb.c
+index db35e68e8a58..81d425f571e2 100644
+--- a/fs/smb/client/cifssmb.c
++++ b/fs/smb/client/cifssmb.c
+@@ -1214,7 +1214,7 @@ CIFS_open(const unsigned int xid, struct cifs_open_parms *oparms, int *oplock,
+ req->CreateDisposition = cpu_to_le32(disposition);
+ req->CreateOptions = cpu_to_le32(create_options & CREATE_OPTIONS_MASK);
+
+- /* BB Expirement with various impersonation levels and verify */
++ /* BB Experiment with various impersonation levels and verify */
+ req->ImpersonationLevel = cpu_to_le32(SECURITY_IMPERSONATION);
+ req->SecurityFlags = SECURITY_CONTEXT_TRACKING|SECURITY_EFFECTIVE_ONLY;
+
+@@ -2993,7 +2993,7 @@ static void cifs_init_ace(struct cifs_posix_ace *cifs_ace,
+
+ /**
+ * posix_acl_to_cifs - convert ACLs from POSIX ACL to cifs format
+- * @parm_data: ACLs in cifs format to conver to
++ * @parm_data: ACLs in cifs format to convert to
+ * @acl: ACLs in POSIX ACL format to convert from
+ * @acl_type: the type of POSIX ACLs stored in @acl
+ *
+@@ -3970,7 +3970,7 @@ CIFSFindFirst(const unsigned int xid, struct cifs_tcon *tcon,
+ name_len =
+ cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+ PATH_MAX, nls_codepage, remap);
+- /* We can not add the asterik earlier in case
++ /* We can not add the asterisk earlier in case
+ it got remapped to 0xF03A as if it were part of the
+ directory name instead of a wildcard */
+ name_len *= 2;
+diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
+index 99a8c6fbd41a..7a2b81fbd9cf 100644
+--- a/fs/smb/client/file.c
++++ b/fs/smb/client/file.c
+@@ -2421,7 +2421,7 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
+ }
+ }
+ }
+- /* couldn't find useable FH with same pid, try any available */
++ /* couldn't find usable FH with same pid, try any available */
+ if (!any_available) {
+ any_available = true;
+ goto refind_writable;
+diff --git a/fs/smb/client/fs_context.h b/fs/smb/client/fs_context.h
+index 52ee72e562f5..90ebff5d0199 100644
+--- a/fs/smb/client/fs_context.h
++++ b/fs/smb/client/fs_context.h
+@@ -263,7 +263,7 @@ struct smb3_fs_context {
+ unsigned int min_offload;
+ unsigned int retrans;
+ bool sockopt_tcp_nodelay:1;
+- /* attribute cache timemout for files and directories in jiffies */
++ /* attribute cache timeout for files and directories in jiffies */
+ unsigned long acregmax;
+ unsigned long acdirmax;
+ /* timeout for deferred close of files in jiffies */
+diff --git a/fs/smb/client/misc.c b/fs/smb/client/misc.c
+index bbbe48447765..ad77952f6d81 100644
+--- a/fs/smb/client/misc.c
++++ b/fs/smb/client/misc.c
+@@ -260,7 +260,7 @@ free_rsp_buf(int resp_buftype, void *rsp)
+ }
+
+ /* NB: MID can not be set if treeCon not passed in, in that
+- case it is responsbility of caller to set the mid */
++ case it is responsibility of caller to set the mid */
+ void
+ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
+ const struct cifs_tcon *treeCon, int word_count
+diff --git a/fs/smb/client/netmisc.c b/fs/smb/client/netmisc.c
+index 1b52e6ac431c..2a8d71221e5e 100644
+--- a/fs/smb/client/netmisc.c
++++ b/fs/smb/client/netmisc.c
+@@ -1003,7 +1003,7 @@ struct timespec64 cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset)
+ year is 2**7, the last year is 1980+127, which means we need only
+ consider 2 special case years, ie the years 2000 and 2100, and only
+ adjust for the lack of leap year for the year 2100, as 2000 was a
+- leap year (divisable by 400) */
++ leap year (divisible by 400) */
+ if (year >= 120) /* the year 2100 */
+ days = days - 1; /* do not count leap year for the year 2100 */
+
+diff --git a/fs/smb/client/readdir.c b/fs/smb/client/readdir.c
+index 0be16f8acd9a..5febf8afaab0 100644
+--- a/fs/smb/client/readdir.c
++++ b/fs/smb/client/readdir.c
+@@ -567,7 +567,7 @@ static void cifs_fill_dirent_std(struct cifs_dirent *de,
+ const FIND_FILE_STANDARD_INFO *info)
+ {
+ de->name = &info->FileName[0];
+- /* one byte length, no endianess conversion */
++ /* one byte length, no endianness conversion */
+ de->namelen = info->FileNameLength;
+ de->resume_key = info->ResumeKey;
+ }
+@@ -832,7 +832,7 @@ static bool emit_cached_dirents(struct cached_dirents *cde,
+ * However, this sequence of ->pos values may have holes
+ * in it, for example dot-dirs returned from the server
+ * are suppressed.
+- * Handle this bu forcing ctx->pos to be the same as the
++ * Handle this by forcing ctx->pos to be the same as the
+ * ->pos of the current dirent we emit from the cache.
+ * This means that when we emit these entries from the cache
+ * we now emit them with the same ->pos value as in the
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 2385e570e331..d0734aa1961a 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -2132,7 +2132,7 @@ smb3_enum_snapshots(const unsigned int xid, struct cifs_tcon *tcon,
+ NULL, 0 /* no input data */, max_response_size,
+ (char **)&retbuf,
+ &ret_data_len);
+- cifs_dbg(FYI, "enum snaphots ioctl returned %d and ret buflen is %d\n",
++ cifs_dbg(FYI, "enum snapshots ioctl returned %d and ret buflen is %d\n",
+ rc, ret_data_len);
+ if (rc)
+ return rc;
+@@ -3540,7 +3540,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
+ /*
+ * At this point, we are trying to fallocate an internal
+ * regions of a sparse file. Since smb2 does not have a
+- * fallocate command we have two otions on how to emulate this.
++ * fallocate command we have two options on how to emulate this.
+ * We can either turn the entire file to become non-sparse
+ * which we only do if the fallocate is for virtually
+ * the whole file, or we can overwrite the region with zeroes
+diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
+index 357abb0170c4..e58cad5d735a 100644
+--- a/fs/smb/client/smb2pdu.c
++++ b/fs/smb/client/smb2pdu.c
+@@ -2989,7 +2989,7 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+
+ SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+- /* Eventually save off posix specific response info and timestaps */
++ /* Eventually save off posix specific response info and timestamps */
+
+ err_free_rsp_buf:
+ free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+@@ -4574,7 +4574,7 @@ smb2_readv_callback(struct mid_q_entry *mid)
+ }
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+ /*
+- * If this rdata has a memmory registered, the MR can be freed
++ * If this rdata has a memory registered, the MR can be freed
+ * MR needs to be freed as soon as I/O finishes to prevent deadlock
+ * because they have limited number and are used for future I/Os
+ */
+diff --git a/fs/smb/client/smb2transport.c b/fs/smb/client/smb2transport.c
+index 4a43802375b3..99081e9d6283 100644
+--- a/fs/smb/client/smb2transport.c
++++ b/fs/smb/client/smb2transport.c
+@@ -720,7 +720,7 @@ smb2_verify_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
+ shdr->Command);
+
+ /*
+- * Save off the origiginal signature so we can modify the smb and check
++ * Save off the original signature so we can modify the smb and check
+ * our calculated signature against what the server sent.
+ */
+ memcpy(server_response_sig, shdr->Signature, SMB2_SIGNATURE_SIZE);
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index c41a44f4fc63..e7f15515f5d4 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -218,7 +218,7 @@ static int smbd_conn_upcall(
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ case RDMA_CM_EVENT_DISCONNECTED:
+- /* This happenes when we fail the negotiation */
++ /* This happens when we fail the negotiation */
+ if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+ info->transport_status = SMBD_DISCONNECTED;
+ wake_up(&info->conn_wait);
+@@ -1343,7 +1343,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ * are not locked by srv_mutex. It is possible some processes are
+ * blocked on transport srv_mutex while holding memory registration.
+ * Release the transport srv_mutex to allow them to hit the failure
+- * path when sending data, and then release memory registartions.
++ * path when sending data, and then release memory registrations.
+ */
+ log_rdma_event(INFO, "freeing mr list\n");
+ wake_up_interruptible_all(&info->wait_mr);
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index 83f239f376f0..c08e3665150d 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -111,7 +111,7 @@ struct smbd_connection {
+ /* Used by transport to wait until all MRs are returned */
+ wait_queue_head_t wait_for_mr_cleanup;
+
+- /* Activity accoutning */
++ /* Activity accounting */
+ atomic_t send_pending;
+ wait_queue_head_t wait_send_pending;
+ wait_queue_head_t wait_post_send;
+--
+2.39.5
+
--- /dev/null
+From 20883f2bf026600895b4868dda2bb3beac0959bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:15 +0200
+Subject: smb: client: let recv_done() cleanup before notifying the callers.
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit bdd7afc6dca5e0ebbb75583484aa6ea9e03fbb13 ]
+
+We should call put_receive_buffer() before waking up the callers.
+
+For the internal error case of response->type being unexpected,
+we now also call smbd_disconnect_rdma_connection() instead
+of not waking up the callers at all.
+
+Note that the SMBD_TRANSFER_DATA case still has problems,
+which will be addressed in the next commit in order to make
+it easier to review this one.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 49aafb58c7df..c7f4eb8c9a10 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -467,7 +467,6 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
+ log_rdma_recv(INFO, "wc->status=%d opcode=%d\n",
+ wc->status, wc->opcode);
+- smbd_disconnect_rdma_connection(info);
+ goto error;
+ }
+
+@@ -484,8 +483,9 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ info->full_packet_received = true;
+ info->negotiate_done =
+ process_negotiation_response(response, wc->byte_len);
++ put_receive_buffer(info, response);
+ complete(&info->negotiate_completion);
+- break;
++ return;
+
+ /* SMBD data transfer packet */
+ case SMBD_TRANSFER_DATA:
+@@ -542,14 +542,16 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ }
+
+ return;
+-
+- default:
+- log_rdma_recv(ERR,
+- "unexpected response type=%d\n", response->type);
+ }
+
++ /*
++ * This is an internal error!
++ */
++ log_rdma_recv(ERR, "unexpected response type=%d\n", response->type);
++ WARN_ON_ONCE(response->type != SMBD_TRANSFER_DATA);
+ error:
+ put_receive_buffer(info, response);
++ smbd_disconnect_rdma_connection(info);
+ }
+
+ static struct rdma_cm_id *smbd_create_id(
+--
+2.39.5
+
--- /dev/null
+From 850451f27cf8795d3ac0df4540d82a062f8c0752 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:12 +0200
+Subject: smb: client: let send_done() cleanup before calling
+ smbd_disconnect_rdma_connection()
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 5349ae5e05fa37409fd48a1eb483b199c32c889b ]
+
+We should call ib_dma_unmap_single() and mempool_free() before calling
+smbd_disconnect_rdma_connection().
+
+And smbd_disconnect_rdma_connection() needs to be the last function to
+call as all other state might already be gone after it returns.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 14 ++++++++------
+ 1 file changed, 8 insertions(+), 6 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 8d215b207dcc..9aef85f3cf11 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -282,18 +282,20 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+ request, wc->status);
+
+- if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
+- log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
+- wc->status, wc->opcode);
+- smbd_disconnect_rdma_connection(request->info);
+- }
+-
+ for (i = 0; i < request->num_sge; i++)
+ ib_dma_unmap_single(sc->ib.dev,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+
++ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_SEND) {
++ log_rdma_send(ERR, "wc->status=%d wc->opcode=%d\n",
++ wc->status, wc->opcode);
++ mempool_free(request, info->request_mempool);
++ smbd_disconnect_rdma_connection(info);
++ return;
++ }
++
+ if (atomic_dec_and_test(&request->info->send_pending))
+ wake_up(&request->info->wait_send_pending);
+
+--
+2.39.5
+
--- /dev/null
+From cee3520dcab31ac7e693acd281e93bf18f867d05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:10:14 +0200
+Subject: smb: client: make sure we call ib_dma_unmap_single() only if we
+ called ib_dma_map_single already
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 047682c370b6f18fec818b57b0ed8b501bdb79f8 ]
+
+In case of failures either ib_dma_map_single() might not be called yet
+or ib_dma_unmap_single() was already called.
+
+We should make sure put_receive_buffer() only calls
+ib_dma_unmap_single() if needed.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 9aef85f3cf11..49aafb58c7df 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1043,6 +1043,7 @@ static int smbd_post_recv(
+ if (rc) {
+ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
++ response->sge.length = 0;
+ smbd_disconnect_rdma_connection(info);
+ log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+ }
+@@ -1202,8 +1203,13 @@ static void put_receive_buffer(
+ struct smbdirect_socket *sc = &info->socket;
+ unsigned long flags;
+
+- ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+- response->sge.length, DMA_FROM_DEVICE);
++ if (likely(response->sge.length != 0)) {
++ ib_dma_unmap_single(sc->ib.dev,
++ response->sge.addr,
++ response->sge.length,
++ DMA_FROM_DEVICE);
++ response->sge.length = 0;
++ }
+
+ spin_lock_irqsave(&info->receive_queue_lock, flags);
+ list_add_tail(&response->list, &info->receive_queue);
+@@ -1241,6 +1247,7 @@ static int allocate_receive_buffers(struct smbd_connection *info, int num_buf)
+ goto allocate_failed;
+
+ response->info = info;
++ response->sge.length = 0;
+ list_add_tail(&response->list, &info->receive_queue);
+ info->count_receive_queue++;
+ }
+--
+2.39.5
+
--- /dev/null
+From 5262b19d5bb2c17eef92dfbac0cc94573f95fcd6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:37 +0200
+Subject: smb: client: make use of common smbdirect_socket
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit c3011b9a7deaaaabdf955815d29eac39c8b75e67 ]
+
+This is the next step in the direction of a common smbdirect layer.
+Currently only structures are shared, but that will change
+over time until everything is shared.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 5349ae5e05fa ("smb: client: let send_done() cleanup before calling smbd_disconnect_rdma_connection()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c | 2 +-
+ fs/smb/client/smbdirect.c | 258 ++++++++++++++++++++-----------------
+ fs/smb/client/smbdirect.h | 12 +-
+ 3 files changed, 146 insertions(+), 126 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 4a20e92474b2..50ad8246ed18 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -384,7 +384,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+ seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+ "transport status: %x",
+ server->smbd_conn->protocol,
+- server->smbd_conn->transport_status);
++ server->smbd_conn->socket.status);
+ seq_printf(m, "\nConn receive_credit_max: %x "
+ "send_credit_target: %x max_send_size: %x",
+ server->smbd_conn->receive_credit_max,
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index e7f15515f5d4..8d215b207dcc 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -164,10 +164,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
+ {
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, disconnect_work);
++ struct smbdirect_socket *sc = &info->socket;
+
+- if (info->transport_status == SMBD_CONNECTED) {
+- info->transport_status = SMBD_DISCONNECTING;
+- rdma_disconnect(info->id);
++ if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
++ sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
++ rdma_disconnect(sc->rdma.cm_id);
+ }
+ }
+
+@@ -181,6 +182,7 @@ static int smbd_conn_upcall(
+ struct rdma_cm_id *id, struct rdma_cm_event *event)
+ {
+ struct smbd_connection *info = id->context;
++ struct smbdirect_socket *sc = &info->socket;
+
+ log_rdma_event(INFO, "event=%d status=%d\n",
+ event->event, event->status);
+@@ -204,7 +206,7 @@ static int smbd_conn_upcall(
+
+ case RDMA_CM_EVENT_ESTABLISHED:
+ log_rdma_event(INFO, "connected event=%d\n", event->event);
+- info->transport_status = SMBD_CONNECTED;
++ sc->status = SMBDIRECT_SOCKET_CONNECTED;
+ wake_up_interruptible(&info->conn_wait);
+ break;
+
+@@ -212,20 +214,20 @@ static int smbd_conn_upcall(
+ case RDMA_CM_EVENT_UNREACHABLE:
+ case RDMA_CM_EVENT_REJECTED:
+ log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+- info->transport_status = SMBD_DISCONNECTED;
++ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ wake_up_interruptible(&info->conn_wait);
+ break;
+
+ case RDMA_CM_EVENT_DEVICE_REMOVAL:
+ case RDMA_CM_EVENT_DISCONNECTED:
+ /* This happens when we fail the negotiation */
+- if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+- info->transport_status = SMBD_DISCONNECTED;
++ if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
++ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ wake_up(&info->conn_wait);
+ break;
+ }
+
+- info->transport_status = SMBD_DISCONNECTED;
++ sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+ wake_up_interruptible(&info->disconn_wait);
+ wake_up_interruptible(&info->wait_reassembly_queue);
+ wake_up_interruptible_all(&info->wait_send_queue);
+@@ -274,6 +276,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ int i;
+ struct smbd_request *request =
+ container_of(wc->wr_cqe, struct smbd_request, cqe);
++ struct smbd_connection *info = request->info;
++ struct smbdirect_socket *sc = &info->socket;
+
+ log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+ request, wc->status);
+@@ -285,7 +289,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+ }
+
+ for (i = 0; i < request->num_sge; i++)
+- ib_dma_unmap_single(request->info->id->device,
++ ib_dma_unmap_single(sc->ib.dev,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+@@ -392,8 +396,9 @@ static void smbd_post_send_credits(struct work_struct *work)
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection,
+ post_send_credits_work);
++ struct smbdirect_socket *sc = &info->socket;
+
+- if (info->transport_status != SMBD_CONNECTED) {
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ wake_up(&info->wait_receive_queues);
+ return;
+ }
+@@ -634,32 +639,34 @@ static int smbd_ia_open(
+ struct smbd_connection *info,
+ struct sockaddr *dstaddr, int port)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ int rc;
+
+- info->id = smbd_create_id(info, dstaddr, port);
+- if (IS_ERR(info->id)) {
+- rc = PTR_ERR(info->id);
++ sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
++ if (IS_ERR(sc->rdma.cm_id)) {
++ rc = PTR_ERR(sc->rdma.cm_id);
+ goto out1;
+ }
++ sc->ib.dev = sc->rdma.cm_id->device;
+
+- if (!frwr_is_supported(&info->id->device->attrs)) {
++ if (!frwr_is_supported(&sc->ib.dev->attrs)) {
+ log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
+ log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
+- info->id->device->attrs.device_cap_flags,
+- info->id->device->attrs.max_fast_reg_page_list_len);
++ sc->ib.dev->attrs.device_cap_flags,
++ sc->ib.dev->attrs.max_fast_reg_page_list_len);
+ rc = -EPROTONOSUPPORT;
+ goto out2;
+ }
+ info->max_frmr_depth = min_t(int,
+ smbd_max_frmr_depth,
+- info->id->device->attrs.max_fast_reg_page_list_len);
++ sc->ib.dev->attrs.max_fast_reg_page_list_len);
+ info->mr_type = IB_MR_TYPE_MEM_REG;
+- if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
++ if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+ info->mr_type = IB_MR_TYPE_SG_GAPS;
+
+- info->pd = ib_alloc_pd(info->id->device, 0);
+- if (IS_ERR(info->pd)) {
+- rc = PTR_ERR(info->pd);
++ sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
++ if (IS_ERR(sc->ib.pd)) {
++ rc = PTR_ERR(sc->ib.pd);
+ log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+ goto out2;
+ }
+@@ -667,8 +674,8 @@ static int smbd_ia_open(
+ return 0;
+
+ out2:
+- rdma_destroy_id(info->id);
+- info->id = NULL;
++ rdma_destroy_id(sc->rdma.cm_id);
++ sc->rdma.cm_id = NULL;
+
+ out1:
+ return rc;
+@@ -682,6 +689,7 @@ static int smbd_ia_open(
+ */
+ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct ib_send_wr send_wr;
+ int rc = -ENOMEM;
+ struct smbd_request *request;
+@@ -705,18 +713,18 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+
+ request->num_sge = 1;
+ request->sge[0].addr = ib_dma_map_single(
+- info->id->device, (void *)packet,
++ sc->ib.dev, (void *)packet,
+ sizeof(*packet), DMA_TO_DEVICE);
+- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+ rc = -EIO;
+ goto dma_mapping_failed;
+ }
+
+ request->sge[0].length = sizeof(*packet);
+- request->sge[0].lkey = info->pd->local_dma_lkey;
++ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+
+ ib_dma_sync_single_for_device(
+- info->id->device, request->sge[0].addr,
++ sc->ib.dev, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+ request->cqe.done = send_done;
+@@ -733,14 +741,14 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ request->sge[0].length, request->sge[0].lkey);
+
+ atomic_inc(&info->send_pending);
+- rc = ib_post_send(info->id->qp, &send_wr, NULL);
++ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
+ if (!rc)
+ return 0;
+
+ /* if we reach here, post send failed */
+ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ atomic_dec(&info->send_pending);
+- ib_dma_unmap_single(info->id->device, request->sge[0].addr,
++ ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
+ request->sge[0].length, DMA_TO_DEVICE);
+
+ smbd_disconnect_rdma_connection(info);
+@@ -792,6 +800,7 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
+ static int smbd_post_send(struct smbd_connection *info,
+ struct smbd_request *request)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct ib_send_wr send_wr;
+ int rc, i;
+
+@@ -800,7 +809,7 @@ static int smbd_post_send(struct smbd_connection *info,
+ "rdma_request sge[%d] addr=0x%llx length=%u\n",
+ i, request->sge[i].addr, request->sge[i].length);
+ ib_dma_sync_single_for_device(
+- info->id->device,
++ sc->ib.dev,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+@@ -815,7 +824,7 @@ static int smbd_post_send(struct smbd_connection *info,
+ send_wr.opcode = IB_WR_SEND;
+ send_wr.send_flags = IB_SEND_SIGNALED;
+
+- rc = ib_post_send(info->id->qp, &send_wr, NULL);
++ rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
+ if (rc) {
+ log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+ smbd_disconnect_rdma_connection(info);
+@@ -832,6 +841,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ struct iov_iter *iter,
+ int *_remaining_data_length)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ int i, rc;
+ int header_length;
+ int data_length;
+@@ -843,11 +853,11 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ /* Wait for send credits. A SMBD packet needs one credit */
+ rc = wait_event_interruptible(info->wait_send_queue,
+ atomic_read(&info->send_credits) > 0 ||
+- info->transport_status != SMBD_CONNECTED);
++ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ if (rc)
+ goto err_wait_credit;
+
+- if (info->transport_status != SMBD_CONNECTED) {
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+ rc = -EAGAIN;
+ goto err_wait_credit;
+@@ -860,9 +870,9 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ wait_send_queue:
+ wait_event(info->wait_post_send,
+ atomic_read(&info->send_pending) < info->send_credit_target ||
+- info->transport_status != SMBD_CONNECTED);
++ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+
+- if (info->transport_status != SMBD_CONNECTED) {
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
+ rc = -EAGAIN;
+ goto err_wait_send_queue;
+@@ -889,8 +899,8 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ .nr_sge = 1,
+ .max_sge = SMBDIRECT_MAX_SEND_SGE,
+ .sge = request->sge,
+- .device = info->id->device,
+- .local_dma_lkey = info->pd->local_dma_lkey,
++ .device = sc->ib.dev,
++ .local_dma_lkey = sc->ib.pd->local_dma_lkey,
+ .direction = DMA_TO_DEVICE,
+ };
+
+@@ -942,18 +952,18 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ if (!data_length)
+ header_length = offsetof(struct smbd_data_transfer, padding);
+
+- request->sge[0].addr = ib_dma_map_single(info->id->device,
++ request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
+ (void *)packet,
+ header_length,
+ DMA_TO_DEVICE);
+- if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++ if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+ rc = -EIO;
+ request->sge[0].addr = 0;
+ goto err_dma;
+ }
+
+ request->sge[0].length = header_length;
+- request->sge[0].lkey = info->pd->local_dma_lkey;
++ request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+
+ rc = smbd_post_send(info, request);
+ if (!rc)
+@@ -962,7 +972,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ err_dma:
+ for (i = 0; i < request->num_sge; i++)
+ if (request->sge[i].addr)
+- ib_dma_unmap_single(info->id->device,
++ ib_dma_unmap_single(sc->ib.dev,
+ request->sge[i].addr,
+ request->sge[i].length,
+ DMA_TO_DEVICE);
+@@ -1007,17 +1017,18 @@ static int smbd_post_send_empty(struct smbd_connection *info)
+ static int smbd_post_recv(
+ struct smbd_connection *info, struct smbd_response *response)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct ib_recv_wr recv_wr;
+ int rc = -EIO;
+
+ response->sge.addr = ib_dma_map_single(
+- info->id->device, response->packet,
++ sc->ib.dev, response->packet,
+ info->max_receive_size, DMA_FROM_DEVICE);
+- if (ib_dma_mapping_error(info->id->device, response->sge.addr))
++ if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
+ return rc;
+
+ response->sge.length = info->max_receive_size;
+- response->sge.lkey = info->pd->local_dma_lkey;
++ response->sge.lkey = sc->ib.pd->local_dma_lkey;
+
+ response->cqe.done = recv_done;
+
+@@ -1026,9 +1037,9 @@ static int smbd_post_recv(
+ recv_wr.sg_list = &response->sge;
+ recv_wr.num_sge = 1;
+
+- rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
++ rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
+ if (rc) {
+- ib_dma_unmap_single(info->id->device, response->sge.addr,
++ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
+ smbd_disconnect_rdma_connection(info);
+ log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+@@ -1186,9 +1197,10 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+ static void put_receive_buffer(
+ struct smbd_connection *info, struct smbd_response *response)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ unsigned long flags;
+
+- ib_dma_unmap_single(info->id->device, response->sge.addr,
++ ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+ response->sge.length, DMA_FROM_DEVICE);
+
+ spin_lock_irqsave(&info->receive_queue_lock, flags);
+@@ -1288,6 +1300,7 @@ static void idle_connection_timer(struct work_struct *work)
+ void smbd_destroy(struct TCP_Server_Info *server)
+ {
+ struct smbd_connection *info = server->smbd_conn;
++ struct smbdirect_socket *sc;
+ struct smbd_response *response;
+ unsigned long flags;
+
+@@ -1295,19 +1308,21 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ log_rdma_event(INFO, "rdma session already destroyed\n");
+ return;
+ }
++ sc = &info->socket;
+
+ log_rdma_event(INFO, "destroying rdma session\n");
+- if (info->transport_status != SMBD_DISCONNECTED) {
+- rdma_disconnect(server->smbd_conn->id);
++ if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
++ rdma_disconnect(sc->rdma.cm_id);
+ log_rdma_event(INFO, "wait for transport being disconnected\n");
+ wait_event_interruptible(
+ info->disconn_wait,
+- info->transport_status == SMBD_DISCONNECTED);
++ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ }
+
+ log_rdma_event(INFO, "destroying qp\n");
+- ib_drain_qp(info->id->qp);
+- rdma_destroy_qp(info->id);
++ ib_drain_qp(sc->ib.qp);
++ rdma_destroy_qp(sc->rdma.cm_id);
++ sc->ib.qp = NULL;
+
+ log_rdma_event(INFO, "cancelling idle timer\n");
+ cancel_delayed_work_sync(&info->idle_timer_work);
+@@ -1354,10 +1369,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ }
+ destroy_mr_list(info);
+
+- ib_free_cq(info->send_cq);
+- ib_free_cq(info->recv_cq);
+- ib_dealloc_pd(info->pd);
+- rdma_destroy_id(info->id);
++ ib_free_cq(sc->ib.send_cq);
++ ib_free_cq(sc->ib.recv_cq);
++ ib_dealloc_pd(sc->ib.pd);
++ rdma_destroy_id(sc->rdma.cm_id);
+
+ /* free mempools */
+ mempool_destroy(info->request_mempool);
+@@ -1366,7 +1381,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ mempool_destroy(info->response_mempool);
+ kmem_cache_destroy(info->response_cache);
+
+- info->transport_status = SMBD_DESTROYED;
++ sc->status = SMBDIRECT_SOCKET_DESTROYED;
+
+ destroy_workqueue(info->workqueue);
+ log_rdma_event(INFO, "rdma session destroyed\n");
+@@ -1391,7 +1406,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
+ * This is possible if transport is disconnected and we haven't received
+ * notification from RDMA, but upper layer has detected timeout
+ */
+- if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
++ if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
+ log_rdma_event(INFO, "disconnecting transport\n");
+ smbd_destroy(server);
+ }
+@@ -1490,6 +1505,7 @@ static struct smbd_connection *_smbd_get_connection(
+ {
+ int rc;
+ struct smbd_connection *info;
++ struct smbdirect_socket *sc;
+ struct rdma_conn_param conn_param;
+ struct ib_qp_init_attr qp_attr;
+ struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+@@ -1499,29 +1515,30 @@ static struct smbd_connection *_smbd_get_connection(
+ info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
+ if (!info)
+ return NULL;
++ sc = &info->socket;
+
+- info->transport_status = SMBD_CONNECTING;
++ sc->status = SMBDIRECT_SOCKET_CONNECTING;
+ rc = smbd_ia_open(info, dstaddr, port);
+ if (rc) {
+ log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
+ goto create_id_failed;
+ }
+
+- if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
+- smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
++ if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
++ smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
+ log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+ smbd_send_credit_target,
+- info->id->device->attrs.max_cqe,
+- info->id->device->attrs.max_qp_wr);
++ sc->ib.dev->attrs.max_cqe,
++ sc->ib.dev->attrs.max_qp_wr);
+ goto config_failed;
+ }
+
+- if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
+- smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
++ if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
++ smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
+ log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+ smbd_receive_credit_max,
+- info->id->device->attrs.max_cqe,
+- info->id->device->attrs.max_qp_wr);
++ sc->ib.dev->attrs.max_cqe,
++ sc->ib.dev->attrs.max_qp_wr);
+ goto config_failed;
+ }
+
+@@ -1532,32 +1549,30 @@ static struct smbd_connection *_smbd_get_connection(
+ info->max_receive_size = smbd_max_receive_size;
+ info->keep_alive_interval = smbd_keep_alive_interval;
+
+- if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+- info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
++ if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
++ sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+ log_rdma_event(ERR,
+ "device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+ IB_DEVICE_NAME_MAX,
+- info->id->device->name,
+- info->id->device->attrs.max_send_sge,
+- info->id->device->attrs.max_recv_sge);
++ sc->ib.dev->name,
++ sc->ib.dev->attrs.max_send_sge,
++ sc->ib.dev->attrs.max_recv_sge);
+ goto config_failed;
+ }
+
+- info->send_cq = NULL;
+- info->recv_cq = NULL;
+- info->send_cq =
+- ib_alloc_cq_any(info->id->device, info,
++ sc->ib.send_cq =
++ ib_alloc_cq_any(sc->ib.dev, info,
+ info->send_credit_target, IB_POLL_SOFTIRQ);
+- if (IS_ERR(info->send_cq)) {
+- info->send_cq = NULL;
++ if (IS_ERR(sc->ib.send_cq)) {
++ sc->ib.send_cq = NULL;
+ goto alloc_cq_failed;
+ }
+
+- info->recv_cq =
+- ib_alloc_cq_any(info->id->device, info,
++ sc->ib.recv_cq =
++ ib_alloc_cq_any(sc->ib.dev, info,
+ info->receive_credit_max, IB_POLL_SOFTIRQ);
+- if (IS_ERR(info->recv_cq)) {
+- info->recv_cq = NULL;
++ if (IS_ERR(sc->ib.recv_cq)) {
++ sc->ib.recv_cq = NULL;
+ goto alloc_cq_failed;
+ }
+
+@@ -1571,29 +1586,30 @@ static struct smbd_connection *_smbd_get_connection(
+ qp_attr.cap.max_inline_data = 0;
+ qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+ qp_attr.qp_type = IB_QPT_RC;
+- qp_attr.send_cq = info->send_cq;
+- qp_attr.recv_cq = info->recv_cq;
++ qp_attr.send_cq = sc->ib.send_cq;
++ qp_attr.recv_cq = sc->ib.recv_cq;
+ qp_attr.port_num = ~0;
+
+- rc = rdma_create_qp(info->id, info->pd, &qp_attr);
++ rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
+ goto create_qp_failed;
+ }
++ sc->ib.qp = sc->rdma.cm_id->qp;
+
+ memset(&conn_param, 0, sizeof(conn_param));
+ conn_param.initiator_depth = 0;
+
+ conn_param.responder_resources =
+- min(info->id->device->attrs.max_qp_rd_atom,
++ min(sc->ib.dev->attrs.max_qp_rd_atom,
+ SMBD_CM_RESPONDER_RESOURCES);
+ info->responder_resources = conn_param.responder_resources;
+ log_rdma_mr(INFO, "responder_resources=%d\n",
+ info->responder_resources);
+
+ /* Need to send IRD/ORD in private data for iWARP */
+- info->id->device->ops.get_port_immutable(
+- info->id->device, info->id->port_num, &port_immutable);
++ sc->ib.dev->ops.get_port_immutable(
++ sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
+ if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+ ird_ord_hdr[0] = info->responder_resources;
+ ird_ord_hdr[1] = 1;
+@@ -1614,16 +1630,16 @@ static struct smbd_connection *_smbd_get_connection(
+ init_waitqueue_head(&info->conn_wait);
+ init_waitqueue_head(&info->disconn_wait);
+ init_waitqueue_head(&info->wait_reassembly_queue);
+- rc = rdma_connect(info->id, &conn_param);
++ rc = rdma_connect(sc->rdma.cm_id, &conn_param);
+ if (rc) {
+ log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+ goto rdma_connect_failed;
+ }
+
+ wait_event_interruptible(
+- info->conn_wait, info->transport_status != SMBD_CONNECTING);
++ info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
+
+- if (info->transport_status != SMBD_CONNECTED) {
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+ goto rdma_connect_failed;
+ }
+@@ -1674,26 +1690,26 @@ static struct smbd_connection *_smbd_get_connection(
+ negotiation_failed:
+ cancel_delayed_work_sync(&info->idle_timer_work);
+ destroy_caches_and_workqueue(info);
+- info->transport_status = SMBD_NEGOTIATE_FAILED;
++ sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+ init_waitqueue_head(&info->conn_wait);
+- rdma_disconnect(info->id);
++ rdma_disconnect(sc->rdma.cm_id);
+ wait_event(info->conn_wait,
+- info->transport_status == SMBD_DISCONNECTED);
++ sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+
+ allocate_cache_failed:
+ rdma_connect_failed:
+- rdma_destroy_qp(info->id);
++ rdma_destroy_qp(sc->rdma.cm_id);
+
+ create_qp_failed:
+ alloc_cq_failed:
+- if (info->send_cq)
+- ib_free_cq(info->send_cq);
+- if (info->recv_cq)
+- ib_free_cq(info->recv_cq);
++ if (sc->ib.send_cq)
++ ib_free_cq(sc->ib.send_cq);
++ if (sc->ib.recv_cq)
++ ib_free_cq(sc->ib.recv_cq);
+
+ config_failed:
+- ib_dealloc_pd(info->pd);
+- rdma_destroy_id(info->id);
++ ib_dealloc_pd(sc->ib.pd);
++ rdma_destroy_id(sc->rdma.cm_id);
+
+ create_id_failed:
+ kfree(info);
+@@ -1733,6 +1749,7 @@ struct smbd_connection *smbd_get_connection(
+ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ unsigned int size)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct smbd_response *response;
+ struct smbd_data_transfer *data_transfer;
+ int to_copy, to_read, data_read, offset;
+@@ -1847,12 +1864,12 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+ rc = wait_event_interruptible(
+ info->wait_reassembly_queue,
+ info->reassembly_data_length >= size ||
+- info->transport_status != SMBD_CONNECTED);
++ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ /* Don't return any data if interrupted */
+ if (rc)
+ return rc;
+
+- if (info->transport_status != SMBD_CONNECTED) {
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_read(ERR, "disconnected\n");
+ return -ECONNABORTED;
+ }
+@@ -1870,6 +1887,7 @@ static int smbd_recv_page(struct smbd_connection *info,
+ struct page *page, unsigned int page_offset,
+ unsigned int to_read)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ int ret;
+ char *to_address;
+ void *page_address;
+@@ -1878,7 +1896,7 @@ static int smbd_recv_page(struct smbd_connection *info,
+ ret = wait_event_interruptible(
+ info->wait_reassembly_queue,
+ info->reassembly_data_length >= to_read ||
+- info->transport_status != SMBD_CONNECTED);
++ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ if (ret)
+ return ret;
+
+@@ -1953,12 +1971,13 @@ int smbd_send(struct TCP_Server_Info *server,
+ int num_rqst, struct smb_rqst *rqst_array)
+ {
+ struct smbd_connection *info = server->smbd_conn;
++ struct smbdirect_socket *sc = &info->socket;
+ struct smb_rqst *rqst;
+ struct iov_iter iter;
+ unsigned int remaining_data_length, klen;
+ int rc, i, rqst_idx;
+
+- if (info->transport_status != SMBD_CONNECTED)
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
+ return -EAGAIN;
+
+ /*
+@@ -2052,6 +2071,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ {
+ struct smbd_connection *info =
+ container_of(work, struct smbd_connection, mr_recovery_work);
++ struct smbdirect_socket *sc = &info->socket;
+ struct smbd_mr *smbdirect_mr;
+ int rc;
+
+@@ -2069,7 +2089,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ }
+
+ smbdirect_mr->mr = ib_alloc_mr(
+- info->pd, info->mr_type,
++ sc->ib.pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+@@ -2098,12 +2118,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+
+ static void destroy_mr_list(struct smbd_connection *info)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct smbd_mr *mr, *tmp;
+
+ cancel_work_sync(&info->mr_recovery_work);
+ list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
+ if (mr->state == MR_INVALIDATED)
+- ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
++ ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
+ mr->sgt.nents, mr->dir);
+ ib_dereg_mr(mr->mr);
+ kfree(mr->sgt.sgl);
+@@ -2120,6 +2141,7 @@ static void destroy_mr_list(struct smbd_connection *info)
+ */
+ static int allocate_mr_list(struct smbd_connection *info)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ int i;
+ struct smbd_mr *smbdirect_mr, *tmp;
+
+@@ -2135,7 +2157,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+ smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+ if (!smbdirect_mr)
+ goto cleanup_entries;
+- smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
++ smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
+ info->max_frmr_depth);
+ if (IS_ERR(smbdirect_mr->mr)) {
+ log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+@@ -2180,20 +2202,20 @@ static int allocate_mr_list(struct smbd_connection *info)
+ */
+ static struct smbd_mr *get_mr(struct smbd_connection *info)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct smbd_mr *ret;
+ int rc;
+ again:
+ rc = wait_event_interruptible(info->wait_mr,
+ atomic_read(&info->mr_ready_count) ||
+- info->transport_status != SMBD_CONNECTED);
++ sc->status != SMBDIRECT_SOCKET_CONNECTED);
+ if (rc) {
+ log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
+ return NULL;
+ }
+
+- if (info->transport_status != SMBD_CONNECTED) {
+- log_rdma_mr(ERR, "info->transport_status=%x\n",
+- info->transport_status);
++ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
++ log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
+ return NULL;
+ }
+
+@@ -2246,6 +2268,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ struct iov_iter *iter,
+ bool writing, bool need_invalidate)
+ {
++ struct smbdirect_socket *sc = &info->socket;
+ struct smbd_mr *smbdirect_mr;
+ int rc, num_pages;
+ enum dma_data_direction dir;
+@@ -2275,7 +2298,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ num_pages, iov_iter_count(iter), info->max_frmr_depth);
+ smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
+
+- rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
++ rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
+ smbdirect_mr->sgt.nents, dir);
+ if (!rc) {
+ log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+@@ -2311,7 +2334,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+ * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
+ * on the next ib_post_send when we actaully send I/O to remote peer
+ */
+- rc = ib_post_send(info->id->qp, ®_wr->wr, NULL);
++ rc = ib_post_send(sc->ib.qp, ®_wr->wr, NULL);
+ if (!rc)
+ return smbdirect_mr;
+
+@@ -2320,7 +2343,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+
+ /* If all failed, attempt to recover this MR by setting it MR_ERROR*/
+ map_mr_error:
+- ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
++ ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
+ smbdirect_mr->sgt.nents, smbdirect_mr->dir);
+
+ dma_map_error:
+@@ -2358,6 +2381,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+ {
+ struct ib_send_wr *wr;
+ struct smbd_connection *info = smbdirect_mr->conn;
++ struct smbdirect_socket *sc = &info->socket;
+ int rc = 0;
+
+ if (smbdirect_mr->need_invalidate) {
+@@ -2371,7 +2395,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+ wr->send_flags = IB_SEND_SIGNALED;
+
+ init_completion(&smbdirect_mr->invalidate_done);
+- rc = ib_post_send(info->id->qp, wr, NULL);
++ rc = ib_post_send(sc->ib.qp, wr, NULL);
+ if (rc) {
+ log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+ smbd_disconnect_rdma_connection(info);
+@@ -2388,7 +2412,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+
+ if (smbdirect_mr->state == MR_INVALIDATED) {
+ ib_dma_unmap_sg(
+- info->id->device, smbdirect_mr->sgt.sgl,
++ sc->ib.dev, smbdirect_mr->sgt.sgl,
+ smbdirect_mr->sgt.nents,
+ smbdirect_mr->dir);
+ smbdirect_mr->state = MR_READY;
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index c08e3665150d..c881e58c639d 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -15,6 +15,8 @@
+ #include <rdma/rdma_cm.h>
+ #include <linux/mempool.h>
+
++#include "../common/smbdirect/smbdirect_socket.h"
++
+ extern int rdma_readwrite_threshold;
+ extern int smbd_max_frmr_depth;
+ extern int smbd_keep_alive_interval;
+@@ -50,14 +52,8 @@ enum smbd_connection_status {
+ * 5. mempools for allocating packets
+ */
+ struct smbd_connection {
+- enum smbd_connection_status transport_status;
+-
+- /* RDMA related */
+- struct rdma_cm_id *id;
+- struct ib_qp_init_attr qp_attr;
+- struct ib_pd *pd;
+- struct ib_cq *send_cq, *recv_cq;
+- struct ib_device_attr dev_attr;
++ struct smbdirect_socket socket;
++
+ int ri_rc;
+ struct completion ri_done;
+ wait_queue_head_t conn_wait;
+--
+2.39.5
+
--- /dev/null
+From 9ea0b5859a4238354556a5e094bcec004b1df514 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Aug 2025 18:12:11 +0200
+Subject: smb: client: return an error if rdma_connect does not return within 5
+ seconds
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 03537826f77f1c829d0593d211b38b9c876c1722 ]
+
+This matches the timeout for tcp connections.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: f198186aa9bb ("CIFS: SMBD: Establish SMB Direct connection")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index c7f4eb8c9a10..71aef565db5f 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1647,8 +1647,10 @@ static struct smbd_connection *_smbd_get_connection(
+ goto rdma_connect_failed;
+ }
+
+- wait_event_interruptible(
+- info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
++ wait_event_interruptible_timeout(
++ info->conn_wait,
++ sc->status != SMBDIRECT_SOCKET_CONNECTING,
++ msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT));
+
+ if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+ log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+--
+2.39.5
+
--- /dev/null
+From a27bbb10df966852ddffd898ef05f97a9c81cff5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Aug 2024 16:54:20 +0800
+Subject: smb: client: Use min() macro
+
+From: Shen Lichuan <shenlichuan@vivo.com>
+
+[ Upstream commit 25e68c37caf2b87c7dbcd99c54ec3102db7e4296 ]
+
+Use the min() macro to simplify the function and improve
+its readability.
+
+Signed-off-by: Shen Lichuan <shenlichuan@vivo.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 5349ae5e05fa ("smb: client: let send_done() cleanup before calling smbd_disconnect_rdma_connection()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsacl.c | 2 +-
+ fs/smb/client/smbdirect.c | 6 ++----
+ 2 files changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
+index bf32bc22ebd6..7bd29e827c8f 100644
+--- a/fs/smb/client/cifsacl.c
++++ b/fs/smb/client/cifsacl.c
+@@ -187,7 +187,7 @@ compare_sids(const struct smb_sid *ctsid, const struct smb_sid *cwsid)
+ /* compare all of the subauth values if any */
+ num_sat = ctsid->num_subauth;
+ num_saw = cwsid->num_subauth;
+- num_subauth = num_sat < num_saw ? num_sat : num_saw;
++ num_subauth = min(num_sat, num_saw);
+ if (num_subauth) {
+ for (i = 0; i < num_subauth; ++i) {
+ if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) {
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index d74e829de51c..c41a44f4fc63 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1585,10 +1585,8 @@ static struct smbd_connection *_smbd_get_connection(
+ conn_param.initiator_depth = 0;
+
+ conn_param.responder_resources =
+- info->id->device->attrs.max_qp_rd_atom
+- < SMBD_CM_RESPONDER_RESOURCES ?
+- info->id->device->attrs.max_qp_rd_atom :
+- SMBD_CM_RESPONDER_RESOURCES;
++ min(info->id->device->attrs.max_qp_rd_atom,
++ SMBD_CM_RESPONDER_RESOURCES);
+ info->responder_resources = conn_param.responder_resources;
+ log_rdma_mr(INFO, "responder_resources=%d\n",
+ info->responder_resources);
+--
+2.39.5
+
--- /dev/null
+From f27fab220120a1838fb6c6fa6f674904a7990e54 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:53 +0200
+Subject: smb: server: let recv_done() avoid touching data_transfer after
+ cleanup/move
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit a6c015b7ac2d8c5233337e5793f50d04fac17669 ]
+
+Calling enqueue_reassembly() and wake_up_interruptible(&t->wait_reassembly_queue)
+or put_receive_buffer() means the recvmsg/data_transfer pointer might
+get re-used by another thread, which means these should be
+the last operations before calling return.
+
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index b22fe18212cf..6c3a57bff147 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -580,16 +580,11 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ else
+ t->full_packet_received = true;
+
+- enqueue_reassembly(t, recvmsg, (int)data_length);
+- wake_up_interruptible(&t->wait_reassembly_queue);
+-
+ spin_lock(&t->receive_credit_lock);
+ receive_credits = --(t->recv_credits);
+ avail_recvmsg_count = t->count_avail_recvmsg;
+ spin_unlock(&t->receive_credit_lock);
+ } else {
+- put_recvmsg(t, recvmsg);
+-
+ spin_lock(&t->receive_credit_lock);
+ receive_credits = --(t->recv_credits);
+ avail_recvmsg_count = ++(t->count_avail_recvmsg);
+@@ -611,6 +606,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+ mod_delayed_work(smb_direct_wq,
+ &t->post_recv_credits_work, 0);
++
++ if (data_length) {
++ enqueue_reassembly(t, recvmsg, (int)data_length);
++ wake_up_interruptible(&t->wait_reassembly_queue);
++ } else
++ put_recvmsg(t, recvmsg);
++
+ return;
+ }
+ }
+--
+2.39.5
+
--- /dev/null
+From e80bb5ebf058071a21e053077bc148fdbfbddebd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:52 +0200
+Subject: smb: server: let recv_done() consistently call
+ put_recvmsg/smb_direct_disconnect_rdma_connection
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit cfe76fdbb9729c650f3505d9cfb2f70ddda2dbdc ]
+
+We should call put_recvmsg() before smb_direct_disconnect_rdma_connection()
+in order to call it before waking up the callers.
+
+In all error cases we should call smb_direct_disconnect_rdma_connection()
+in order to avoid stale connections.
+
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index d28d85a46597..b22fe18212cf 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -520,13 +520,13 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ t = recvmsg->transport;
+
+ if (wc->status != IB_WC_SUCCESS || wc->opcode != IB_WC_RECV) {
++ put_recvmsg(t, recvmsg);
+ if (wc->status != IB_WC_WR_FLUSH_ERR) {
+ pr_err("Recv error. status='%s (%d)' opcode=%d\n",
+ ib_wc_status_msg(wc->status), wc->status,
+ wc->opcode);
+ smb_direct_disconnect_rdma_connection(t);
+ }
+- put_recvmsg(t, recvmsg);
+ return;
+ }
+
+@@ -541,6 +541,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+ if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
+ return;
+ }
+ t->negotiation_requested = true;
+@@ -548,7 +549,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ t->status = SMB_DIRECT_CS_CONNECTED;
+ enqueue_reassembly(t, recvmsg, 0);
+ wake_up_interruptible(&t->wait_status);
+- break;
++ return;
+ case SMB_DIRECT_MSG_DATA_TRANSFER: {
+ struct smb_direct_data_transfer *data_transfer =
+ (struct smb_direct_data_transfer *)recvmsg->packet;
+@@ -558,6 +559,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->byte_len <
+ offsetof(struct smb_direct_data_transfer, padding)) {
+ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
+ return;
+ }
+
+@@ -566,6 +568,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+ (u64)data_length) {
+ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
+ return;
+ }
+
+@@ -608,11 +611,16 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (is_receive_credit_post_required(receive_credits, avail_recvmsg_count))
+ mod_delayed_work(smb_direct_wq,
+ &t->post_recv_credits_work, 0);
+- break;
++ return;
+ }
+- default:
+- break;
+ }
++
++ /*
++ * This is an internal error!
++ */
++ WARN_ON_ONCE(recvmsg->type != SMB_DIRECT_MSG_DATA_TRANSFER);
++ put_recvmsg(t, recvmsg);
++ smb_direct_disconnect_rdma_connection(t);
+ }
+
+ static int smb_direct_post_recv(struct smb_direct_transport *t,
+--
+2.39.5
+
--- /dev/null
+From 19a9384f6ff802dfc8dda8edb912d49685d9bdb6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:51 +0200
+Subject: smb: server: make sure we call ib_dma_unmap_single() only if we
+ called ib_dma_map_single already
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit afb4108c92898350e66b9a009692230bcdd2ac73 ]
+
+In case of failures either ib_dma_map_single() might not be called yet
+or ib_dma_unmap_single() was already called.
+
+We should make sure put_recvmsg() only calls ib_dma_unmap_single() if needed.
+
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index 228b7627a115..d28d85a46597 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -264,8 +264,13 @@ smb_direct_recvmsg *get_free_recvmsg(struct smb_direct_transport *t)
+ static void put_recvmsg(struct smb_direct_transport *t,
+ struct smb_direct_recvmsg *recvmsg)
+ {
+- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+- recvmsg->sge.length, DMA_FROM_DEVICE);
++ if (likely(recvmsg->sge.length != 0)) {
++ ib_dma_unmap_single(t->cm_id->device,
++ recvmsg->sge.addr,
++ recvmsg->sge.length,
++ DMA_FROM_DEVICE);
++ recvmsg->sge.length = 0;
++ }
+
+ spin_lock(&t->recvmsg_queue_lock);
+ list_add(&recvmsg->list, &t->recvmsg_queue);
+@@ -637,6 +642,7 @@ static int smb_direct_post_recv(struct smb_direct_transport *t,
+ ib_dma_unmap_single(t->cm_id->device,
+ recvmsg->sge.addr, recvmsg->sge.length,
+ DMA_FROM_DEVICE);
++ recvmsg->sge.length = 0;
+ smb_direct_disconnect_rdma_connection(t);
+ return ret;
+ }
+@@ -1818,6 +1824,7 @@ static int smb_direct_create_pools(struct smb_direct_transport *t)
+ if (!recvmsg)
+ goto err;
+ recvmsg->transport = t;
++ recvmsg->sge.length = 0;
+ list_add(&recvmsg->list, &t->recvmsg_queue);
+ }
+ t->count_avail_recvmsg = t->recv_credit_max;
+--
+2.39.5
+
--- /dev/null
+From 4dae5fd61dc8bc370f921e742e845787659aafe5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Aug 2025 14:15:50 +0200
+Subject: smb: server: remove separate empty_recvmsg_queue
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 01027a62b508c48c762096f347de925eedcbd008 ]
+
+There's no need to maintain two lists, we can just
+have a single list of receive buffers, which are free to use.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Fixes: 0626e6641f6b ("cifsd: add server handler for central processing and tranport layers")
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Acked-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/server/transport_rdma.c | 60 +++++-----------------------------
+ 1 file changed, 8 insertions(+), 52 deletions(-)
+
+diff --git a/fs/smb/server/transport_rdma.c b/fs/smb/server/transport_rdma.c
+index eaef45977615..228b7627a115 100644
+--- a/fs/smb/server/transport_rdma.c
++++ b/fs/smb/server/transport_rdma.c
+@@ -128,9 +128,6 @@ struct smb_direct_transport {
+ spinlock_t recvmsg_queue_lock;
+ struct list_head recvmsg_queue;
+
+- spinlock_t empty_recvmsg_queue_lock;
+- struct list_head empty_recvmsg_queue;
+-
+ int send_credit_target;
+ atomic_t send_credits;
+ spinlock_t lock_new_recv_credits;
+@@ -275,32 +272,6 @@ static void put_recvmsg(struct smb_direct_transport *t,
+ spin_unlock(&t->recvmsg_queue_lock);
+ }
+
+-static struct
+-smb_direct_recvmsg *get_empty_recvmsg(struct smb_direct_transport *t)
+-{
+- struct smb_direct_recvmsg *recvmsg = NULL;
+-
+- spin_lock(&t->empty_recvmsg_queue_lock);
+- if (!list_empty(&t->empty_recvmsg_queue)) {
+- recvmsg = list_first_entry(&t->empty_recvmsg_queue,
+- struct smb_direct_recvmsg, list);
+- list_del(&recvmsg->list);
+- }
+- spin_unlock(&t->empty_recvmsg_queue_lock);
+- return recvmsg;
+-}
+-
+-static void put_empty_recvmsg(struct smb_direct_transport *t,
+- struct smb_direct_recvmsg *recvmsg)
+-{
+- ib_dma_unmap_single(t->cm_id->device, recvmsg->sge.addr,
+- recvmsg->sge.length, DMA_FROM_DEVICE);
+-
+- spin_lock(&t->empty_recvmsg_queue_lock);
+- list_add_tail(&recvmsg->list, &t->empty_recvmsg_queue);
+- spin_unlock(&t->empty_recvmsg_queue_lock);
+-}
+-
+ static void enqueue_reassembly(struct smb_direct_transport *t,
+ struct smb_direct_recvmsg *recvmsg,
+ int data_length)
+@@ -385,9 +356,6 @@ static struct smb_direct_transport *alloc_transport(struct rdma_cm_id *cm_id)
+ spin_lock_init(&t->recvmsg_queue_lock);
+ INIT_LIST_HEAD(&t->recvmsg_queue);
+
+- spin_lock_init(&t->empty_recvmsg_queue_lock);
+- INIT_LIST_HEAD(&t->empty_recvmsg_queue);
+-
+ init_waitqueue_head(&t->wait_send_pending);
+ atomic_set(&t->send_pending, 0);
+
+@@ -553,7 +521,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ wc->opcode);
+ smb_direct_disconnect_rdma_connection(t);
+ }
+- put_empty_recvmsg(t, recvmsg);
++ put_recvmsg(t, recvmsg);
+ return;
+ }
+
+@@ -567,7 +535,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ switch (recvmsg->type) {
+ case SMB_DIRECT_MSG_NEGOTIATE_REQ:
+ if (wc->byte_len < sizeof(struct smb_direct_negotiate_req)) {
+- put_empty_recvmsg(t, recvmsg);
++ put_recvmsg(t, recvmsg);
+ return;
+ }
+ t->negotiation_requested = true;
+@@ -584,7 +552,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+
+ if (wc->byte_len <
+ offsetof(struct smb_direct_data_transfer, padding)) {
+- put_empty_recvmsg(t, recvmsg);
++ put_recvmsg(t, recvmsg);
+ return;
+ }
+
+@@ -592,7 +560,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ if (data_length) {
+ if (wc->byte_len < sizeof(struct smb_direct_data_transfer) +
+ (u64)data_length) {
+- put_empty_recvmsg(t, recvmsg);
++ put_recvmsg(t, recvmsg);
+ return;
+ }
+
+@@ -612,7 +580,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ avail_recvmsg_count = t->count_avail_recvmsg;
+ spin_unlock(&t->receive_credit_lock);
+ } else {
+- put_empty_recvmsg(t, recvmsg);
++ put_recvmsg(t, recvmsg);
+
+ spin_lock(&t->receive_credit_lock);
+ receive_credits = --(t->recv_credits);
+@@ -810,7 +778,6 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
+ struct smb_direct_recvmsg *recvmsg;
+ int receive_credits, credits = 0;
+ int ret;
+- int use_free = 1;
+
+ spin_lock(&t->receive_credit_lock);
+ receive_credits = t->recv_credits;
+@@ -818,18 +785,9 @@ static void smb_direct_post_recv_credits(struct work_struct *work)
+
+ if (receive_credits < t->recv_credit_target) {
+ while (true) {
+- if (use_free)
+- recvmsg = get_free_recvmsg(t);
+- else
+- recvmsg = get_empty_recvmsg(t);
+- if (!recvmsg) {
+- if (use_free) {
+- use_free = 0;
+- continue;
+- } else {
+- break;
+- }
+- }
++ recvmsg = get_free_recvmsg(t);
++ if (!recvmsg)
++ break;
+
+ recvmsg->type = SMB_DIRECT_MSG_DATA_TRANSFER;
+ recvmsg->first_segment = false;
+@@ -1805,8 +1763,6 @@ static void smb_direct_destroy_pools(struct smb_direct_transport *t)
+
+ while ((recvmsg = get_free_recvmsg(t)))
+ mempool_free(recvmsg, t->recvmsg_mempool);
+- while ((recvmsg = get_empty_recvmsg(t)))
+- mempool_free(recvmsg, t->recvmsg_mempool);
+
+ mempool_destroy(t->recvmsg_mempool);
+ t->recvmsg_mempool = NULL;
+--
+2.39.5
+
--- /dev/null
+From 3c531337129e7e140b4fd1cdca36fb4abce3703f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:36 +0200
+Subject: smb: smbdirect: add smbdirect_socket.h
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 22234e37d7e97652cb53133009da5e14793d3c10 ]
+
+This abstracts the common smbdirect layer.
+
+Currently with just a few things in it,
+but that will change over time until everything is
+in common.
+
+Will be used in client and server in the next commits
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 5349ae5e05fa ("smb: client: let send_done() cleanup before calling smbd_disconnect_rdma_connection()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/common/smbdirect/smbdirect_socket.h | 41 ++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+ create mode 100644 fs/smb/common/smbdirect/smbdirect_socket.h
+
+diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
+new file mode 100644
+index 000000000000..69a55561f91a
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect_socket.h
+@@ -0,0 +1,41 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ * Copyright (c) 2025 Stefan Metzmacher
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
++
++enum smbdirect_socket_status {
++ SMBDIRECT_SOCKET_CREATED,
++ SMBDIRECT_SOCKET_CONNECTING,
++ SMBDIRECT_SOCKET_CONNECTED,
++ SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
++ SMBDIRECT_SOCKET_DISCONNECTING,
++ SMBDIRECT_SOCKET_DISCONNECTED,
++ SMBDIRECT_SOCKET_DESTROYED
++};
++
++struct smbdirect_socket {
++ enum smbdirect_socket_status status;
++
++ /* RDMA related */
++ struct {
++ struct rdma_cm_id *cm_id;
++ } rdma;
++
++ /* IB verbs related */
++ struct {
++ struct ib_pd *pd;
++ struct ib_cq *send_cq;
++ struct ib_cq *recv_cq;
++
++ /*
++ * shortcuts for rdma.cm_id->{qp,device};
++ */
++ struct ib_qp *qp;
++ struct ib_device *dev;
++ } ib;
++};
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
+--
+2.39.5
+
--- /dev/null
+From cc17b21883fd0436d19733a4ddfe5557ecd8fccb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Jul 2025 14:00:56 -0400
+Subject: sunrpc: fix client side handling of tls alerts
+
+From: Olga Kornievskaia <okorniev@redhat.com>
+
+[ Upstream commit cc5d59081fa26506d02de2127ab822f40d88bc5a ]
+
+A security exploit was discovered in NFS over TLS in tls_alert_recv
+due to its assumption that there is valid data in the msghdr's
+iterator's kvec.
+
+Instead, this patch proposes the rework how control messages are
+setup and used by sock_recvmsg().
+
+If no control message structure is setup, kTLS layer will read and
+process TLS data record types. As soon as it encounters a TLS control
+message, it would return an error. At that point, NFS can setup a kvec
+backed control buffer and read in the control message such as a TLS
+alert. Scott found that a msg iterator can advance the kvec pointer
+as a part of the copy process thus we need to revert the iterator
+before calling into the tls_alert_recv.
+
+Fixes: dea034b963c8 ("SUNRPC: Capture CMSG metadata on client-side receive")
+Suggested-by: Trond Myklebust <trondmy@hammerspace.com>
+Suggested-by: Scott Mayhew <smayhew@redhat.com>
+Signed-off-by: Olga Kornievskaia <okorniev@redhat.com>
+Link: https://lore.kernel.org/r/20250731180058.4669-3-okorniev@redhat.com
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtsock.c | 40 ++++++++++++++++++++++++++++++----------
+ 1 file changed, 30 insertions(+), 10 deletions(-)
+
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index f90d84492bbe..99bb3e762af4 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -358,7 +358,7 @@ xs_alloc_sparse_pages(struct xdr_buf *buf, size_t want, gfp_t gfp)
+
+ static int
+ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+- struct cmsghdr *cmsg, int ret)
++ unsigned int *msg_flags, struct cmsghdr *cmsg, int ret)
+ {
+ u8 content_type = tls_get_record_type(sock->sk, cmsg);
+ u8 level, description;
+@@ -371,7 +371,7 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ * record, even though there might be more frames
+ * waiting to be decrypted.
+ */
+- msg->msg_flags &= ~MSG_EOR;
++ *msg_flags &= ~MSG_EOR;
+ break;
+ case TLS_RECORD_TYPE_ALERT:
+ tls_alert_recv(sock->sk, msg, &level, &description);
+@@ -386,19 +386,33 @@ xs_sock_process_cmsg(struct socket *sock, struct msghdr *msg,
+ }
+
+ static int
+-xs_sock_recv_cmsg(struct socket *sock, struct msghdr *msg, int flags)
++xs_sock_recv_cmsg(struct socket *sock, unsigned int *msg_flags, int flags)
+ {
+ union {
+ struct cmsghdr cmsg;
+ u8 buf[CMSG_SPACE(sizeof(u8))];
+ } u;
++ u8 alert[2];
++ struct kvec alert_kvec = {
++ .iov_base = alert,
++ .iov_len = sizeof(alert),
++ };
++ struct msghdr msg = {
++ .msg_flags = *msg_flags,
++ .msg_control = &u,
++ .msg_controllen = sizeof(u),
++ };
+ int ret;
+
+- msg->msg_control = &u;
+- msg->msg_controllen = sizeof(u);
+- ret = sock_recvmsg(sock, msg, flags);
+- if (msg->msg_controllen != sizeof(u))
+- ret = xs_sock_process_cmsg(sock, msg, &u.cmsg, ret);
++ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &alert_kvec, 1,
++ alert_kvec.iov_len);
++ ret = sock_recvmsg(sock, &msg, flags);
++ if (ret > 0 &&
++ tls_get_record_type(sock->sk, &u.cmsg) == TLS_RECORD_TYPE_ALERT) {
++ iov_iter_revert(&msg.msg_iter, ret);
++ ret = xs_sock_process_cmsg(sock, &msg, msg_flags, &u.cmsg,
++ -EAGAIN);
++ }
+ return ret;
+ }
+
+@@ -408,7 +422,13 @@ xs_sock_recvmsg(struct socket *sock, struct msghdr *msg, int flags, size_t seek)
+ ssize_t ret;
+ if (seek != 0)
+ iov_iter_advance(&msg->msg_iter, seek);
+- ret = xs_sock_recv_cmsg(sock, msg, flags);
++ ret = sock_recvmsg(sock, msg, flags);
++ /* Handle TLS inband control message lazily */
++ if (msg->msg_flags & MSG_CTRUNC) {
++ msg->msg_flags &= ~(MSG_CTRUNC | MSG_EOR);
++ if (ret == 0 || ret == -EIO)
++ ret = xs_sock_recv_cmsg(sock, &msg->msg_flags, flags);
++ }
+ return ret > 0 ? ret + seek : ret;
+ }
+
+@@ -434,7 +454,7 @@ xs_read_discard(struct socket *sock, struct msghdr *msg, int flags,
+ size_t count)
+ {
+ iov_iter_discard(&msg->msg_iter, ITER_DEST, count);
+- return xs_sock_recv_cmsg(sock, msg, flags);
++ return xs_sock_recvmsg(sock, msg, flags, 0);
+ }
+
+ #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+--
+2.39.5
+