Fixes for 6.12
author Sasha Levin <sashal@kernel.org>
Sun, 29 Jun 2025 14:24:07 +0000 (10:24 -0400)
committer Sasha Levin <sashal@kernel.org>
Sun, 29 Jun 2025 14:24:07 +0000 (10:24 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
38 files changed:
queue-6.12/af_unix-don-t-set-econnreset-for-consumed-oob-skb.patch [new file with mode: 0644]
queue-6.12/alsa-hda-realtek-fix-built-in-mic-on-asus-vivobook-x.patch [new file with mode: 0644]
queue-6.12/alsa-usb-audio-fix-out-of-bounds-read-in-snd_usb_get.patch [new file with mode: 0644]
queue-6.12/atm-clip-prevent-null-deref-in-clip_push.patch [new file with mode: 0644]
queue-6.12/atm-release-atm_dev_mutex-after-removing-procfs-in-a.patch [new file with mode: 0644]
queue-6.12/attach_recursive_mnt-do-not-lock-the-covering-tree-w.patch [new file with mode: 0644]
queue-6.12/bluetooth-hci_core-fix-use-after-free-in-vhci_flush.patch [new file with mode: 0644]
queue-6.12/bnxt-properly-flush-xdp-redirect-lists.patch [new file with mode: 0644]
queue-6.12/cifs-fix-reading-into-an-iter_folioq-from-the-smbdir.patch [new file with mode: 0644]
queue-6.12/cifs-fix-the-smbd_response-slab-to-allow-usercopy.patch [new file with mode: 0644]
queue-6.12/drm-amd-adjust-output-for-discovery-error-handling.patch [new file with mode: 0644]
queue-6.12/drm-amdgpu-discovery-optionally-use-fw-based-ip-disc.patch [new file with mode: 0644]
queue-6.12/drm-bridge-ti-sn65dsi86-add-hpd-for-displayport-conn.patch [new file with mode: 0644]
queue-6.12/drm-bridge-ti-sn65dsi86-make-use-of-debugfs_init-cal.patch [new file with mode: 0644]
queue-6.12/drm-i915-fix-build-error-some-more.patch [new file with mode: 0644]
queue-6.12/drm-xe-process-deferred-ggtt-node-removals-on-device.patch [new file with mode: 0644]
queue-6.12/ethernet-ionic-fix-dma-mapping-tests.patch [new file with mode: 0644]
queue-6.12/hid-wacom-fix-crash-in-wacom_aes_battery_handler.patch [new file with mode: 0644]
queue-6.12/libbpf-fix-null-pointer-dereference-in-btf_dump__fre.patch [new file with mode: 0644]
queue-6.12/libbpf-fix-possible-use-after-free-for-externs.patch [new file with mode: 0644]
queue-6.12/net-enetc-correct-endianness-handling-in-_enetc_rd_r.patch [new file with mode: 0644]
queue-6.12/net-selftests-fix-tcp-packet-checksum.patch [new file with mode: 0644]
queue-6.12/netlink-specs-tc-replace-underscores-with-dashes-in-.patch [new file with mode: 0644]
queue-6.12/series
queue-6.12/smb-client-fix-potential-deadlock-when-reconnecting-.patch [new file with mode: 0644]
queue-6.12/smb-client-make-use-of-common-smbdirect_pdu.h.patch [new file with mode: 0644]
queue-6.12/smb-client-make-use-of-common-smbdirect_socket.patch [new file with mode: 0644]
queue-6.12/smb-client-make-use-of-common-smbdirect_socket_param.patch [new file with mode: 0644]
queue-6.12/smb-smbdirect-add-smbdirect.h-with-public-structures.patch [new file with mode: 0644]
queue-6.12/smb-smbdirect-add-smbdirect_pdu.h-with-protocol-defi.patch [new file with mode: 0644]
queue-6.12/smb-smbdirect-add-smbdirect_socket.h.patch [new file with mode: 0644]
queue-6.12/smb-smbdirect-introduce-smbdirect_socket_parameters.patch [new file with mode: 0644]
queue-6.12/um-ubd-add-missing-error-check-in-start_io_thread.patch [new file with mode: 0644]
queue-6.12/vsock-uapi-fix-linux-vm_sockets.h-userspace-compilat.patch [new file with mode: 0644]
queue-6.12/wifi-mac80211-add-link-iteration-macro-for-link-data.patch [new file with mode: 0644]
queue-6.12/wifi-mac80211-create-separate-links-for-vlan-interfa.patch [new file with mode: 0644]
queue-6.12/wifi-mac80211-finish-link-init-before-rcu-publish.patch [new file with mode: 0644]
queue-6.12/wifi-mac80211-fix-beacon-interval-calculation-overfl.patch [new file with mode: 0644]

diff --git a/queue-6.12/af_unix-don-t-set-econnreset-for-consumed-oob-skb.patch b/queue-6.12/af_unix-don-t-set-econnreset-for-consumed-oob-skb.patch
new file mode 100644 (file)
index 0000000..d60a4ab
--- /dev/null
@@ -0,0 +1,99 @@
+From 876181dcd8355cf3a1db1c93ae359f17dff0d561 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Jun 2025 21:13:57 -0700
+Subject: af_unix: Don't set -ECONNRESET for consumed OOB skb.
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 2a5a4841846b079b5fca5752fe94e59346fbda40 ]
+
+Christian Brauner reported that even after MSG_OOB data is consumed,
+calling close() on the receiver socket causes the peer's recv() to
+return -ECONNRESET:
+
+  1. send() and recv() an OOB data.
+
+    >>> from socket import *
+    >>> s1, s2 = socketpair(AF_UNIX, SOCK_STREAM)
+    >>> s1.send(b'x', MSG_OOB)
+    1
+    >>> s2.recv(1, MSG_OOB)
+    b'x'
+
+  2. close() for s2 sets ECONNRESET to s1->sk_err even though
+     s2 consumed the OOB data
+
+    >>> s2.close()
+    >>> s1.recv(10, MSG_DONTWAIT)
+    ...
+    ConnectionResetError: [Errno 104] Connection reset by peer
+
+Even after being consumed, the skb holding the OOB 1-byte data stays in
+the recv queue to mark the OOB boundary and break recv() at that point.
+
+This must be considered while close()ing a socket.
+
+Let's skip the leading consumed OOB skb while checking the -ECONNRESET
+condition in unix_release_sock().
+
+Fixes: 314001f0bf92 ("af_unix: Add OOB support")
+Reported-by: Christian Brauner <brauner@kernel.org>
+Closes: https://lore.kernel.org/netdev/20250529-sinkt-abfeuern-e7b08200c6b0@brauner/
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Acked-by: Christian Brauner <brauner@kernel.org>
+Link: https://patch.msgid.link/20250619041457.1132791-4-kuni1840@gmail.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2dfd3b70a7178..45f8e21829ecd 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -666,6 +666,11 @@ static void unix_sock_destructor(struct sock *sk)
+ #endif
+ }
++static unsigned int unix_skb_len(const struct sk_buff *skb)
++{
++      return skb->len - UNIXCB(skb).consumed;
++}
++
+ static void unix_release_sock(struct sock *sk, int embrion)
+ {
+       struct unix_sock *u = unix_sk(sk);
+@@ -700,10 +705,16 @@ static void unix_release_sock(struct sock *sk, int embrion)
+       if (skpair != NULL) {
+               if (sk->sk_type == SOCK_STREAM || sk->sk_type == SOCK_SEQPACKET) {
++                      struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
++
++#if IS_ENABLED(CONFIG_AF_UNIX_OOB)
++                      if (skb && !unix_skb_len(skb))
++                              skb = skb_peek_next(skb, &sk->sk_receive_queue);
++#endif
+                       unix_state_lock(skpair);
+                       /* No more writes */
+                       WRITE_ONCE(skpair->sk_shutdown, SHUTDOWN_MASK);
+-                      if (!skb_queue_empty_lockless(&sk->sk_receive_queue) || embrion)
++                      if (skb || embrion)
+                               WRITE_ONCE(skpair->sk_err, ECONNRESET);
+                       unix_state_unlock(skpair);
+                       skpair->sk_state_change(skpair);
+@@ -2594,11 +2605,6 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
+       return timeo;
+ }
+-static unsigned int unix_skb_len(const struct sk_buff *skb)
+-{
+-      return skb->len - UNIXCB(skb).consumed;
+-}
+-
+ struct unix_stream_read_state {
+       int (*recv_actor)(struct sk_buff *, int, int,
+                         struct unix_stream_read_state *);
+-- 
+2.39.5
+
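For illustration only (not part of the patch above): a minimal userspace C sketch of the same sequence, assuming a Linux host. With the fix applied, the final recv() should report end-of-file rather than failing with ECONNRESET once the OOB byte has been consumed.

    #include <errno.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    int main(void)
    {
            int sv[2];
            char c;
            ssize_t n;

            if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv))
                    return 1;

            send(sv[0], "x", 1, MSG_OOB);   /* queue one OOB byte */
            recv(sv[1], &c, 1, MSG_OOB);    /* peer consumes it */
            close(sv[1]);                   /* consumed OOB skb still marks the boundary */

            n = recv(sv[0], &c, 1, MSG_DONTWAIT);
            /* unpatched: n == -1 and errno == ECONNRESET; patched: n == 0 (EOF) */
            printf("recv() = %zd, errno = %d\n", n, n < 0 ? errno : 0);
            return 0;
    }
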
diff --git a/queue-6.12/alsa-hda-realtek-fix-built-in-mic-on-asus-vivobook-x.patch b/queue-6.12/alsa-hda-realtek-fix-built-in-mic-on-asus-vivobook-x.patch
new file mode 100644 (file)
index 0000000..c8bdcc9
--- /dev/null
@@ -0,0 +1,39 @@
+From 56c4baa674cb8c4459d249956f42ca90d924aba2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jun 2025 20:41:28 +0200
+Subject: ALSA: hda/realtek: Fix built-in mic on ASUS VivoBook X507UAR
+
+From: Salvatore Bonaccorso <carnil@debian.org>
+
+[ Upstream commit 7ab6847a03229e73bb7c58ca397630f699e79b53 ]
+
+The built-in mic of the ASUS VivoBook X507UAR was recently broken by the
+pin-sort fix. The fixup ALC256_FIXUP_ASUS_MIC_NO_PRESENCE addresses
+this regression, too.
+
+Fixes: 3b4309546b48 ("ALSA: hda: Fix headset detection failure due to unstable sort")
+Reported-by: Igor Tamara <igor.tamara@gmail.com>
+Closes: https://bugs.debian.org/1108069
+Signed-off-by: Salvatore Bonaccorso <carnil@debian.org>
+Link: https://lore.kernel.org/CADdHDco7_o=4h_epjEAb92Dj-vUz_PoTC2-W9g5ncT2E0NzfeQ@mail.gmail.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index cb41cd2ba0ef1..94c5151c456d6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -10933,6 +10933,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1da2, "ASUS UP6502ZA/ZD", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x1df3, "ASUS UM5606WA", ALC294_FIXUP_BASS_SPEAKER_15),
+       SND_PCI_QUIRK(0x1043, 0x1e02, "ASUS UX3402ZA", ALC245_FIXUP_CS35L41_SPI_2),
++      SND_PCI_QUIRK(0x1043, 0x1e10, "ASUS VivoBook X507UAR", ALC256_FIXUP_ASUS_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1043, 0x1e11, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA502),
+       SND_PCI_QUIRK(0x1043, 0x1e12, "ASUS UM3402", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x1043, 0x1e1f, "ASUS Vivobook 15 X1504VAP", ALC2XX_FIXUP_HEADSET_MIC),
+-- 
+2.39.5
+
diff --git a/queue-6.12/alsa-usb-audio-fix-out-of-bounds-read-in-snd_usb_get.patch b/queue-6.12/alsa-usb-audio-fix-out-of-bounds-read-in-snd_usb_get.patch
new file mode 100644 (file)
index 0000000..4139c99
--- /dev/null
@@ -0,0 +1,47 @@
+From 8b2401d08f13f5faa261bde98eaef3b29e3cc8fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jun 2025 20:05:25 +0900
+Subject: ALSA: usb-audio: Fix out-of-bounds read in
+ snd_usb_get_audioformat_uac3()
+
+From: Youngjun Lee <yjjuny.lee@samsung.com>
+
+[ Upstream commit fb4e2a6e8f28a3c0ad382e363aeb9cd822007b8a ]
+
+In snd_usb_get_audioformat_uac3(), the length value returned from
+snd_usb_ctl_msg() is used directly for memory allocation without
+validation. This length is controlled by the USB device.
+
+The allocated buffer is cast to a uac3_cluster_header_descriptor
+and its fields are accessed without verifying that the buffer
+is large enough. If the device returns a smaller than expected
+length, this leads to an out-of-bounds read.
+
+Add a length check to ensure the buffer is large enough for
+uac3_cluster_header_descriptor.
+
+Signed-off-by: Youngjun Lee <yjjuny.lee@samsung.com>
+Fixes: 9a2fe9b801f5 ("ALSA: usb: initial USB Audio Device Class 3.0 support")
+Link: https://patch.msgid.link/20250623-uac3-oob-fix-v1-1-527303eaf40a@samsung.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/stream.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/usb/stream.c b/sound/usb/stream.c
+index c1ea8844a46fc..aa91d63749f2c 100644
+--- a/sound/usb/stream.c
++++ b/sound/usb/stream.c
+@@ -987,6 +987,8 @@ snd_usb_get_audioformat_uac3(struct snd_usb_audio *chip,
+        * and request Cluster Descriptor
+        */
+       wLength = le16_to_cpu(hc_header.wLength);
++      if (wLength < sizeof(cluster))
++              return NULL;
+       cluster = kzalloc(wLength, GFP_KERNEL);
+       if (!cluster)
+               return ERR_PTR(-ENOMEM);
+-- 
+2.39.5
+
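The pattern behind the two added lines, as a hedged sketch with an illustrative helper name (this is not the driver's actual code): a length reported by the device must be validated against the structure it is supposed to hold before it is used for allocation and field access.

    /* Assumes kernel context (<linux/slab.h>, <linux/usb/audio-v3.h>). */
    static void *alloc_cluster(size_t dev_reported_len)
    {
            struct uac3_cluster_header_descriptor *cluster;

            if (dev_reported_len < sizeof(*cluster))
                    return NULL;            /* short reply would lead to an OOB read */

            cluster = kzalloc(dev_reported_len, GFP_KERNEL);
            if (!cluster)
                    return ERR_PTR(-ENOMEM);

            /* ... copy the device's reply in, then cluster->... is safe to read ... */
            return cluster;
    }
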
diff --git a/queue-6.12/atm-clip-prevent-null-deref-in-clip_push.patch b/queue-6.12/atm-clip-prevent-null-deref-in-clip_push.patch
new file mode 100644 (file)
index 0000000..ba8dd00
--- /dev/null
@@ -0,0 +1,60 @@
+From 521a63a9e957fdb5b497b39562d827f427ff17ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jun 2025 14:28:44 +0000
+Subject: atm: clip: prevent NULL deref in clip_push()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit b993ea46b3b601915ceaaf3c802adf11e7d6bac6 ]
+
+Blamed commit missed that vcc_destroy_socket() calls
+clip_push() with a NULL skb.
+
+If clip_devs is NULL, clip_push() then crashes when reading
+skb->truesize.
+
+Fixes: 93a2014afbac ("atm: fix a UAF in lec_arp_clear_vccs()")
+Reported-by: syzbot+1316233c4c6803382a8b@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/68556f59.a00a0220.137b3.004e.GAE@google.com/T/#u
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Cc: Gengming Liu <l.dmxcsnsbh@gmail.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/clip.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/net/atm/clip.c b/net/atm/clip.c
+index 42b910cb4e8ee..0d7744442b25a 100644
+--- a/net/atm/clip.c
++++ b/net/atm/clip.c
+@@ -193,12 +193,6 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
+       pr_debug("\n");
+-      if (!clip_devs) {
+-              atm_return(vcc, skb->truesize);
+-              kfree_skb(skb);
+-              return;
+-      }
+-
+       if (!skb) {
+               pr_debug("removing VCC %p\n", clip_vcc);
+               if (clip_vcc->entry)
+@@ -208,6 +202,11 @@ static void clip_push(struct atm_vcc *vcc, struct sk_buff *skb)
+               return;
+       }
+       atm_return(vcc, skb->truesize);
++      if (!clip_devs) {
++              kfree_skb(skb);
++              return;
++      }
++
+       skb->dev = clip_vcc->entry ? clip_vcc->entry->neigh->dev : clip_devs;
+       /* clip_vcc->entry == NULL if we don't have an IP address yet */
+       if (!skb->dev) {
+-- 
+2.39.5
+
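Reduced to a hedged sketch (illustrative, not the exact net/atm code): the NULL skb is a control signal and must be recognised before any skb field is read; only afterwards can the "no CLIP device yet" case account for and drop the packet.

    static void push(struct atm_vcc *vcc, struct sk_buff *skb)
    {
            if (!skb) {                     /* teardown signal from vcc_destroy_socket() */
                    /* ... detach the VCC and wake up waiters ... */
                    return;
            }

            atm_return(vcc, skb->truesize); /* safe: skb is known non-NULL here */
            if (!clip_devs) {               /* no CLIP device registered yet */
                    kfree_skb(skb);
                    return;
            }
            /* ... normal receive path ... */
    }
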
diff --git a/queue-6.12/atm-release-atm_dev_mutex-after-removing-procfs-in-a.patch b/queue-6.12/atm-release-atm_dev_mutex-after-removing-procfs-in-a.patch
new file mode 100644 (file)
index 0000000..f215e97
--- /dev/null
@@ -0,0 +1,106 @@
+From 0333279f28d1cf9b23d6d3a9e89e61c86b6b8314 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 14:45:00 -0700
+Subject: atm: Release atm_dev_mutex after removing procfs in
+ atm_dev_deregister().
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit a433791aeaea6e84df709e0b9584b9bbe040cd1c ]
+
+syzbot reported a warning below during atm_dev_register(). [0]
+
+Before creating a new device and procfs/sysfs for it, atm_dev_register()
+looks up a duplicated device by __atm_dev_lookup().  These operations are
+done under atm_dev_mutex.
+
+However, when removing a device in atm_dev_deregister(), it releases the
+mutex just after removing the device from the list that __atm_dev_lookup()
+iterates over.
+
+So, there will be a small race window where the device does not exist on
+the device list but procfs/sysfs are still not removed, triggering the
+splat.
+
+Let's hold the mutex until procfs/sysfs are removed in
+atm_dev_deregister().
+
+[0]:
+proc_dir_entry 'atm/atmtcp:0' already registered
+WARNING: CPU: 0 PID: 5919 at fs/proc/generic.c:377 proc_register+0x455/0x5f0 fs/proc/generic.c:377
+Modules linked in:
+CPU: 0 UID: 0 PID: 5919 Comm: syz-executor284 Not tainted 6.16.0-rc2-syzkaller-00047-g52da431bf03b #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/07/2025
+RIP: 0010:proc_register+0x455/0x5f0 fs/proc/generic.c:377
+Code: 48 89 f9 48 c1 e9 03 80 3c 01 00 0f 85 a2 01 00 00 48 8b 44 24 10 48 c7 c7 20 c0 c2 8b 48 8b b0 d8 00 00 00 e8 0c 02 1c ff 90 <0f> 0b 90 90 48 c7 c7 80 f2 82 8e e8 0b de 23 09 48 8b 4c 24 28 48
+RSP: 0018:ffffc9000466fa30 EFLAGS: 00010282
+RAX: 0000000000000000 RBX: 0000000000000000 RCX: ffffffff817ae248
+RDX: ffff888026280000 RSI: ffffffff817ae255 RDI: 0000000000000001
+RBP: ffff8880232bed48 R08: 0000000000000001 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000001 R12: ffff888076ed2140
+R13: dffffc0000000000 R14: ffff888078a61340 R15: ffffed100edda444
+FS:  00007f38b3b0c6c0(0000) GS:ffff888124753000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f38b3bdf953 CR3: 0000000076d58000 CR4: 00000000003526f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ proc_create_data+0xbe/0x110 fs/proc/generic.c:585
+ atm_proc_dev_register+0x112/0x1e0 net/atm/proc.c:361
+ atm_dev_register+0x46d/0x890 net/atm/resources.c:113
+ atmtcp_create+0x77/0x210 drivers/atm/atmtcp.c:369
+ atmtcp_attach drivers/atm/atmtcp.c:403 [inline]
+ atmtcp_ioctl+0x2f9/0xd60 drivers/atm/atmtcp.c:464
+ do_vcc_ioctl+0x12c/0x930 net/atm/ioctl.c:159
+ sock_do_ioctl+0x115/0x280 net/socket.c:1190
+ sock_ioctl+0x227/0x6b0 net/socket.c:1311
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl fs/ioctl.c:893 [inline]
+ __x64_sys_ioctl+0x18b/0x210 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xcd/0x4c0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7f38b3b74459
+Code: 28 00 00 00 75 05 48 83 c4 28 c3 e8 51 18 00 00 90 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007f38b3b0c198 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007f38b3bfe318 RCX: 00007f38b3b74459
+RDX: 0000000000000000 RSI: 0000000000006180 RDI: 0000000000000005
+RBP: 00007f38b3bfe310 R08: 65732f636f72702f R09: 65732f636f72702f
+R10: 65732f636f72702f R11: 0000000000000246 R12: 00007f38b3bcb0ac
+R13: 00007f38b3b0c1a0 R14: 0000200000000200 R15: 00007f38b3bcb03b
+ </TASK>
+
+Fixes: 64bf69ddff76 ("[ATM]: deregistration removes device from atm_devs list immediately")
+Reported-by: syzbot+8bd335d2ad3b93e80715@syzkaller.appspotmail.com
+Closes: https://lore.kernel.org/netdev/685316de.050a0220.216029.0087.GAE@google.com/
+Tested-by: syzbot+8bd335d2ad3b93e80715@syzkaller.appspotmail.com
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Link: https://patch.msgid.link/20250624214505.570679-1-kuni1840@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/resources.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/net/atm/resources.c b/net/atm/resources.c
+index 995d29e7fb138..b19d851e1f443 100644
+--- a/net/atm/resources.c
++++ b/net/atm/resources.c
+@@ -146,11 +146,10 @@ void atm_dev_deregister(struct atm_dev *dev)
+        */
+       mutex_lock(&atm_dev_mutex);
+       list_del(&dev->dev_list);
+-      mutex_unlock(&atm_dev_mutex);
+-
+       atm_dev_release_vccs(dev);
+       atm_unregister_sysfs(dev);
+       atm_proc_dev_deregister(dev);
++      mutex_unlock(&atm_dev_mutex);
+       atm_dev_put(dev);
+ }
+-- 
+2.39.5
+
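The locking rule the patch restores, as a hedged sketch with stand-in helpers (remove_sysfs()/remove_procfs() are illustrative): because registration looks up duplicates and then creates the procfs entry under the same mutex, deregistration has to keep holding that mutex until the procfs/sysfs entries are gone.

    void deregister(struct atm_dev *dev)
    {
            mutex_lock(&atm_dev_mutex);
            list_del(&dev->dev_list);       /* now invisible to __atm_dev_lookup() */
            remove_sysfs(dev);              /* stand-in for atm_unregister_sysfs() */
            remove_procfs(dev);             /* stand-in for atm_proc_dev_deregister() */
            mutex_unlock(&atm_dev_mutex);   /* only now may a new device reuse the name */
            atm_dev_put(dev);
    }
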
diff --git a/queue-6.12/attach_recursive_mnt-do-not-lock-the-covering-tree-w.patch b/queue-6.12/attach_recursive_mnt-do-not-lock-the-covering-tree-w.patch
new file mode 100644 (file)
index 0000000..a629bae
--- /dev/null
@@ -0,0 +1,51 @@
+From 17e857ecab67e9fa97a212c83b1ce1f7c1ed1392 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 22 Jun 2025 18:03:29 -0400
+Subject: attach_recursive_mnt(): do not lock the covering tree when sliding
+ something under it
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+[ Upstream commit ce7df19686530920f2f6b636e71ce5eb1d9303ef ]
+
+If we are propagating across the userns boundary, we need to lock the
+mounts added there.  However, in case when something has already
+been mounted there and we end up sliding a new tree under that,
+the stuff that had been there before should not get locked.
+
+IOW, lock_mnt_tree() should be called before we reparent the
+preexisting tree on top of what we are adding.
+
+Fixes: 3bd045cc9c4b ("separate copying and locking mount tree on cross-userns copies")
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/namespace.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/fs/namespace.c b/fs/namespace.c
+index 843bc6191f30b..b5c5cf01d0c40 100644
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2521,14 +2521,14 @@ static int attach_recursive_mnt(struct mount *source_mnt,
+       hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
+               struct mount *q;
+               hlist_del_init(&child->mnt_hash);
+-              q = __lookup_mnt(&child->mnt_parent->mnt,
+-                               child->mnt_mountpoint);
+-              if (q)
+-                      mnt_change_mountpoint(child, smp, q);
+               /* Notice when we are propagating across user namespaces */
+               if (child->mnt_parent->mnt_ns->user_ns != user_ns)
+                       lock_mnt_tree(child);
+               child->mnt.mnt_flags &= ~MNT_LOCKED;
++              q = __lookup_mnt(&child->mnt_parent->mnt,
++                               child->mnt_mountpoint);
++              if (q)
++                      mnt_change_mountpoint(child, smp, q);
+               commit_tree(child);
+       }
+       put_mountpoint(smp);
+-- 
+2.39.5
+
diff --git a/queue-6.12/bluetooth-hci_core-fix-use-after-free-in-vhci_flush.patch b/queue-6.12/bluetooth-hci_core-fix-use-after-free-in-vhci_flush.patch
new file mode 100644 (file)
index 0000000..9d2b2ae
--- /dev/null
@@ -0,0 +1,252 @@
+From aa6b340121729bd728cfcce3d5cebbc6247e6571 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Jun 2025 09:58:13 -0700
+Subject: Bluetooth: hci_core: Fix use-after-free in vhci_flush()
+
+From: Kuniyuki Iwashima <kuniyu@google.com>
+
+[ Upstream commit 1d6123102e9fbedc8d25bf4731da6d513173e49e ]
+
+syzbot reported use-after-free in vhci_flush() without repro. [0]
+
+From the splat, a thread close()d a vhci file descriptor while
+its device was being used by ioctl() on another thread.
+
+Once the last fd refcnt is released, vhci_release() calls
+hci_unregister_dev(), hci_free_dev(), and kfree() for struct
+vhci_data, which is set to hci_dev->dev->driver_data.
+
+The problem is that there is no synchronisation after unlinking
+hdev from hci_dev_list in hci_unregister_dev().  There might be
+another thread still accessing the hdev which was fetched before
+the unlink operation.
+
+We can use SRCU for such synchronisation.
+
+Let's run hci_dev_reset() under SRCU and wait for its completion
+in hci_unregister_dev().
+
+Another option would be to restore hci_dev->destruct(), which was
+removed in commit 587ae086f6e4 ("Bluetooth: Remove unused
+hci-destruct cb").  However, this would not be a good solution, as
+we should not run hci_unregister_dev() while there are in-flight
+ioctl() requests, which could lead to another data-race KCSAN splat.
+
+Note that other drivers seem to have the same problem, for example,
+virtbt_remove().
+
+[0]:
+BUG: KASAN: slab-use-after-free in skb_queue_empty_lockless include/linux/skbuff.h:1891 [inline]
+BUG: KASAN: slab-use-after-free in skb_queue_purge_reason+0x99/0x360 net/core/skbuff.c:3937
+Read of size 8 at addr ffff88807cb8d858 by task syz.1.219/6718
+
+CPU: 1 UID: 0 PID: 6718 Comm: syz.1.219 Not tainted 6.16.0-rc1-syzkaller-00196-g08207f42d3ff #0 PREEMPT(full)
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/07/2025
+Call Trace:
+ <TASK>
+ dump_stack_lvl+0x189/0x250 lib/dump_stack.c:120
+ print_address_description mm/kasan/report.c:408 [inline]
+ print_report+0xd2/0x2b0 mm/kasan/report.c:521
+ kasan_report+0x118/0x150 mm/kasan/report.c:634
+ skb_queue_empty_lockless include/linux/skbuff.h:1891 [inline]
+ skb_queue_purge_reason+0x99/0x360 net/core/skbuff.c:3937
+ skb_queue_purge include/linux/skbuff.h:3368 [inline]
+ vhci_flush+0x44/0x50 drivers/bluetooth/hci_vhci.c:69
+ hci_dev_do_reset net/bluetooth/hci_core.c:552 [inline]
+ hci_dev_reset+0x420/0x5c0 net/bluetooth/hci_core.c:592
+ sock_do_ioctl+0xd9/0x300 net/socket.c:1190
+ sock_ioctl+0x576/0x790 net/socket.c:1311
+ vfs_ioctl fs/ioctl.c:51 [inline]
+ __do_sys_ioctl fs/ioctl.c:907 [inline]
+ __se_sys_ioctl+0xf9/0x170 fs/ioctl.c:893
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+RIP: 0033:0x7fcf5b98e929
+Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 a8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007fcf5c7b9038 EFLAGS: 00000246 ORIG_RAX: 0000000000000010
+RAX: ffffffffffffffda RBX: 00007fcf5bbb6160 RCX: 00007fcf5b98e929
+RDX: 0000000000000000 RSI: 00000000400448cb RDI: 0000000000000009
+RBP: 00007fcf5ba10b39 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 0000000000000000 R14: 00007fcf5bbb6160 R15: 00007ffd6353d528
+ </TASK>
+
+Allocated by task 6535:
+ kasan_save_stack mm/kasan/common.c:47 [inline]
+ kasan_save_track+0x3e/0x80 mm/kasan/common.c:68
+ poison_kmalloc_redzone mm/kasan/common.c:377 [inline]
+ __kasan_kmalloc+0x93/0xb0 mm/kasan/common.c:394
+ kasan_kmalloc include/linux/kasan.h:260 [inline]
+ __kmalloc_cache_noprof+0x230/0x3d0 mm/slub.c:4359
+ kmalloc_noprof include/linux/slab.h:905 [inline]
+ kzalloc_noprof include/linux/slab.h:1039 [inline]
+ vhci_open+0x57/0x360 drivers/bluetooth/hci_vhci.c:635
+ misc_open+0x2bc/0x330 drivers/char/misc.c:161
+ chrdev_open+0x4c9/0x5e0 fs/char_dev.c:414
+ do_dentry_open+0xdf0/0x1970 fs/open.c:964
+ vfs_open+0x3b/0x340 fs/open.c:1094
+ do_open fs/namei.c:3887 [inline]
+ path_openat+0x2ee5/0x3830 fs/namei.c:4046
+ do_filp_open+0x1fa/0x410 fs/namei.c:4073
+ do_sys_openat2+0x121/0x1c0 fs/open.c:1437
+ do_sys_open fs/open.c:1452 [inline]
+ __do_sys_openat fs/open.c:1468 [inline]
+ __se_sys_openat fs/open.c:1463 [inline]
+ __x64_sys_openat+0x138/0x170 fs/open.c:1463
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+Freed by task 6535:
+ kasan_save_stack mm/kasan/common.c:47 [inline]
+ kasan_save_track+0x3e/0x80 mm/kasan/common.c:68
+ kasan_save_free_info+0x46/0x50 mm/kasan/generic.c:576
+ poison_slab_object mm/kasan/common.c:247 [inline]
+ __kasan_slab_free+0x62/0x70 mm/kasan/common.c:264
+ kasan_slab_free include/linux/kasan.h:233 [inline]
+ slab_free_hook mm/slub.c:2381 [inline]
+ slab_free mm/slub.c:4643 [inline]
+ kfree+0x18e/0x440 mm/slub.c:4842
+ vhci_release+0xbc/0xd0 drivers/bluetooth/hci_vhci.c:671
+ __fput+0x44c/0xa70 fs/file_table.c:465
+ task_work_run+0x1d1/0x260 kernel/task_work.c:227
+ exit_task_work include/linux/task_work.h:40 [inline]
+ do_exit+0x6ad/0x22e0 kernel/exit.c:955
+ do_group_exit+0x21c/0x2d0 kernel/exit.c:1104
+ __do_sys_exit_group kernel/exit.c:1115 [inline]
+ __se_sys_exit_group kernel/exit.c:1113 [inline]
+ __x64_sys_exit_group+0x3f/0x40 kernel/exit.c:1113
+ x64_sys_call+0x21ba/0x21c0 arch/x86/include/generated/asm/syscalls_64.h:232
+ do_syscall_x64 arch/x86/entry/syscall_64.c:63 [inline]
+ do_syscall_64+0xfa/0x3b0 arch/x86/entry/syscall_64.c:94
+ entry_SYSCALL_64_after_hwframe+0x77/0x7f
+
+The buggy address belongs to the object at ffff88807cb8d800
+ which belongs to the cache kmalloc-1k of size 1024
+The buggy address is located 88 bytes inside of
+ freed 1024-byte region [ffff88807cb8d800, ffff88807cb8dc00)
+
+Fixes: bf18c7118cf8 ("Bluetooth: vhci: Free driver_data on file release")
+Reported-by: syzbot+2faa4825e556199361f9@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=f62d64848fc4c7c30cd6
+Signed-off-by: Kuniyuki Iwashima <kuniyu@google.com>
+Acked-by: Paul Menzel <pmenzel@molgen.mpg.de>
+Signed-off-by: Luiz Augusto von Dentz <luiz.von.dentz@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/bluetooth/hci_core.h |  2 ++
+ net/bluetooth/hci_core.c         | 34 ++++++++++++++++++++++++++++----
+ 2 files changed, 32 insertions(+), 4 deletions(-)
+
+diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
+index e9e3366d059ef..730aa0245aef9 100644
+--- a/include/net/bluetooth/hci_core.h
++++ b/include/net/bluetooth/hci_core.h
+@@ -29,6 +29,7 @@
+ #include <linux/idr.h>
+ #include <linux/leds.h>
+ #include <linux/rculist.h>
++#include <linux/srcu.h>
+ #include <net/bluetooth/hci.h>
+ #include <net/bluetooth/hci_sync.h>
+@@ -338,6 +339,7 @@ struct adv_monitor {
+ struct hci_dev {
+       struct list_head list;
++      struct srcu_struct srcu;
+       struct mutex    lock;
+       struct ida      unset_handle_ida;
+diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
+index 0d3816c807588..b74ada8092378 100644
+--- a/net/bluetooth/hci_core.c
++++ b/net/bluetooth/hci_core.c
+@@ -64,7 +64,7 @@ static DEFINE_IDA(hci_index_ida);
+ /* Get HCI device by index.
+  * Device is held on return. */
+-struct hci_dev *hci_dev_get(int index)
++static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
+ {
+       struct hci_dev *hdev = NULL, *d;
+@@ -77,6 +77,8 @@ struct hci_dev *hci_dev_get(int index)
+       list_for_each_entry(d, &hci_dev_list, list) {
+               if (d->id == index) {
+                       hdev = hci_dev_hold(d);
++                      if (srcu_index)
++                              *srcu_index = srcu_read_lock(&d->srcu);
+                       break;
+               }
+       }
+@@ -84,6 +86,22 @@ struct hci_dev *hci_dev_get(int index)
+       return hdev;
+ }
++struct hci_dev *hci_dev_get(int index)
++{
++      return __hci_dev_get(index, NULL);
++}
++
++static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
++{
++      return __hci_dev_get(index, srcu_index);
++}
++
++static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
++{
++      srcu_read_unlock(&hdev->srcu, srcu_index);
++      hci_dev_put(hdev);
++}
++
+ /* ---- Inquiry support ---- */
+ bool hci_discovery_active(struct hci_dev *hdev)
+@@ -568,9 +586,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
+ int hci_dev_reset(__u16 dev)
+ {
+       struct hci_dev *hdev;
+-      int err;
++      int err, srcu_index;
+-      hdev = hci_dev_get(dev);
++      hdev = hci_dev_get_srcu(dev, &srcu_index);
+       if (!hdev)
+               return -ENODEV;
+@@ -592,7 +610,7 @@ int hci_dev_reset(__u16 dev)
+       err = hci_dev_do_reset(hdev);
+ done:
+-      hci_dev_put(hdev);
++      hci_dev_put_srcu(hdev, srcu_index);
+       return err;
+ }
+@@ -2439,6 +2457,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
+       if (!hdev)
+               return NULL;
++      if (init_srcu_struct(&hdev->srcu)) {
++              kfree(hdev);
++              return NULL;
++      }
++
+       hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
+       hdev->esco_type = (ESCO_HV1);
+       hdev->link_mode = (HCI_LM_ACCEPT);
+@@ -2684,6 +2707,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
+       list_del(&hdev->list);
+       write_unlock(&hci_dev_list_lock);
++      synchronize_srcu(&hdev->srcu);
++      cleanup_srcu_struct(&hdev->srcu);
++
+       disable_work_sync(&hdev->rx_work);
+       disable_work_sync(&hdev->cmd_work);
+       disable_work_sync(&hdev->tx_work);
+-- 
+2.39.5
+
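The synchronisation pattern introduced above, as a stripped-down kernel-style sketch with illustrative names: readers wrap their use of the object in an SRCU read-side critical section, and the unregister path waits for all such readers with synchronize_srcu() after unlinking the object, so nothing can still be inside the object when it is freed.

    #include <linux/slab.h>
    #include <linux/srcu.h>

    struct obj {
            struct list_head list;
            struct srcu_struct srcu;        /* init_srcu_struct() at allocation time */
    };

    /* reader side: e.g. an ioctl() handler that found 'o' on the global list */
    static void use_obj(struct obj *o)
    {
            int idx = srcu_read_lock(&o->srcu);
            /* ... 'o' stays valid here even if unregistration has started ... */
            srcu_read_unlock(&o->srcu, idx);
    }

    /* teardown side: called after 'o' was unlinked from the global list */
    static void unregister_obj(struct obj *o)
    {
            synchronize_srcu(&o->srcu);     /* wait for every in-flight reader */
            cleanup_srcu_struct(&o->srcu);
            kfree(o);                       /* no reader can still be using 'o' */
    }

The patch applies exactly this shape to hci_dev: hci_dev_reset() is the reader, and hci_unregister_dev() performs the synchronize/cleanup after removing the device from hci_dev_list.
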
diff --git a/queue-6.12/bnxt-properly-flush-xdp-redirect-lists.patch b/queue-6.12/bnxt-properly-flush-xdp-redirect-lists.patch
new file mode 100644 (file)
index 0000000..2a64e76
--- /dev/null
@@ -0,0 +1,139 @@
+From 561cbef22e8b17845436e55d604b22ccd7b889f1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jun 2025 09:06:38 -0700
+Subject: bnxt: properly flush XDP redirect lists
+
+From: Yan Zhai <yan@cloudflare.com>
+
+[ Upstream commit 9caca6ac0e26cd20efd490d8b3b2ffb1c7c00f6f ]
+
+We encountered following crash when testing a XDP_REDIRECT feature
+in production:
+
+[56251.579676] list_add corruption. next->prev should be prev (ffff93120dd40f30), but was ffffb301ef3a6740. (next=ffff93120dd
+40f30).
+[56251.601413] ------------[ cut here ]------------
+[56251.611357] kernel BUG at lib/list_debug.c:29!
+[56251.621082] Oops: invalid opcode: 0000 [#1] PREEMPT SMP NOPTI
+[56251.632073] CPU: 111 UID: 0 PID: 0 Comm: swapper/111 Kdump: loaded Tainted: P           O       6.12.33-cloudflare-2025.6.
+3 #1
+[56251.653155] Tainted: [P]=PROPRIETARY_MODULE, [O]=OOT_MODULE
+[56251.663877] Hardware name: MiTAC GC68B-B8032-G11P6-GPU/S8032GM-HE-CFR, BIOS V7.020.B10-sig 01/22/2025
+[56251.682626] RIP: 0010:__list_add_valid_or_report+0x4b/0xa0
+[56251.693203] Code: 0e 48 c7 c7 68 e7 d9 97 e8 42 16 fe ff 0f 0b 48 8b 52 08 48 39 c2 74 14 48 89 f1 48 c7 c7 90 e7 d9 97 48
+ 89 c6 e8 25 16 fe ff <0f> 0b 4c 8b 02 49 39 f0 74 14 48 89 d1 48 c7 c7 e8 e7 d9 97 4c 89
+[56251.725811] RSP: 0018:ffff93120dd40b80 EFLAGS: 00010246
+[56251.736094] RAX: 0000000000000075 RBX: ffffb301e6bba9d8 RCX: 0000000000000000
+[56251.748260] RDX: 0000000000000000 RSI: ffff9149afda0b80 RDI: ffff9149afda0b80
+[56251.760349] RBP: ffff9131e49c8000 R08: 0000000000000000 R09: ffff93120dd40a18
+[56251.772382] R10: ffff9159cf2ce1a8 R11: 0000000000000003 R12: ffff911a80850000
+[56251.784364] R13: ffff93120fbc7000 R14: 0000000000000010 R15: ffff9139e7510e40
+[56251.796278] FS:  0000000000000000(0000) GS:ffff9149afd80000(0000) knlGS:0000000000000000
+[56251.809133] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[56251.819561] CR2: 00007f5e85e6f300 CR3: 00000038b85e2006 CR4: 0000000000770ef0
+[56251.831365] PKRU: 55555554
+[56251.838653] Call Trace:
+[56251.845560]  <IRQ>
+[56251.851943]  cpu_map_enqueue.cold+0x5/0xa
+[56251.860243]  xdp_do_redirect+0x2d9/0x480
+[56251.868388]  bnxt_rx_xdp+0x1d8/0x4c0 [bnxt_en]
+[56251.877028]  bnxt_rx_pkt+0x5f7/0x19b0 [bnxt_en]
+[56251.885665]  ? cpu_max_write+0x1e/0x100
+[56251.893510]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56251.902276]  __bnxt_poll_work+0x190/0x340 [bnxt_en]
+[56251.911058]  bnxt_poll+0xab/0x1b0 [bnxt_en]
+[56251.919041]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56251.927568]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56251.935958]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56251.944250]  __napi_poll+0x2b/0x160
+[56251.951155]  bpf_trampoline_6442548651+0x79/0x123
+[56251.959262]  __napi_poll+0x5/0x160
+[56251.966037]  net_rx_action+0x3d2/0x880
+[56251.973133]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56251.981265]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56251.989262]  ? __hrtimer_run_queues+0x162/0x2a0
+[56251.996967]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56252.004875]  ? srso_alias_return_thunk+0x5/0xfbef5
+[56252.012673]  ? bnxt_msix+0x62/0x70 [bnxt_en]
+[56252.019903]  handle_softirqs+0xcf/0x270
+[56252.026650]  irq_exit_rcu+0x67/0x90
+[56252.032933]  common_interrupt+0x85/0xa0
+[56252.039498]  </IRQ>
+[56252.044246]  <TASK>
+[56252.048935]  asm_common_interrupt+0x26/0x40
+[56252.055727] RIP: 0010:cpuidle_enter_state+0xb8/0x420
+[56252.063305] Code: dc 01 00 00 e8 f9 79 3b ff e8 64 f7 ff ff 49 89 c5 0f 1f 44 00 00 31 ff e8 a5 32 3a ff 45 84 ff 0f 85 ae
+ 01 00 00 fb 45 85 f6 <0f> 88 88 01 00 00 48 8b 04 24 49 63 ce 4c 89 ea 48 6b f1 68 48 29
+[56252.088911] RSP: 0018:ffff93120c97fe98 EFLAGS: 00000202
+[56252.096912] RAX: ffff9149afd80000 RBX: ffff9141d3a72800 RCX: 0000000000000000
+[56252.106844] RDX: 00003329176c6b98 RSI: ffffffe36db3fdc7 RDI: 0000000000000000
+[56252.116733] RBP: 0000000000000002 R08: 0000000000000002 R09: 000000000000004e
+[56252.126652] R10: ffff9149afdb30c4 R11: 071c71c71c71c71c R12: ffffffff985ff860
+[56252.136637] R13: 00003329176c6b98 R14: 0000000000000002 R15: 0000000000000000
+[56252.146667]  ? cpuidle_enter_state+0xab/0x420
+[56252.153909]  cpuidle_enter+0x2d/0x40
+[56252.160360]  do_idle+0x176/0x1c0
+[56252.166456]  cpu_startup_entry+0x29/0x30
+[56252.173248]  start_secondary+0xf7/0x100
+[56252.179941]  common_startup_64+0x13e/0x141
+[56252.186886]  </TASK>
+
+From the crash dump, we found that the cpu_map_flush_list inside
+redirect info is partially corrupted: its list_head->next points to
+itself, but list_head->prev points to a valid list of unflushed bq
+entries.
+
+This turned out to be a result of missed XDP flush on redirect lists. By
+digging in the actual source code, we found that
+commit 7f0a168b0441 ("bnxt_en: Add completion ring pointer in TX and RX
+ring structures") incorrectly overwrites the event mask for XDP_REDIRECT
+in bnxt_rx_xdp. We can stably reproduce this crash by returning XDP_TX
+and XDP_REDIRECT randomly for incoming packets in a naive XDP program.
+Properly propagating the XDP_REDIRECT events back fixes the crash.
+
+Fixes: a7559bc8c17c ("bnxt: support transmit and free of aggregation buffers")
+Tested-by: Andrew Rzeznik <arzeznik@cloudflare.com>
+Signed-off-by: Yan Zhai <yan@cloudflare.com>
+Acked-by: Jesper Dangaard Brouer <hawk@kernel.org>
+Reviewed-by: Michael Chan <michael.chan@broadcom.com>
+Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
+Link: https://patch.msgid.link/aFl7jpCNzscumuN2@debian.debian
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/broadcom/bnxt/bnxt.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+index 154f73f121eca..ad4aec522f4f8 100644
+--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
++++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+@@ -2871,6 +2871,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+ {
+       struct bnxt_napi *bnapi = cpr->bnapi;
+       u32 raw_cons = cpr->cp_raw_cons;
++      bool flush_xdp = false;
+       u32 cons;
+       int rx_pkts = 0;
+       u8 event = 0;
+@@ -2924,6 +2925,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+                       else
+                               rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
+                                                          &event);
++                      if (event & BNXT_REDIRECT_EVENT)
++                              flush_xdp = true;
+                       if (likely(rc >= 0))
+                               rx_pkts += rc;
+                       /* Increment rx_pkts when rc is -ENOMEM to count towards
+@@ -2948,7 +2951,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
+               }
+       }
+-      if (event & BNXT_REDIRECT_EVENT) {
++      if (flush_xdp) {
+               xdp_do_flush();
+               event &= ~BNXT_REDIRECT_EVENT;
+       }
+-- 
+2.39.5
+
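The essence of the fix, as a hedged sketch (have_work(), process_one() and REDIRECT_EVENT are stand-ins): when a per-iteration event mask may be cleared or overwritten by later iterations, the "needs flush" condition has to be latched into a separate flag so the flush still runs once the loop finishes.

    static void poll_loop(void)
    {
            bool flush_xdp = false;
            u8 event;

            while (have_work()) {
                    event = 0;
                    process_one(&event);            /* may or may not set REDIRECT_EVENT */
                    if (event & REDIRECT_EVENT)
                            flush_xdp = true;       /* latch it: 'event' is reset next pass */
            }

            if (flush_xdp)
                    xdp_do_flush();                 /* flush redirect lists exactly once */
    }
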
diff --git a/queue-6.12/cifs-fix-reading-into-an-iter_folioq-from-the-smbdir.patch b/queue-6.12/cifs-fix-reading-into-an-iter_folioq-from-the-smbdir.patch
new file mode 100644 (file)
index 0000000..5b727d9
--- /dev/null
@@ -0,0 +1,202 @@
+From 3d71881c7eb11fc6a96bda9068cf2439bd9c32ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Apr 2025 20:27:26 +0100
+Subject: cifs: Fix reading into an ITER_FOLIOQ from the smbdirect code
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 263debecb4aa7cec0a86487e6f409814f6194a21 ]
+
+When performing a file read from RDMA, smbd_recv() prints an "Invalid msg
+type 4" error and fails the I/O.  This is due to the switch-statement there
+not handling the ITER_FOLIOQ handed down from netfslib.
+
+Fix this by collapsing smbd_recv_buf() and smbd_recv_page() into
+smbd_recv() and just using copy_to_iter() instead of memcpy().  This
+future-proofs the function too, in case more ITER_* types are added.
+
+Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
+Reported-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Tom Talpey <tom@talpey.com>
+cc: Paulo Alcantara (Red Hat) <pc@manguebit.com>
+cc: Matthew Wilcox <willy@infradead.org>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 112 ++++++--------------------------------
+ 1 file changed, 17 insertions(+), 95 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index b7932f63b4650..ac06f2617f346 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1755,35 +1755,39 @@ struct smbd_connection *smbd_get_connection(
+ }
+ /*
+- * Receive data from receive reassembly queue
++ * Receive data from the transport's receive reassembly queue
+  * All the incoming data packets are placed in reassembly queue
+- * buf: the buffer to read data into
++ * iter: the buffer to read data into
+  * size: the length of data to read
+  * return value: actual data read
+- * Note: this implementation copies the data from reassebmly queue to receive
++ *
++ * Note: this implementation copies the data from reassembly queue to receive
+  * buffers used by upper layer. This is not the optimal code path. A better way
+  * to do it is to not have upper layer allocate its receive buffers but rather
+  * borrow the buffer from reassembly queue, and return it after data is
+  * consumed. But this will require more changes to upper layer code, and also
+  * need to consider packet boundaries while they still being reassembled.
+  */
+-static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+-              unsigned int size)
++int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+ {
+       struct smbdirect_socket *sc = &info->socket;
+       struct smbd_response *response;
+       struct smbdirect_data_transfer *data_transfer;
++      size_t size = iov_iter_count(&msg->msg_iter);
+       int to_copy, to_read, data_read, offset;
+       u32 data_length, remaining_data_length, data_offset;
+       int rc;
++      if (WARN_ON_ONCE(iov_iter_rw(&msg->msg_iter) == WRITE))
++              return -EINVAL; /* It's a bug in upper layer to get there */
++
+ again:
+       /*
+        * No need to hold the reassembly queue lock all the time as we are
+        * the only one reading from the front of the queue. The transport
+        * may add more entries to the back of the queue at the same time
+        */
+-      log_read(INFO, "size=%d info->reassembly_data_length=%d\n", size,
++      log_read(INFO, "size=%zd info->reassembly_data_length=%d\n", size,
+               info->reassembly_data_length);
+       if (info->reassembly_data_length >= size) {
+               int queue_length;
+@@ -1821,7 +1825,10 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+                       if (response->first_segment && size == 4) {
+                               unsigned int rfc1002_len =
+                                       data_length + remaining_data_length;
+-                              *((__be32 *)buf) = cpu_to_be32(rfc1002_len);
++                              __be32 rfc1002_hdr = cpu_to_be32(rfc1002_len);
++                              if (copy_to_iter(&rfc1002_hdr, sizeof(rfc1002_hdr),
++                                               &msg->msg_iter) != sizeof(rfc1002_hdr))
++                                      return -EFAULT;
+                               data_read = 4;
+                               response->first_segment = false;
+                               log_read(INFO, "returning rfc1002 length %d\n",
+@@ -1830,10 +1837,9 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+                       }
+                       to_copy = min_t(int, data_length - offset, to_read);
+-                      memcpy(
+-                              buf + data_read,
+-                              (char *)data_transfer + data_offset + offset,
+-                              to_copy);
++                      if (copy_to_iter((char *)data_transfer + data_offset + offset,
++                                       to_copy, &msg->msg_iter) != to_copy)
++                              return -EFAULT;
+                       /* move on to the next buffer? */
+                       if (to_copy == data_length - offset) {
+@@ -1898,90 +1904,6 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+       goto again;
+ }
+-/*
+- * Receive a page from receive reassembly queue
+- * page: the page to read data into
+- * to_read: the length of data to read
+- * return value: actual data read
+- */
+-static int smbd_recv_page(struct smbd_connection *info,
+-              struct page *page, unsigned int page_offset,
+-              unsigned int to_read)
+-{
+-      struct smbdirect_socket *sc = &info->socket;
+-      int ret;
+-      char *to_address;
+-      void *page_address;
+-
+-      /* make sure we have the page ready for read */
+-      ret = wait_event_interruptible(
+-              info->wait_reassembly_queue,
+-              info->reassembly_data_length >= to_read ||
+-                      sc->status != SMBDIRECT_SOCKET_CONNECTED);
+-      if (ret)
+-              return ret;
+-
+-      /* now we can read from reassembly queue and not sleep */
+-      page_address = kmap_atomic(page);
+-      to_address = (char *) page_address + page_offset;
+-
+-      log_read(INFO, "reading from page=%p address=%p to_read=%d\n",
+-              page, to_address, to_read);
+-
+-      ret = smbd_recv_buf(info, to_address, to_read);
+-      kunmap_atomic(page_address);
+-
+-      return ret;
+-}
+-
+-/*
+- * Receive data from transport
+- * msg: a msghdr point to the buffer, can be ITER_KVEC or ITER_BVEC
+- * return: total bytes read, or 0. SMB Direct will not do partial read.
+- */
+-int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
+-{
+-      char *buf;
+-      struct page *page;
+-      unsigned int to_read, page_offset;
+-      int rc;
+-
+-      if (iov_iter_rw(&msg->msg_iter) == WRITE) {
+-              /* It's a bug in upper layer to get there */
+-              cifs_dbg(VFS, "Invalid msg iter dir %u\n",
+-                       iov_iter_rw(&msg->msg_iter));
+-              rc = -EINVAL;
+-              goto out;
+-      }
+-
+-      switch (iov_iter_type(&msg->msg_iter)) {
+-      case ITER_KVEC:
+-              buf = msg->msg_iter.kvec->iov_base;
+-              to_read = msg->msg_iter.kvec->iov_len;
+-              rc = smbd_recv_buf(info, buf, to_read);
+-              break;
+-
+-      case ITER_BVEC:
+-              page = msg->msg_iter.bvec->bv_page;
+-              page_offset = msg->msg_iter.bvec->bv_offset;
+-              to_read = msg->msg_iter.bvec->bv_len;
+-              rc = smbd_recv_page(info, page, page_offset, to_read);
+-              break;
+-
+-      default:
+-              /* It's a bug in upper layer to get there */
+-              cifs_dbg(VFS, "Invalid msg type %d\n",
+-                       iov_iter_type(&msg->msg_iter));
+-              rc = -EINVAL;
+-      }
+-
+-out:
+-      /* SMBDirect will read it all or nothing */
+-      if (rc > 0)
+-              msg->msg_iter.count = 0;
+-      return rc;
+-}
+-
+ /*
+  * Send data to transport
+  * Each rqst is transported as a SMBDirect payload
+-- 
+2.39.5
+
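Why copy_to_iter() is the right tool here, in a hedged sketch: an iov_iter abstracts over ITER_KVEC, ITER_BVEC, ITER_FOLIOQ and any future backing type, so copying through the iterator removes the need for the per-type switch deleted above.

    /* Illustrative helper: copy 'len' bytes from a kernel buffer into whatever
     * kind of buffer the caller's msghdr describes, without a per-type switch.
     */
    static int copy_out(struct msghdr *msg, const void *src, size_t len)
    {
            if (copy_to_iter(src, len, &msg->msg_iter) != len)
                    return -EFAULT;
            return 0;
    }
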
diff --git a/queue-6.12/cifs-fix-the-smbd_response-slab-to-allow-usercopy.patch b/queue-6.12/cifs-fix-the-smbd_response-slab-to-allow-usercopy.patch
new file mode 100644 (file)
index 0000000..f8a2855
--- /dev/null
@@ -0,0 +1,100 @@
+From bfd9093437ce6f9e20de1769590126dc43a2bf41 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jun 2025 14:15:04 +0100
+Subject: cifs: Fix the smbd_response slab to allow usercopy
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit 43e7e284fc77b710d899569360ea46fa3374ae22 ]
+
+The handling of received data in the smbdirect client code involves using
+copy_to_iter() to copy data from the smbd_response struct's packet trailer
+to a folioq buffer provided by netfslib that encapsulates a chunk of
+pagecache.
+
+If, however, CONFIG_HARDENED_USERCOPY=y, this will result in the checks
+then performed in copy_to_iter() oopsing with something like the following:
+
+ CIFS: Attempting to mount //172.31.9.1/test
+ CIFS: VFS: RDMA transport established
+ usercopy: Kernel memory exposure attempt detected from SLUB object 'smbd_response_0000000091e24ea1' (offset 81, size 63)!
+ ------------[ cut here ]------------
+ kernel BUG at mm/usercopy.c:102!
+ ...
+ RIP: 0010:usercopy_abort+0x6c/0x80
+ ...
+ Call Trace:
+  <TASK>
+  __check_heap_object+0xe3/0x120
+  __check_object_size+0x4dc/0x6d0
+  smbd_recv+0x77f/0xfe0 [cifs]
+  cifs_readv_from_socket+0x276/0x8f0 [cifs]
+  cifs_read_from_socket+0xcd/0x120 [cifs]
+  cifs_demultiplex_thread+0x7e9/0x2d50 [cifs]
+  kthread+0x396/0x830
+  ret_from_fork+0x2b8/0x3b0
+  ret_from_fork_asm+0x1a/0x30
+
+The problem is that the smbd_response slab's packet field isn't marked as
+being permitted for usercopy.
+
+Fix this by passing parameters to kmem_slab_create() to indicate that
+copy_to_iter() is permitted from the packet region of the smbd_response
+slab objects, less the header space.
+
+Fixes: ee4cdf7ba857 ("netfs: Speed up buffered reading")
+Reported-by: Stefan Metzmacher <metze@samba.org>
+Link: https://lore.kernel.org/r/acb7f612-df26-4e2a-a35d-7cd040f513e1@samba.org/
+Signed-off-by: David Howells <dhowells@redhat.com>
+Reviewed-by: Stefan Metzmacher <metze@samba.org>
+Tested-by: Stefan Metzmacher <metze@samba.org>
+cc: Paulo Alcantara <pc@manguebit.com>
+cc: linux-cifs@vger.kernel.org
+cc: netfs@lists.linux.dev
+cc: linux-fsdevel@vger.kernel.org
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 18 +++++++++++++-----
+ 1 file changed, 13 insertions(+), 5 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index cbc85bca006f7..b7932f63b4650 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -1452,6 +1452,9 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+       char name[MAX_NAME_LEN];
+       int rc;
++      if (WARN_ON_ONCE(sp->max_recv_size < sizeof(struct smbdirect_data_transfer)))
++              return -ENOMEM;
++
+       scnprintf(name, MAX_NAME_LEN, "smbd_request_%p", info);
+       info->request_cache =
+               kmem_cache_create(
+@@ -1469,12 +1472,17 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+               goto out1;
+       scnprintf(name, MAX_NAME_LEN, "smbd_response_%p", info);
++
++      struct kmem_cache_args response_args = {
++              .align          = __alignof__(struct smbd_response),
++              .useroffset     = (offsetof(struct smbd_response, packet) +
++                                 sizeof(struct smbdirect_data_transfer)),
++              .usersize       = sp->max_recv_size - sizeof(struct smbdirect_data_transfer),
++      };
+       info->response_cache =
+-              kmem_cache_create(
+-                      name,
+-                      sizeof(struct smbd_response) +
+-                              sp->max_recv_size,
+-                      0, SLAB_HWCACHE_ALIGN, NULL);
++              kmem_cache_create(name,
++                                sizeof(struct smbd_response) + sp->max_recv_size,
++                                &response_args, SLAB_HWCACHE_ALIGN);
+       if (!info->response_cache)
+               goto out2;
+-- 
+2.39.5
+
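What the new cache arguments express, as a hedged sketch (struct my_resp and payload_size are illustrative): useroffset/usersize declare the only byte range of each slab object that copy_to_iter()/copy_from_iter() may touch when CONFIG_HARDENED_USERCOPY is enabled.

    struct my_resp {
            struct list_head list;          /* bookkeeping, never exposed to usercopy */
            u8 packet[];                    /* payload region that may be copied out */
    };

    static struct kmem_cache *make_resp_cache(size_t payload_size)
    {
            struct kmem_cache_args args = {
                    .align      = __alignof__(struct my_resp),
                    .useroffset = offsetof(struct my_resp, packet),
                    .usersize   = payload_size,
            };

            return kmem_cache_create("my_resp",
                                     sizeof(struct my_resp) + payload_size,
                                     &args, SLAB_HWCACHE_ALIGN);
    }

In the patch itself the usercopy window starts after the smbdirect_data_transfer header inside the packet area, so the header bytes themselves stay off-limits to usercopy.
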
diff --git a/queue-6.12/drm-amd-adjust-output-for-discovery-error-handling.patch b/queue-6.12/drm-amd-adjust-output-for-discovery-error-handling.patch
new file mode 100644 (file)
index 0000000..6a8a34b
--- /dev/null
@@ -0,0 +1,101 @@
+From 9174963281b22471fa5ddda633b1679a5eaa0111 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 17 Jun 2025 13:30:52 -0500
+Subject: drm/amd: Adjust output for discovery error handling
+
+From: Mario Limonciello <mario.limonciello@amd.com>
+
+[ Upstream commit 73eab78721f7b85216f1ca8c7b732f13213b5b32 ]
+
+commit 017fbb6690c2 ("drm/amdgpu/discovery: check ip_discovery fw file
+available") added support for reading an amdgpu IP discovery bin file
+for some specific products. If it's not found then it will fall back to
+hardcoded values. However, if it's not found there is also a lot of noise
+about missing files and errors.
+
+Adjust the error handling to decrease most messages to DEBUG and to show
+users less about missing files.
+
+Reviewed-by: Lijo Lazar <lijo.lazar@amd.com>
+Reported-by: Marcus Seyfarth <m.seyfarth@gmail.com>
+Closes: https://gitlab.freedesktop.org/drm/amd/-/issues/4312
+Tested-by: Marcus Seyfarth <m.seyfarth@gmail.com>
+Fixes: 017fbb6690c2 ("drm/amdgpu/discovery: check ip_discovery fw file available")
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Link: https://lore.kernel.org/r/20250617183052.1692059-1-superm1@kernel.org
+Signed-off-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+(cherry picked from commit 49f1f9f6c3c9febf8ba93f94a8d9c8d03e1ea0a1)
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 28 +++++++++----------
+ 1 file changed, 13 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 8929478a8f45c..34d41e3ce3474 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -301,10 +301,12 @@ static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
+       const struct firmware *fw;
+       int r;
+-      r = request_firmware(&fw, fw_name, adev->dev);
++      r = firmware_request_nowarn(&fw, fw_name, adev->dev);
+       if (r) {
+-              dev_err(adev->dev, "can't load firmware \"%s\"\n",
+-                      fw_name);
++              if (amdgpu_discovery == 2)
++                      dev_err(adev->dev, "can't load firmware \"%s\"\n", fw_name);
++              else
++                      drm_info(&adev->ddev, "Optional firmware \"%s\" was not found\n", fw_name);
+               return r;
+       }
+@@ -419,16 +421,12 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
+       /* Read from file if it is the preferred option */
+       fw_name = amdgpu_discovery_get_fw_name(adev);
+       if (fw_name != NULL) {
+-              dev_info(adev->dev, "use ip discovery information from file");
++              drm_dbg(&adev->ddev, "use ip discovery information from file");
+               r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin, fw_name);
+-
+-              if (r) {
+-                      dev_err(adev->dev, "failed to read ip discovery binary from file\n");
+-                      r = -EINVAL;
++              if (r)
+                       goto out;
+-              }
+-
+       } else {
++              drm_dbg(&adev->ddev, "use ip discovery information from memory");
+               r = amdgpu_discovery_read_binary_from_mem(
+                       adev, adev->mman.discovery_bin);
+               if (r)
+@@ -1286,10 +1284,8 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
+       int r;
+       r = amdgpu_discovery_init(adev);
+-      if (r) {
+-              DRM_ERROR("amdgpu_discovery_init failed\n");
++      if (r)
+               return r;
+-      }
+       adev->gfx.xcc_mask = 0;
+       adev->sdma.sdma_mask = 0;
+@@ -2451,8 +2447,10 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+               break;
+       default:
+               r = amdgpu_discovery_reg_base_init(adev);
+-              if (r)
+-                      return -EINVAL;
++              if (r) {
++                      drm_err(&adev->ddev, "discovery failed: %d\n", r);
++                      return r;
++              }
+               amdgpu_discovery_harvest_ip(adev);
+               amdgpu_discovery_get_gfx_info(adev);
+-- 
+2.39.5
+
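The difference between the two firmware helpers, in a hedged sketch (load_optional_fw() is an illustrative wrapper, not an amdgpu function): firmware_request_nowarn() behaves like request_firmware() but suppresses the "file not found" warnings, which is what lets the discovery blob be treated as optional.

    static int load_optional_fw(struct device *dev, const char *fw_name,
                                const struct firmware **fw)
    {
            int r = firmware_request_nowarn(fw, fw_name, dev);

            if (r) {
                    dev_dbg(dev, "optional firmware \"%s\" not found, using fallback\n",
                            fw_name);
                    return r;               /* caller falls back to hardcoded values */
            }
            return 0;
    }
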
diff --git a/queue-6.12/drm-amdgpu-discovery-optionally-use-fw-based-ip-disc.patch b/queue-6.12/drm-amdgpu-discovery-optionally-use-fw-based-ip-disc.patch
new file mode 100644 (file)
index 0000000..5228140
--- /dev/null
@@ -0,0 +1,82 @@
+From af40a8a70000f7b374cc2ca65f3d467cf5e237b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Mar 2025 18:00:57 -0400
+Subject: drm/amdgpu/discovery: optionally use fw based ip discovery
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 80a0e828293389358f7db56adcdcb22b28df5e11 ]
+
+On chips without native IP discovery support, use the fw binary
+if available; otherwise, we can continue without it.
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Reviewed-by: Flora Cui <flora.cui@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 73eab78721f7 ("drm/amd: Adjust output for discovery error handling")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 40 +++++++++++++++----
+ 1 file changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+index 018240a2ab96a..8929478a8f45c 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+@@ -2429,6 +2429,38 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+ {
+       int r;
++      switch (adev->asic_type) {
++      case CHIP_VEGA10:
++      case CHIP_VEGA12:
++      case CHIP_RAVEN:
++      case CHIP_VEGA20:
++      case CHIP_ARCTURUS:
++      case CHIP_ALDEBARAN:
++              /* this is not fatal.  We have a fallback below
++               * if the new firmwares are not present. some of
++               * this will be overridden below to keep things
++               * consistent with the current behavior.
++               */
++              r = amdgpu_discovery_reg_base_init(adev);
++              if (!r) {
++                      amdgpu_discovery_harvest_ip(adev);
++                      amdgpu_discovery_get_gfx_info(adev);
++                      amdgpu_discovery_get_mall_info(adev);
++                      amdgpu_discovery_get_vcn_info(adev);
++              }
++              break;
++      default:
++              r = amdgpu_discovery_reg_base_init(adev);
++              if (r)
++                      return -EINVAL;
++
++              amdgpu_discovery_harvest_ip(adev);
++              amdgpu_discovery_get_gfx_info(adev);
++              amdgpu_discovery_get_mall_info(adev);
++              amdgpu_discovery_get_vcn_info(adev);
++              break;
++      }
++
+       switch (adev->asic_type) {
+       case CHIP_VEGA10:
+               vega10_reg_base_init(adev);
+@@ -2591,14 +2623,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
+               adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
+               break;
+       default:
+-              r = amdgpu_discovery_reg_base_init(adev);
+-              if (r)
+-                      return -EINVAL;
+-
+-              amdgpu_discovery_harvest_ip(adev);
+-              amdgpu_discovery_get_gfx_info(adev);
+-              amdgpu_discovery_get_mall_info(adev);
+-              amdgpu_discovery_get_vcn_info(adev);
+               break;
+       }
+-- 
+2.39.5
+
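In other words, on the listed legacy ASICs a discovery failure now falls back to the hardcoded per-ASIC setup further down, while for everything else it remains fatal. A compact sketch of that control flow, with stand-in enum values and error codes (not real chip identifiers):

    #include <stdio.h>

    enum asic { ASIC_LEGACY, ASIC_DISCOVERY };   /* illustrative stand-ins */

    static int reg_base_init(enum asic a)
    {
        /* pretend the discovery binary is only available for newer parts */
        return a == ASIC_DISCOVERY ? 0 : -19;    /* -ENODEV-like */
    }

    static int set_ip_blocks(enum asic a)
    {
        int r = reg_base_init(a);

        switch (a) {
        case ASIC_LEGACY:
            /* not fatal: hardcoded IP tables still apply below */
            if (r)
                printf("falling back to hardcoded IP tables\n");
            break;
        default:
            if (r) {
                fprintf(stderr, "discovery failed: %d\n", r);
                return r;                        /* no fallback for new parts */
            }
            break;
        }
        return 0;
    }

    int main(void)
    {
        return set_ip_blocks(ASIC_LEGACY) || set_ip_blocks(ASIC_DISCOVERY);
    }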
diff --git a/queue-6.12/drm-bridge-ti-sn65dsi86-add-hpd-for-displayport-conn.patch b/queue-6.12/drm-bridge-ti-sn65dsi86-add-hpd-for-displayport-conn.patch
new file mode 100644
index 0000000..27422c0
--- /dev/null
@@ -0,0 +1,150 @@
+From d13f835e4bdb6b92a23089bf907b71fa84b38208 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 10:18:35 +0530
+Subject: drm/bridge: ti-sn65dsi86: Add HPD for DisplayPort connector type
+
+From: Jayesh Choudhary <j-choudhary@ti.com>
+
+[ Upstream commit 55e8ff842051b1150461d7595d8f1d033c69d66b ]
+
+By default, HPD was disabled on SN65DSI86 bridge. When the driver was
+added (commit "a095f15c00e27"), the HPD_DISABLE bit was set in pre-enable
+call which was moved to other function calls subsequently.
+Later on, commit "c312b0df3b13" added detect utility for DP mode. But with
+HPD_DISABLE bit set, all the HPD events are disabled[0] and the debounced
+state always returns 1 (always connected state).
+
+Set HPD_DISABLE bit conditionally based on display sink's connector type.
+Since the HPD_STATE is reflected correctly only after waiting for debounce
+time (~100-400ms) and adding this delay in detect() is not feasible
+owing to the performance impact (glitches and frame drop), remove runtime
+calls in detect() and add hpd_enable()/disable() bridge hooks with runtime
+calls, to detect hpd properly without any delay.
+
+[0]: <https://www.ti.com/lit/gpn/SN65DSI86> (Pg. 32)
+
+Fixes: c312b0df3b13 ("drm/bridge: ti-sn65dsi86: Implement bridge connector operations for DP")
+Cc: Max Krummenacher <max.krummenacher@toradex.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Tested-by: Ernest Van Hoecke <ernest.vanhoecke@toradex.com>
+Signed-off-by: Jayesh Choudhary <j-choudhary@ti.com>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://lore.kernel.org/r/20250624044835.165708-1-j-choudhary@ti.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi86.c | 69 +++++++++++++++++++++++----
+ 1 file changed, 60 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index ca7597805e30f..5500767cda7e4 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -331,12 +331,18 @@ static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
+        * 200 ms.  We'll assume that the panel driver will have the hardcoded
+        * delay in its prepare and always disable HPD.
+        *
+-       * If HPD somehow makes sense on some future panel we'll have to
+-       * change this to be conditional on someone specifying that HPD should
+-       * be used.
++       * For DisplayPort bridge type, we need HPD. So we use the bridge type
++       * to conditionally disable HPD.
++       * NOTE: The bridge type is set in ti_sn_bridge_probe() but enable_comms()
++       * can be called before. So for DisplayPort, HPD will be enabled once
++       * bridge type is set. We are using bridge type instead of "no-hpd"
++       * property because it is not used properly in devicetree description
++       * and hence is unreliable.
+        */
+-      regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+-                         HPD_DISABLE);
++
++      if (pdata->bridge.type != DRM_MODE_CONNECTOR_DisplayPort)
++              regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
++                                 HPD_DISABLE);
+       pdata->comms_enabled = true;
+@@ -1173,9 +1179,14 @@ static enum drm_connector_status ti_sn_bridge_detect(struct drm_bridge *bridge)
+       struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
+       int val = 0;
+-      pm_runtime_get_sync(pdata->dev);
++      /*
++       * Runtime reference is grabbed in ti_sn_bridge_hpd_enable()
++       * as the chip won't report HPD just after being powered on.
++       * HPD_DEBOUNCED_STATE reflects correct state only after the
++       * debounce time (~100-400 ms).
++       */
++
+       regmap_read(pdata->regmap, SN_HPD_DISABLE_REG, &val);
+-      pm_runtime_put_autosuspend(pdata->dev);
+       return val & HPD_DEBOUNCED_STATE ? connector_status_connected
+                                        : connector_status_disconnected;
+@@ -1198,6 +1209,26 @@ static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *
+       debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+ }
++static void ti_sn_bridge_hpd_enable(struct drm_bridge *bridge)
++{
++      struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++
++      /*
++       * Device needs to be powered on before reading the HPD state
++       * for reliable hpd detection in ti_sn_bridge_detect() due to
++       * the high debounce time.
++       */
++
++      pm_runtime_get_sync(pdata->dev);
++}
++
++static void ti_sn_bridge_hpd_disable(struct drm_bridge *bridge)
++{
++      struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++
++      pm_runtime_put_autosuspend(pdata->dev);
++}
++
+ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+       .attach = ti_sn_bridge_attach,
+       .detach = ti_sn_bridge_detach,
+@@ -1212,6 +1243,8 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+       .debugfs_init = ti_sn65dsi86_debugfs_init,
++      .hpd_enable = ti_sn_bridge_hpd_enable,
++      .hpd_disable = ti_sn_bridge_hpd_disable,
+ };
+ static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
+@@ -1300,8 +1333,26 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+       pdata->bridge.type = pdata->next_bridge->type == DRM_MODE_CONNECTOR_DisplayPort
+                          ? DRM_MODE_CONNECTOR_DisplayPort : DRM_MODE_CONNECTOR_eDP;
+-      if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort)
+-              pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT;
++      if (pdata->bridge.type == DRM_MODE_CONNECTOR_DisplayPort) {
++              pdata->bridge.ops = DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_DETECT |
++                                  DRM_BRIDGE_OP_HPD;
++              /*
++               * If comms were already enabled they would have been enabled
++               * with the wrong value of HPD_DISABLE. Update it now. Comms
++               * could be enabled if anyone is holding a pm_runtime reference
++               * (like if a GPIO is in use). Note that in most cases nobody
++               * is doing AUX channel xfers before the bridge is added so
++               * HPD doesn't _really_ matter then. The only exception is in
++               * the eDP case where the panel wants to read the EDID before
++               * the bridge is added. We always consistently have HPD disabled
++               * for eDP.
++               */
++              mutex_lock(&pdata->comms_mutex);
++              if (pdata->comms_enabled)
++                      regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG,
++                                         HPD_DISABLE, 0);
++              mutex_unlock(&pdata->comms_mutex);
++      };
+       drm_bridge_add(&pdata->bridge);
+-- 
+2.39.5
+
diff --git a/queue-6.12/drm-bridge-ti-sn65dsi86-make-use-of-debugfs_init-cal.patch b/queue-6.12/drm-bridge-ti-sn65dsi86-make-use-of-debugfs_init-cal.patch
new file mode 100644
index 0000000..1433888
--- /dev/null
@@ -0,0 +1,104 @@
+From 670c98e39a39d0c76bd334af9eaadacaf97524aa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 15 Mar 2025 21:15:11 +0100
+Subject: drm/bridge: ti-sn65dsi86: make use of debugfs_init callback
+
+From: Wolfram Sang <wsa+renesas@sang-engineering.com>
+
+[ Upstream commit 1d1f7b15cb9c11974cebfd39da51dc69b8cb31ff ]
+
+Do not create a custom directory in debugfs-root, but use the
+debugfs_init callback to create a custom directory at the given place
+for the bridge. The new directory layout looks like this on a Renesas
+GrayHawk-Single with a R-Car V4M SoC:
+
+       /sys/kernel/debug/dri/feb00000.display/DP-1/1-002c
+
+Signed-off-by: Wolfram Sang <wsa+renesas@sang-engineering.com>
+Reviewed-by: Dmitry Baryshkov <dmitry.baryshkov@oss.qualcomm.com>
+Reviewed-by: Douglas Anderson <dianders@chromium.org>
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250315201651.7339-2-wsa+renesas@sang-engineering.com
+Stable-dep-of: 55e8ff842051 ("drm/bridge: ti-sn65dsi86: Add HPD for DisplayPort connector type")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/bridge/ti-sn65dsi86.c | 40 +++++++--------------------
+ 1 file changed, 10 insertions(+), 30 deletions(-)
+
+diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+index 95ce50ed53acf..ca7597805e30f 100644
+--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
++++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+@@ -424,36 +424,8 @@ static int status_show(struct seq_file *s, void *data)
+       return 0;
+ }
+-
+ DEFINE_SHOW_ATTRIBUTE(status);
+-static void ti_sn65dsi86_debugfs_remove(void *data)
+-{
+-      debugfs_remove_recursive(data);
+-}
+-
+-static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
+-{
+-      struct device *dev = pdata->dev;
+-      struct dentry *debugfs;
+-      int ret;
+-
+-      debugfs = debugfs_create_dir(dev_name(dev), NULL);
+-
+-      /*
+-       * We might get an error back if debugfs wasn't enabled in the kernel
+-       * so let's just silently return upon failure.
+-       */
+-      if (IS_ERR_OR_NULL(debugfs))
+-              return;
+-
+-      ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
+-      if (ret)
+-              return;
+-
+-      debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
+-}
+-
+ /* -----------------------------------------------------------------------------
+  * Auxiliary Devices (*not* AUX)
+  */
+@@ -1217,6 +1189,15 @@ static const struct drm_edid *ti_sn_bridge_edid_read(struct drm_bridge *bridge,
+       return drm_edid_read_ddc(connector, &pdata->aux.ddc);
+ }
++static void ti_sn65dsi86_debugfs_init(struct drm_bridge *bridge, struct dentry *root)
++{
++      struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
++      struct dentry *debugfs;
++
++      debugfs = debugfs_create_dir(dev_name(pdata->dev), root);
++      debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
++}
++
+ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+       .attach = ti_sn_bridge_attach,
+       .detach = ti_sn_bridge_detach,
+@@ -1230,6 +1211,7 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+       .atomic_reset = drm_atomic_helper_bridge_reset,
+       .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
++      .debugfs_init = ti_sn65dsi86_debugfs_init,
+ };
+ static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
+@@ -1938,8 +1920,6 @@ static int ti_sn65dsi86_probe(struct i2c_client *client)
+       if (ret)
+               return ret;
+-      ti_sn65dsi86_debugfs_init(pdata);
+-
+       /*
+        * Break ourselves up into a collection of aux devices. The only real
+        * motiviation here is to solve the chicken-and-egg problem of probe
+-- 
+2.39.5
+
diff --git a/queue-6.12/drm-i915-fix-build-error-some-more.patch b/queue-6.12/drm-i915-fix-build-error-some-more.patch
new file mode 100644
index 0000000..7a2db96
--- /dev/null
@@ -0,0 +1,52 @@
+From f1b9f2d3fcc85fa70f9e11c250c4fb58d0e554ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Jun 2025 13:18:18 +0200
+Subject: drm/i915: fix build error some more
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit d02b2103a08b6d6908f1d3d8e8783d3f342555ac ]
+
+An earlier patch fixed a build failure with clang, but I still see the
+same problem with some configurations using gcc:
+
+drivers/gpu/drm/i915/i915_pmu.c: In function 'config_mask':
+include/linux/compiler_types.h:568:38: error: call to '__compiletime_assert_462' declared with attribute error: BUILD_BUG_ON failed: bit > BITS_PER_TYPE(typeof_member(struct i915_pmu, enable)) - 1
+drivers/gpu/drm/i915/i915_pmu.c:116:3: note: in expansion of macro 'BUILD_BUG_ON'
+  116 |   BUILD_BUG_ON(bit >
+
+As I understand it, the problem is that the function is not always fully
+inlined, but the __builtin_constant_p() can still evaluate the argument
+as being constant.
+
+Marking it as __always_inline so far works for me in all configurations.
+
+Fixes: a7137b1825b5 ("drm/i915/pmu: Fix build error with GCOV and AutoFDO enabled")
+Fixes: a644fde77ff7 ("drm/i915/pmu: Change bitmask of enabled events to u32")
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Link: https://lore.kernel.org/r/20250620111824.3395007-1-arnd@kernel.org
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+(cherry picked from commit ef69f9dd1cd7301cdf04ba326ed28152a3affcf6)
+Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_pmu.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
+index c43223916a1b1..5cc302ad13e16 100644
+--- a/drivers/gpu/drm/i915/i915_pmu.c
++++ b/drivers/gpu/drm/i915/i915_pmu.c
+@@ -111,7 +111,7 @@ static unsigned int config_bit(const u64 config)
+               return other_bit(config);
+ }
+-static u32 config_mask(const u64 config)
++static __always_inline u32 config_mask(const u64 config)
+ {
+       unsigned int bit = config_bit(config);
+-- 
+2.39.5
+
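The failure mode is easiest to see with a stand-alone approximation of BUILD_BUG_ON built on GCC's error attribute; build it with optimization (e.g. -O2), as the kernel does, so provably dead branches are discarded. Names and the bit-extraction logic below are simplified stand-ins for the driver's config_bit()/config_mask():

    #include <stdint.h>
    #include <stdio.h>

    /* Rough stand-in for the kernel's compile-time assert: the call is
     * diagnosed only if the optimizer cannot delete it, i.e. only if `cond`
     * is not provably false at compile time. */
    extern void sketch_build_bug(void)
            __attribute__((__error__("BUILD_BUG_ON failed")));
    #define SKETCH_BUILD_BUG_ON(cond) do { if (cond) sketch_build_bug(); } while (0)

    /* Forcing full inlining guarantees that a constant argument really
     * propagates into the bound check; without it, an out-of-line copy can
     * keep the __builtin_constant_p() branch alive while the bound is no
     * longer provable, which is the build error the patch above fixes. */
    static inline __attribute__((__always_inline__))
    uint32_t config_mask_sketch(uint64_t config)
    {
        unsigned int bit = (unsigned int)config;   /* stand-in for config_bit() */

        if (__builtin_constant_p(bit))
            SKETCH_BUILD_BUG_ON(bit > 31);
        return (uint32_t)1 << bit;
    }

    int main(void)
    {
        printf("%#x\n", config_mask_sketch(5));    /* constant: check folds away */
        return 0;
    }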
diff --git a/queue-6.12/drm-xe-process-deferred-ggtt-node-removals-on-device.patch b/queue-6.12/drm-xe-process-deferred-ggtt-node-removals-on-device.patch
new file mode 100644
index 0000000..a235327
--- /dev/null
@@ -0,0 +1,99 @@
+From 7a23e1ef42a874c4681b818effa80a02f44fa7fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 13 Jun 2025 00:09:36 +0200
+Subject: drm/xe: Process deferred GGTT node removals on device unwind
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Michal Wajdeczko <michal.wajdeczko@intel.com>
+
+[ Upstream commit af2b588abe006bd55ddd358c4c3b87523349c475 ]
+
+While we are indirectly draining our dedicated workqueue ggtt->wq
+that we use to complete asynchronous removal of some GGTT nodes,
+this happens as part of the managed-drm unwinding (ggtt_fini_early),
+which could run later than the managed-device unwinding, where we could
+have already unmapped our MMIO/GMS mapping (mmio_fini).
+
+This was recently observed during unsuccessful VF initialization:
+
+ [ ] xe 0000:00:02.1: probe with driver xe failed with error -62
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e747340 __xe_bo_unpin_map_no_vm (16 bytes)
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e747540 __xe_bo_unpin_map_no_vm (16 bytes)
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e747240 __xe_bo_unpin_map_no_vm (16 bytes)
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e747040 tiles_fini (16 bytes)
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e746840 mmio_fini (16 bytes)
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e747f40 xe_bo_pinned_fini (16 bytes)
+ [ ] xe 0000:00:02.1: DEVRES REL ffff88811e746b40 devm_drm_dev_init_release (16 bytes)
+ [ ] xe 0000:00:02.1: [drm:drm_managed_release] drmres release begin
+ [ ] xe 0000:00:02.1: [drm:drm_managed_release] REL ffff88810ef81640 __fini_relay (8 bytes)
+ [ ] xe 0000:00:02.1: [drm:drm_managed_release] REL ffff88810ef80d40 guc_ct_fini (8 bytes)
+ [ ] xe 0000:00:02.1: [drm:drm_managed_release] REL ffff88810ef80040 __drmm_mutex_release (8 bytes)
+ [ ] xe 0000:00:02.1: [drm:drm_managed_release] REL ffff88810ef80140 ggtt_fini_early (8 bytes)
+
+and this was leading to:
+
+ [ ] BUG: unable to handle page fault for address: ffffc900058162a0
+ [ ] #PF: supervisor write access in kernel mode
+ [ ] #PF: error_code(0x0002) - not-present page
+ [ ] Oops: Oops: 0002 [#1] SMP NOPTI
+ [ ] Tainted: [W]=WARN
+ [ ] Workqueue: xe-ggtt-wq ggtt_node_remove_work_func [xe]
+ [ ] RIP: 0010:xe_ggtt_set_pte+0x6d/0x350 [xe]
+ [ ] Call Trace:
+ [ ]  <TASK>
+ [ ]  xe_ggtt_clear+0xb0/0x270 [xe]
+ [ ]  ggtt_node_remove+0xbb/0x120 [xe]
+ [ ]  ggtt_node_remove_work_func+0x30/0x50 [xe]
+ [ ]  process_one_work+0x22b/0x6f0
+ [ ]  worker_thread+0x1e8/0x3d
+
+Add managed-device action that will explicitly drain the workqueue
+with all pending node removals prior to releasing MMIO/GSM mapping.
+
+Fixes: 919bb54e989c ("drm/xe: Fix missing runtime outer protection for ggtt_remove_node")
+Signed-off-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Cc: Lucas De Marchi <lucas.demarchi@intel.com>
+Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Link: https://lore.kernel.org/r/20250612220937.857-2-michal.wajdeczko@intel.com
+(cherry picked from commit 89d2835c3680ab1938e22ad81b1c9f8c686bd391)
+Signed-off-by: Thomas Hellström <thomas.hellstrom@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/xe/xe_ggtt.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c
+index ff19eca5d358b..e9820126feb96 100644
+--- a/drivers/gpu/drm/xe/xe_ggtt.c
++++ b/drivers/gpu/drm/xe/xe_ggtt.c
+@@ -198,6 +198,13 @@ static const struct xe_ggtt_pt_ops xelpg_pt_wa_ops = {
+       .ggtt_set_pte = xe_ggtt_set_pte_and_flush,
+ };
++static void dev_fini_ggtt(void *arg)
++{
++      struct xe_ggtt *ggtt = arg;
++
++      drain_workqueue(ggtt->wq);
++}
++
+ /**
+  * xe_ggtt_init_early - Early GGTT initialization
+  * @ggtt: the &xe_ggtt to be initialized
+@@ -254,6 +261,10 @@ int xe_ggtt_init_early(struct xe_ggtt *ggtt)
+       if (err)
+               return err;
++      err = devm_add_action_or_reset(xe->drm.dev, dev_fini_ggtt, ggtt);
++      if (err)
++              return err;
++
+       if (IS_SRIOV_VF(xe)) {
+               err = xe_gt_sriov_vf_prepare_ggtt(xe_tile_get_gt(ggtt->tile, 0));
+               if (err)
+-- 
+2.39.5
+
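The fix relies on device-managed (devm) actions running in LIFO order: a drain action registered during GGTT init is guaranteed to run before the earlier-registered MMIO unmap. A userspace analogue of that ordering, with invented names:

    #include <stdio.h>

    /* Minimal LIFO cleanup stack in the spirit of devm_add_action_or_reset():
     * actions run in reverse registration order, so a cleanup registered
     * after the MMIO mapping's cleanup runs before the unmap. */
    typedef void (*action_fn)(void *);

    static struct { action_fn fn; void *arg; } actions[8];
    static int nr_actions;

    static int add_action(action_fn fn, void *arg)
    {
        if (nr_actions >= 8)
            return -1;
        actions[nr_actions].fn = fn;
        actions[nr_actions].arg = arg;
        nr_actions++;
        return 0;
    }

    static void run_actions(void)                /* device teardown */
    {
        while (nr_actions--)
            actions[nr_actions].fn(actions[nr_actions].arg);
    }

    static void unmap_mmio(void *arg)  { printf("unmap MMIO (%s)\n", (char *)arg); }
    static void drain_queue(void *arg) { printf("drain workqueue %s\n", (char *)arg); }

    int main(void)
    {
        add_action(unmap_mmio, "mmio_fini");     /* registered by MMIO init */
        add_action(drain_queue, "xe-ggtt-wq");   /* registered by GGTT init (the fix) */
        run_actions();                           /* drain first, unmap second */
        return 0;
    }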
diff --git a/queue-6.12/ethernet-ionic-fix-dma-mapping-tests.patch b/queue-6.12/ethernet-ionic-fix-dma-mapping-tests.patch
new file mode 100644 (file)
index 0000000..2503c37
--- /dev/null
@@ -0,0 +1,89 @@
+From 2c0b1d200adb3b455d79afb2d5fa2022a985cb18 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Jun 2025 11:45:30 +0200
+Subject: ethernet: ionic: Fix DMA mapping tests
+
+From: Thomas Fourier <fourier.thomas@gmail.com>
+
+[ Upstream commit d5e3241c5a386a2425823c8c7afb77a465bd040f ]
+
+Change error values of `ionic_tx_map_single()` and `ionic_tx_map_frag()`
+from 0 to `DMA_MAPPING_ERROR` to prevent collision with 0 as a valid
+address.
+
+This also fixes the use of `dma_mapping_error()` to test against 0 in
+`ionic_xdp_post_frame()`
+
+Fixes: 0f3154e6bcb3 ("ionic: Add Tx and Rx handling")
+Fixes: 56e41ee12d2d ("ionic: better dma-map error handling")
+Fixes: ac8813c0ab7d ("ionic: convert Rx queue buffers to use page_pool")
+Signed-off-by: Thomas Fourier <fourier.thomas@gmail.com>
+Reviewed-by: Brett Creeley <brett.creeley@amd.com>
+Link: https://patch.msgid.link/20250619094538.283723-2-fourier.thomas@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_txrx.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+index 0eeda7e502db2..0f5758c273c22 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c
+@@ -321,7 +321,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+                                          len, DMA_TO_DEVICE);
+       } else /* XDP_REDIRECT */ {
+               dma_addr = ionic_tx_map_single(q, frame->data, len);
+-              if (!dma_addr)
++              if (dma_addr == DMA_MAPPING_ERROR)
+                       return -EIO;
+       }
+@@ -357,7 +357,7 @@ static int ionic_xdp_post_frame(struct ionic_queue *q, struct xdp_frame *frame,
+                       } else {
+                               dma_addr = ionic_tx_map_frag(q, frag, 0,
+                                                            skb_frag_size(frag));
+-                              if (dma_mapping_error(q->dev, dma_addr)) {
++                              if (dma_addr == DMA_MAPPING_ERROR) {
+                                       ionic_tx_desc_unmap_bufs(q, desc_info);
+                                       return -EIO;
+                               }
+@@ -1083,7 +1083,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q,
+               net_warn_ratelimited("%s: DMA single map failed on %s!\n",
+                                    dev_name(dev), q->name);
+               q_to_tx_stats(q)->dma_map_err++;
+-              return 0;
++              return DMA_MAPPING_ERROR;
+       }
+       return dma_addr;
+ }
+@@ -1100,7 +1100,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q,
+               net_warn_ratelimited("%s: DMA frag map failed on %s!\n",
+                                    dev_name(dev), q->name);
+               q_to_tx_stats(q)->dma_map_err++;
+-              return 0;
++              return DMA_MAPPING_ERROR;
+       }
+       return dma_addr;
+ }
+@@ -1116,7 +1116,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
+       int frag_idx;
+       dma_addr = ionic_tx_map_single(q, skb->data, skb_headlen(skb));
+-      if (!dma_addr)
++      if (dma_addr == DMA_MAPPING_ERROR)
+               return -EIO;
+       buf_info->dma_addr = dma_addr;
+       buf_info->len = skb_headlen(skb);
+@@ -1126,7 +1126,7 @@ static int ionic_tx_map_skb(struct ionic_queue *q, struct sk_buff *skb,
+       nfrags = skb_shinfo(skb)->nr_frags;
+       for (frag_idx = 0; frag_idx < nfrags; frag_idx++, frag++) {
+               dma_addr = ionic_tx_map_frag(q, frag, 0, skb_frag_size(frag));
+-              if (!dma_addr)
++              if (dma_addr == DMA_MAPPING_ERROR)
+                       goto dma_fail;
+               buf_info->dma_addr = dma_addr;
+               buf_info->len = skb_frag_size(frag);
+-- 
+2.39.5
+
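The core of the fix is that a DMA address of 0 can be perfectly valid, so failure has to be signalled with the dedicated DMA_MAPPING_ERROR sentinel and tested with an explicit comparison. A small sketch of that convention, with a fake mapping function standing in for the driver's helpers:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t dma_addr_t;
    #define SKETCH_DMA_MAPPING_ERROR ((dma_addr_t)~0ULL)   /* like DMA_MAPPING_ERROR */

    /* Fake mapper: before the fix the failure path returned 0, which is
     * indistinguishable from a valid bus address of 0. */
    static dma_addr_t map_single_sketch(const void *buf, int fail)
    {
        if (fail)
            return SKETCH_DMA_MAPPING_ERROR;
        return (dma_addr_t)(uintptr_t)buf;         /* identity "mapping" */
    }

    int main(void)
    {
        char frame[64];
        dma_addr_t addr = map_single_sketch(frame, 0);

        if (addr == SKETCH_DMA_MAPPING_ERROR)      /* not "if (!addr)" */
            return 1;
        printf("mapped at %#llx\n", (unsigned long long)addr);
        return 0;
    }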
diff --git a/queue-6.12/hid-wacom-fix-crash-in-wacom_aes_battery_handler.patch b/queue-6.12/hid-wacom-fix-crash-in-wacom_aes_battery_handler.patch
new file mode 100644 (file)
index 0000000..c416c67
--- /dev/null
@@ -0,0 +1,46 @@
+From 54403c39de10fb52dddb75b698dbc330ffd7a67b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 May 2025 10:54:46 +0200
+Subject: HID: wacom: fix crash in wacom_aes_battery_handler()
+
+From: Thomas Zeitlhofer <thomas.zeitlhofer+lkml@ze-it.at>
+
+[ Upstream commit f3054152c12e2eed1e72704aff47b0ea58229584 ]
+
+Commit fd2a9b29dc9c ("HID: wacom: Remove AES power_supply after extended
+inactivity") introduced wacom_aes_battery_handler() which is scheduled
+as a delayed work (aes_battery_work).
+
+In wacom_remove(), aes_battery_work is not canceled. Consequently, if
+the device is removed while aes_battery_work is still pending, then hard
+crashes or "Oops: general protection fault..." are experienced when
+wacom_aes_battery_handler() is finally called. E.g., this happens with
+built-in USB devices after resume from hibernate when aes_battery_work
+was still pending at the time of hibernation.
+
+So, take care to cancel aes_battery_work in wacom_remove().
+
+Fixes: fd2a9b29dc9c ("HID: wacom: Remove AES power_supply after extended inactivity")
+Signed-off-by: Thomas Zeitlhofer <thomas.zeitlhofer+lkml@ze-it.at>
+Acked-by: Ping Cheng <ping.cheng@wacom.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/wacom_sys.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
+index 34428349fa311..64afaa243942c 100644
+--- a/drivers/hid/wacom_sys.c
++++ b/drivers/hid/wacom_sys.c
+@@ -2874,6 +2874,7 @@ static void wacom_remove(struct hid_device *hdev)
+       hid_hw_stop(hdev);
+       cancel_delayed_work_sync(&wacom->init_work);
++      cancel_delayed_work_sync(&wacom->aes_battery_work);
+       cancel_work_sync(&wacom->wireless_work);
+       cancel_work_sync(&wacom->battery_work);
+       cancel_work_sync(&wacom->remote_work);
+-- 
+2.39.5
+
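The crash pattern is generic: deferred work that still references driver data must be cancelled before that data is freed. A single-threaded toy model of a delayed-work queue makes the ordering visible; all names here are illustrative, not the HID core's API:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct work { void (*fn)(void *); void *arg; int pending; };
    static struct work queue[8];

    static void schedule_work(void (*fn)(void *), void *arg)
    {
        for (int i = 0; i < 8; i++)
            if (!queue[i].pending) { queue[i] = (struct work){ fn, arg, 1 }; return; }
    }

    static void cancel_work(void (*fn)(void *), void *arg)
    {
        for (int i = 0; i < 8; i++)
            if (queue[i].pending && queue[i].fn == fn && queue[i].arg == arg)
                queue[i].pending = 0;
    }

    static void run_pending(void)
    {
        for (int i = 0; i < 8; i++)
            if (queue[i].pending) { queue[i].pending = 0; queue[i].fn(queue[i].arg); }
    }

    struct wacom { char name[16]; };

    static void battery_handler(void *arg)
    {
        struct wacom *w = arg;              /* would be a UAF if w were freed */
        printf("battery work for %s\n", w->name);
    }

    int main(void)
    {
        struct wacom *w = malloc(sizeof(*w));

        if (!w)
            return 1;
        strcpy(w->name, "wacom0");
        schedule_work(battery_handler, w);

        /* remove path: cancel everything still referencing w, then free it */
        cancel_work(battery_handler, w);
        free(w);

        run_pending();              /* safe: nothing pending points at freed memory */
        return 0;
    }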
diff --git a/queue-6.12/libbpf-fix-null-pointer-dereference-in-btf_dump__fre.patch b/queue-6.12/libbpf-fix-null-pointer-dereference-in-btf_dump__fre.patch
new file mode 100644
index 0000000..d4d6fb7
--- /dev/null
@@ -0,0 +1,42 @@
+From 087e5e00df011a081a0b9420c6bbcbf52ef077e9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Jun 2025 09:19:33 +0800
+Subject: libbpf: Fix null pointer dereference in btf_dump__free on allocation
+ failure
+
+From: Yuan Chen <chenyuan@kylinos.cn>
+
+[ Upstream commit aa485e8789d56a4573f7c8d000a182b749eaa64d ]
+
+When btf_dump__new() fails to allocate memory for the internal hashmap
+(btf_dump->type_names), it returns an error code. However, the cleanup
+function btf_dump__free() does not check if btf_dump->type_names is NULL
+before attempting to free it. This leads to a null pointer dereference
+when btf_dump__free() is called on such a partially initialized btf_dump object.
+
+Fixes: 351131b51c7a ("libbpf: add btf_dump API for BTF-to-C conversion")
+Signed-off-by: Yuan Chen <chenyuan@kylinos.cn>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20250618011933.11423-1-chenyuan_fl@163.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/lib/bpf/btf_dump.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
+index 46cce18c83086..12306b5de3efb 100644
+--- a/tools/lib/bpf/btf_dump.c
++++ b/tools/lib/bpf/btf_dump.c
+@@ -225,6 +225,9 @@ static void btf_dump_free_names(struct hashmap *map)
+       size_t bkt;
+       struct hashmap_entry *cur;
++      if (!map)
++              return;
++
+       hashmap__for_each_entry(map, cur, bkt)
+               free((void *)cur->pkey);
+-- 
+2.39.5
+
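The shape of the fix is the classic "destructor must tolerate a partially constructed object" guard. A simplified analogue, using an array-backed name table instead of libbpf's hashmap:

    #include <stdlib.h>

    struct name_map { char **names; size_t cnt; };

    static void free_names(struct name_map *map)
    {
        if (!map)                     /* the added guard: allocation may have failed */
            return;
        for (size_t i = 0; i < map->cnt; i++)
            free(map->names[i]);
        free(map->names);
        free(map);
    }

    struct dumper { struct name_map *type_names; };

    static void dumper_free(struct dumper *d)
    {
        if (!d)
            return;
        free_names(d->type_names);    /* safe even when the constructor bailed early */
        free(d);
    }

    int main(void)
    {
        /* mimics btf_dump__new() failing after partial setup:
         * type_names stays NULL, yet the destructor must not crash */
        struct dumper *d = calloc(1, sizeof(*d));

        dumper_free(d);
        return 0;
    }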
diff --git a/queue-6.12/libbpf-fix-possible-use-after-free-for-externs.patch b/queue-6.12/libbpf-fix-possible-use-after-free-for-externs.patch
new file mode 100644
index 0000000..515bccf
--- /dev/null
@@ -0,0 +1,113 @@
+From b863e364a50729414905a6b2de616399daa59107 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 22:02:15 -0700
+Subject: libbpf: Fix possible use-after-free for externs
+
+From: Adin Scannell <amscanne@meta.com>
+
+[ Upstream commit fa6f092cc0a02d0fcee37e9e8172eda372a03d33 ]
+
+The `name` field in `obj->externs` points into the BTF data at initial
+open time. However, some functions may invalidate this after opening and
+before loading (e.g. `bpf_map__set_value_size`), which results in
+pointers into freed memory and undefined behavior.
+
+The simplest solution is to simply `strdup` these strings, similar to
+the `essent_name`, and free them at the same time.
+
+In order to test this path, the `global_map_resize` BPF selftest is
+modified slightly to ensure the presence of an extern, which causes this
+test to fail prior to the fix. Given there isn't an obvious API or error
+to test against, I opted to add this to the existing test as an aspect
+of the resizing feature rather than duplicate the test.
+
+Fixes: 9d0a23313b1a ("libbpf: Add capability for resizing datasec maps")
+Signed-off-by: Adin Scannell <amscanne@meta.com>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/bpf/20250625050215.2777374-1-amscanne@meta.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/lib/bpf/libbpf.c                           | 10 +++++++---
+ .../selftests/bpf/progs/test_global_map_resize.c | 16 ++++++++++++++++
+ 2 files changed, 23 insertions(+), 3 deletions(-)
+
+diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
+index 1290314da6761..36e341b4b77bf 100644
+--- a/tools/lib/bpf/libbpf.c
++++ b/tools/lib/bpf/libbpf.c
+@@ -596,7 +596,7 @@ struct extern_desc {
+       int sym_idx;
+       int btf_id;
+       int sec_btf_id;
+-      const char *name;
++      char *name;
+       char *essent_name;
+       bool is_set;
+       bool is_weak;
+@@ -4223,7 +4223,9 @@ static int bpf_object__collect_externs(struct bpf_object *obj)
+                       return ext->btf_id;
+               }
+               t = btf__type_by_id(obj->btf, ext->btf_id);
+-              ext->name = btf__name_by_offset(obj->btf, t->name_off);
++              ext->name = strdup(btf__name_by_offset(obj->btf, t->name_off));
++              if (!ext->name)
++                      return -ENOMEM;
+               ext->sym_idx = i;
+               ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
+@@ -9062,8 +9064,10 @@ void bpf_object__close(struct bpf_object *obj)
+       zfree(&obj->btf_custom_path);
+       zfree(&obj->kconfig);
+-      for (i = 0; i < obj->nr_extern; i++)
++      for (i = 0; i < obj->nr_extern; i++) {
++              zfree(&obj->externs[i].name);
+               zfree(&obj->externs[i].essent_name);
++      }
+       zfree(&obj->externs);
+       obj->nr_extern = 0;
+diff --git a/tools/testing/selftests/bpf/progs/test_global_map_resize.c b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+index a3f220ba7025b..ee65bad0436d0 100644
+--- a/tools/testing/selftests/bpf/progs/test_global_map_resize.c
++++ b/tools/testing/selftests/bpf/progs/test_global_map_resize.c
+@@ -32,6 +32,16 @@ int my_int_last SEC(".data.array_not_last");
+ int percpu_arr[1] SEC(".data.percpu_arr");
++/* at least one extern is included, to ensure that a specific
++ * regression is tested whereby resizing resulted in a free-after-use
++ * bug after type information is invalidated by the resize operation.
++ *
++ * There isn't a particularly good API to test for this specific condition,
++ * but by having externs for the resizing tests it will cover this path.
++ */
++extern int LINUX_KERNEL_VERSION __kconfig;
++long version_sink;
++
+ SEC("tp/syscalls/sys_enter_getpid")
+ int bss_array_sum(void *ctx)
+ {
+@@ -44,6 +54,9 @@ int bss_array_sum(void *ctx)
+       for (size_t i = 0; i < bss_array_len; ++i)
+               sum += array[i];
++      /* see above; ensure this is not optimized out */
++      version_sink = LINUX_KERNEL_VERSION;
++
+       return 0;
+ }
+@@ -59,6 +72,9 @@ int data_array_sum(void *ctx)
+       for (size_t i = 0; i < data_array_len; ++i)
+               sum += my_array[i];
++      /* see above; ensure this is not optimized out */
++      version_sink = LINUX_KERNEL_VERSION;
++
+       return 0;
+ }
+-- 
+2.39.5
+
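The underlying rule is that a long-lived descriptor must not keep pointers into a buffer someone else may reallocate. A stand-alone sketch with simplified structures; the string content and the resize helper are only illustrative:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ext_desc { char *name; };        /* was a pointer into the BTF data */

    static char *grow_strings(char *strs, size_t *len)
    {
        /* stand-in for an operation (e.g. a datasec resize) that may
         * reallocate the string data and move it in memory */
        *len += 64;
        return realloc(strs, *len);
    }

    int main(void)
    {
        size_t len = 32;
        char *strs = malloc(len);
        struct ext_desc ext;

        if (!strs)
            return 1;
        strcpy(strs, "LINUX_KERNEL_VERSION");

        ext.name = strdup(strs);            /* the fix: own a copy, don't point
                                             * into memory that may move */
        if (!ext.name)
            return 1;

        strs = grow_strings(strs, &len);    /* old pointer may now be stale */
        printf("extern: %s\n", ext.name);   /* still safe after the resize */

        free(ext.name);
        free(strs);
        return 0;
    }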
diff --git a/queue-6.12/net-enetc-correct-endianness-handling-in-_enetc_rd_r.patch b/queue-6.12/net-enetc-correct-endianness-handling-in-_enetc_rd_r.patch
new file mode 100644
index 0000000..80a8a31
--- /dev/null
@@ -0,0 +1,60 @@
+From b3a2e6af877d18e0ce5fbca2423b987e68eb9635 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 17:35:12 +0100
+Subject: net: enetc: Correct endianness handling in _enetc_rd_reg64
+
+From: Simon Horman <horms@kernel.org>
+
+[ Upstream commit 7b515f35a911fdc31fbde6531828dcd6ae9803d3 ]
+
+enetc_hw.h provides two versions of _enetc_rd_reg64: one that simply
+calls ioread64() when available, and another that composes the 64-bit
+result from ioread32() calls.
+
+In the second case the code appears to assume that each ioread32() call
+returns a little-endian value. However both the shift and logical or
+used to compose the return value would not work correctly on big endian
+systems if this were the case. Moreover, this is inconsistent with the
+first case where the return value of ioread64() is assumed to be in host
+byte order.
+
+It appears that the correct approach is for both versions to treat the
+return value of ioread*() functions as being in host byte order. And
+this patch corrects the ioread32()-based version to do so.
+
+This is a bug but would only manifest on big endian systems
+that make use of the ioread32-based implementation of _enetc_rd_reg64,
+while all in-tree users of this driver are little endian and
+make use of the ioread64-based implementation of _enetc_rd_reg64.
+Thus, no in-tree user of this driver is affected by this bug.
+
+Flagged by Sparse.
+Compile tested only.
+
+Fixes: 16eb4c85c964 ("enetc: Add ethtool statistics")
+Closes: https://lore.kernel.org/all/AM9PR04MB850500D3FC24FE23DEFCEA158879A@AM9PR04MB8505.eurprd04.prod.outlook.com/
+Signed-off-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Link: https://patch.msgid.link/20250624-etnetc-le-v1-1-a73a95d96e4e@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/enetc/enetc_hw.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+index 1619943fb2637..4e8881b479e48 100644
+--- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h
++++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h
+@@ -485,7 +485,7 @@ static inline u64 _enetc_rd_reg64(void __iomem *reg)
+               tmp = ioread32(reg + 4);
+       } while (high != tmp);
+-      return le64_to_cpu((__le64)high << 32 | low);
++      return (u64)high << 32 | low;
+ }
+ #endif
+-- 
+2.39.5
+
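For reference, the corrected helper amounts to composing the value from two host-order 32-bit reads, with a retry when the high word changes in between; no byte swapping is involved. A hedged userspace rendition, where array indexing stands in for the ioread32() calls:

    #include <stdint.h>
    #include <stdio.h>

    /* Both 32-bit halves arrive in host byte order, so the result is built
     * with plain shifts; re-reading the high half guards against a torn read
     * when the counter wraps between the two accesses. */
    static uint64_t rd_reg64_sketch(const volatile uint32_t *reg)
    {
        uint32_t low, high, tmp;

        do {
            high = reg[1];          /* stand-in for ioread32(reg + 4) */
            low  = reg[0];          /* stand-in for ioread32(reg)     */
            tmp  = reg[1];
        } while (high != tmp);      /* retry if the high word changed mid-read */

        return (uint64_t)high << 32 | low;
    }

    int main(void)
    {
        volatile uint32_t counter[2] = { 0x89abcdefu, 0x01234567u };

        printf("%#llx\n", (unsigned long long)rd_reg64_sketch(counter));
        return 0;
    }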
diff --git a/queue-6.12/net-selftests-fix-tcp-packet-checksum.patch b/queue-6.12/net-selftests-fix-tcp-packet-checksum.patch
new file mode 100644
index 0000000..8188fb8
--- /dev/null
@@ -0,0 +1,46 @@
+From 9c9685007b1a742e60f062bd0b1ff39d43982f73 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 11:32:58 -0700
+Subject: net: selftests: fix TCP packet checksum
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit 8d89661a36dd3bb8c9902cff36dc0c144dce3faf ]
+
+The length in the pseudo header should be the length of the L3 payload
+AKA the L4 header+payload. The selftest code builds the packet from
+the lower layers up, so all the headers are pushed already when it
+constructs L4. We need to subtract the lower layer headers from skb->len.
+
+Fixes: 3e1e58d64c3d ("net: add generic selftest support")
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Reviewed-by: Gerhard Engleder <gerhard@engleder-embedded.com>
+Reported-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Tested-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Reviewed-by: Oleksij Rempel <o.rempel@pengutronix.de>
+Link: https://patch.msgid.link/20250624183258.3377740-1-kuba@kernel.org
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/selftests.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/core/selftests.c b/net/core/selftests.c
+index 561653f9d71d4..ef27594d6a996 100644
+--- a/net/core/selftests.c
++++ b/net/core/selftests.c
+@@ -160,8 +160,9 @@ static struct sk_buff *net_test_get_skb(struct net_device *ndev,
+       skb->csum = 0;
+       skb->ip_summed = CHECKSUM_PARTIAL;
+       if (attr->tcp) {
+-              thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr,
+-                                          ihdr->daddr, 0);
++              int l4len = skb->len - skb_transport_offset(skb);
++
++              thdr->check = ~tcp_v4_check(l4len, ihdr->saddr, ihdr->daddr, 0);
+               skb->csum_start = skb_transport_header(skb) - skb->head;
+               skb->csum_offset = offsetof(struct tcphdr, check);
+       } else {
+-- 
+2.39.5
+
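The arithmetic behind the fix: the pseudo-header length passed to tcp_v4_check() must cover only the TCP header plus payload, so the lower-layer headers already counted in skb->len have to be subtracted via the transport offset. A tiny worked example with the usual option-less header sizes:

    #include <stdio.h>

    #define ETH_HLEN   14
    #define IPV4_HLEN  20   /* no options */
    #define TCP_HLEN   20   /* no options */

    int main(void)
    {
        unsigned int payload_len = 100;
        unsigned int skb_len = ETH_HLEN + IPV4_HLEN + TCP_HLEN + payload_len;
        unsigned int transport_offset = ETH_HLEN + IPV4_HLEN;

        /* what the fix passes to tcp_v4_check() instead of skb_len */
        unsigned int l4len = skb_len - transport_offset;

        printf("pseudo-header length: %u (was wrongly %u)\n", l4len, skb_len);
        return 0;
    }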
diff --git a/queue-6.12/netlink-specs-tc-replace-underscores-with-dashes-in-.patch b/queue-6.12/netlink-specs-tc-replace-underscores-with-dashes-in-.patch
new file mode 100644
index 0000000..c808ddf
--- /dev/null
@@ -0,0 +1,48 @@
+From e6a91179f25a24d3f7651e19d4ffa56178f917b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 14:10:01 -0700
+Subject: netlink: specs: tc: replace underscores with dashes in names
+
+From: Jakub Kicinski <kuba@kernel.org>
+
+[ Upstream commit eef0eaeca7fa8e358a31e89802f564451b797718 ]
+
+We're trying to add a strict regexp for the name format in the spec.
+Underscores will not be allowed; dashes should be used instead.
+This makes no difference to C (codegen, if used, replaces special
+chars in names) but it gives more uniform naming in Python.
+
+Fixes: a1bcfde83669 ("doc/netlink/specs: Add a spec for tc")
+Reviewed-by: Donald Hunter <donald.hunter@gmail.com>
+Link: https://patch.msgid.link/20250624211002.3475021-10-kuba@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/netlink/specs/tc.yaml | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml
+index c5579a5412fc9..043f205bc1ae7 100644
+--- a/Documentation/netlink/specs/tc.yaml
++++ b/Documentation/netlink/specs/tc.yaml
+@@ -227,7 +227,7 @@ definitions:
+         type: u8
+         doc: log(P_max / (qth-max - qth-min))
+       -
+-        name: Scell_log
++        name: Scell-log
+         type: u8
+         doc: cell size for idle damping
+       -
+@@ -248,7 +248,7 @@ definitions:
+         name: DPs
+         type: u32
+       -
+-        name: def_DP
++        name: def-DP
+         type: u32
+       -
+         name: grio
+-- 
+2.39.5
+
diff --git a/queue-6.12/series b/queue-6.12/series
index 249f10f2aa8de9bbf0e3b1c99c2d844be1e04f02..be83722c98c5c9aae854ac8cf18ca77b16b06f36 100644
@@ -96,3 +96,40 @@ revert-riscv-define-task_size_max-for-__access_ok.patch
 revert-riscv-misaligned-fix-sleeping-function-called-during-misaligned-access-handling.patch
 drm-dp-change-aux-dpcd-probe-address-from-dpcd_rev-to-lane0_1_status.patch
 drm-xe-display-add-check-for-alloc_ordered_workqueue.patch
+hid-wacom-fix-crash-in-wacom_aes_battery_handler.patch
+atm-clip-prevent-null-deref-in-clip_push.patch
+bluetooth-hci_core-fix-use-after-free-in-vhci_flush.patch
+alsa-usb-audio-fix-out-of-bounds-read-in-snd_usb_get.patch
+attach_recursive_mnt-do-not-lock-the-covering-tree-w.patch
+libbpf-fix-null-pointer-dereference-in-btf_dump__fre.patch
+ethernet-ionic-fix-dma-mapping-tests.patch
+wifi-mac80211-fix-beacon-interval-calculation-overfl.patch
+af_unix-don-t-set-econnreset-for-consumed-oob-skb.patch
+wifi-mac80211-add-link-iteration-macro-for-link-data.patch
+wifi-mac80211-create-separate-links-for-vlan-interfa.patch
+wifi-mac80211-finish-link-init-before-rcu-publish.patch
+vsock-uapi-fix-linux-vm_sockets.h-userspace-compilat.patch
+bnxt-properly-flush-xdp-redirect-lists.patch
+um-ubd-add-missing-error-check-in-start_io_thread.patch
+libbpf-fix-possible-use-after-free-for-externs.patch
+net-enetc-correct-endianness-handling-in-_enetc_rd_r.patch
+netlink-specs-tc-replace-underscores-with-dashes-in-.patch
+atm-release-atm_dev_mutex-after-removing-procfs-in-a.patch
+alsa-hda-realtek-fix-built-in-mic-on-asus-vivobook-x.patch
+net-selftests-fix-tcp-packet-checksum.patch
+drm-amdgpu-discovery-optionally-use-fw-based-ip-disc.patch
+drm-amd-adjust-output-for-discovery-error-handling.patch
+drm-i915-fix-build-error-some-more.patch
+drm-bridge-ti-sn65dsi86-make-use-of-debugfs_init-cal.patch
+drm-bridge-ti-sn65dsi86-add-hpd-for-displayport-conn.patch
+drm-xe-process-deferred-ggtt-node-removals-on-device.patch
+smb-client-fix-potential-deadlock-when-reconnecting-.patch
+smb-smbdirect-add-smbdirect_pdu.h-with-protocol-defi.patch
+smb-client-make-use-of-common-smbdirect_pdu.h.patch
+smb-smbdirect-add-smbdirect.h-with-public-structures.patch
+smb-smbdirect-add-smbdirect_socket.h.patch
+smb-client-make-use-of-common-smbdirect_socket.patch
+smb-smbdirect-introduce-smbdirect_socket_parameters.patch
+smb-client-make-use-of-common-smbdirect_socket_param.patch
+cifs-fix-the-smbd_response-slab-to-allow-usercopy.patch
+cifs-fix-reading-into-an-iter_folioq-from-the-smbdir.patch
diff --git a/queue-6.12/smb-client-fix-potential-deadlock-when-reconnecting-.patch b/queue-6.12/smb-client-fix-potential-deadlock-when-reconnecting-.patch
new file mode 100644
index 0000000..e9694d6
--- /dev/null
@@ -0,0 +1,214 @@
+From fefc903dec13535230c69dc7ad94635eecba41d0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Jun 2025 12:22:38 -0300
+Subject: smb: client: fix potential deadlock when reconnecting channels
+
+From: Paulo Alcantara <pc@manguebit.org>
+
+[ Upstream commit 711741f94ac3cf9f4e3aa73aa171e76d188c0819 ]
+
+Fix cifs_signal_cifsd_for_reconnect() to take the locks in the correct order
+and prevent the following deadlock from happening
+
+======================================================
+WARNING: possible circular locking dependency detected
+6.16.0-rc3-build2+ #1301 Tainted: G S      W
+------------------------------------------------------
+cifsd/6055 is trying to acquire lock:
+ffff88810ad56038 (&tcp_ses->srv_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0x134/0x200
+
+but task is already holding lock:
+ffff888119c64330 (&ret_buf->chan_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0xcf/0x200
+
+which lock already depends on the new lock.
+
+the existing dependency chain (in reverse order) is:
+
+-> #2 (&ret_buf->chan_lock){+.+.}-{3:3}:
+       validate_chain+0x1cf/0x270
+       __lock_acquire+0x60e/0x780
+       lock_acquire.part.0+0xb4/0x1f0
+       _raw_spin_lock+0x2f/0x40
+       cifs_setup_session+0x81/0x4b0
+       cifs_get_smb_ses+0x771/0x900
+       cifs_mount_get_session+0x7e/0x170
+       cifs_mount+0x92/0x2d0
+       cifs_smb3_do_mount+0x161/0x460
+       smb3_get_tree+0x55/0x90
+       vfs_get_tree+0x46/0x180
+       do_new_mount+0x1b0/0x2e0
+       path_mount+0x6ee/0x740
+       do_mount+0x98/0xe0
+       __do_sys_mount+0x148/0x180
+       do_syscall_64+0xa4/0x260
+       entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+-> #1 (&ret_buf->ses_lock){+.+.}-{3:3}:
+       validate_chain+0x1cf/0x270
+       __lock_acquire+0x60e/0x780
+       lock_acquire.part.0+0xb4/0x1f0
+       _raw_spin_lock+0x2f/0x40
+       cifs_match_super+0x101/0x320
+       sget+0xab/0x270
+       cifs_smb3_do_mount+0x1e0/0x460
+       smb3_get_tree+0x55/0x90
+       vfs_get_tree+0x46/0x180
+       do_new_mount+0x1b0/0x2e0
+       path_mount+0x6ee/0x740
+       do_mount+0x98/0xe0
+       __do_sys_mount+0x148/0x180
+       do_syscall_64+0xa4/0x260
+       entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+-> #0 (&tcp_ses->srv_lock){+.+.}-{3:3}:
+       check_noncircular+0x95/0xc0
+       check_prev_add+0x115/0x2f0
+       validate_chain+0x1cf/0x270
+       __lock_acquire+0x60e/0x780
+       lock_acquire.part.0+0xb4/0x1f0
+       _raw_spin_lock+0x2f/0x40
+       cifs_signal_cifsd_for_reconnect+0x134/0x200
+       __cifs_reconnect+0x8f/0x500
+       cifs_handle_standard+0x112/0x280
+       cifs_demultiplex_thread+0x64d/0xbc0
+       kthread+0x2f7/0x310
+       ret_from_fork+0x2a/0x230
+       ret_from_fork_asm+0x1a/0x30
+
+other info that might help us debug this:
+
+Chain exists of:
+  &tcp_ses->srv_lock --> &ret_buf->ses_lock --> &ret_buf->chan_lock
+
+ Possible unsafe locking scenario:
+
+       CPU0                    CPU1
+       ----                    ----
+  lock(&ret_buf->chan_lock);
+                               lock(&ret_buf->ses_lock);
+                               lock(&ret_buf->chan_lock);
+  lock(&tcp_ses->srv_lock);
+
+ *** DEADLOCK ***
+
+3 locks held by cifsd/6055:
+ #0: ffffffff857de398 (&cifs_tcp_ses_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0x7b/0x200
+ #1: ffff888119c64060 (&ret_buf->ses_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0x9c/0x200
+ #2: ffff888119c64330 (&ret_buf->chan_lock){+.+.}-{3:3}, at: cifs_signal_cifsd_for_reconnect+0xcf/0x200
+
+Cc: linux-cifs@vger.kernel.org
+Reported-by: David Howells <dhowells@redhat.com>
+Fixes: d7d7a66aacd6 ("cifs: avoid use of global locks for high contention data")
+Reviewed-by: David Howells <dhowells@redhat.com>
+Tested-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Paulo Alcantara (Red Hat) <pc@manguebit.org>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifsglob.h |  1 +
+ fs/smb/client/connect.c  | 58 +++++++++++++++++++++++++---------------
+ 2 files changed, 37 insertions(+), 22 deletions(-)
+
+diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
+index d573740e54a1a..c66655adecb2c 100644
+--- a/fs/smb/client/cifsglob.h
++++ b/fs/smb/client/cifsglob.h
+@@ -677,6 +677,7 @@ inc_rfc1001_len(void *buf, int count)
+ struct TCP_Server_Info {
+       struct list_head tcp_ses_list;
+       struct list_head smb_ses_list;
++      struct list_head rlist; /* reconnect list */
+       spinlock_t srv_lock;  /* protect anything here that is not protected */
+       __u64 conn_id; /* connection identifier (useful for debugging) */
+       int srv_count; /* reference counter */
+diff --git a/fs/smb/client/connect.c b/fs/smb/client/connect.c
+index 91f5fd818cbf4..9275e0d1e2f64 100644
+--- a/fs/smb/client/connect.c
++++ b/fs/smb/client/connect.c
+@@ -140,6 +140,14 @@ static void smb2_query_server_interfaces(struct work_struct *work)
+                          (SMB_INTERFACE_POLL_INTERVAL * HZ));
+ }
++#define set_need_reco(server) \
++do { \
++      spin_lock(&server->srv_lock); \
++      if (server->tcpStatus != CifsExiting) \
++              server->tcpStatus = CifsNeedReconnect; \
++      spin_unlock(&server->srv_lock); \
++} while (0)
++
+ /*
+  * Update the tcpStatus for the server.
+  * This is used to signal the cifsd thread to call cifs_reconnect
+@@ -153,39 +161,45 @@ void
+ cifs_signal_cifsd_for_reconnect(struct TCP_Server_Info *server,
+                               bool all_channels)
+ {
+-      struct TCP_Server_Info *pserver;
++      struct TCP_Server_Info *nserver;
+       struct cifs_ses *ses;
++      LIST_HEAD(reco);
+       int i;
+-      /* If server is a channel, select the primary channel */
+-      pserver = SERVER_IS_CHAN(server) ? server->primary_server : server;
+-
+       /* if we need to signal just this channel */
+       if (!all_channels) {
+-              spin_lock(&server->srv_lock);
+-              if (server->tcpStatus != CifsExiting)
+-                      server->tcpStatus = CifsNeedReconnect;
+-              spin_unlock(&server->srv_lock);
++              set_need_reco(server);
+               return;
+       }
+-      spin_lock(&cifs_tcp_ses_lock);
+-      list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
+-              if (cifs_ses_exiting(ses))
+-                      continue;
+-              spin_lock(&ses->chan_lock);
+-              for (i = 0; i < ses->chan_count; i++) {
+-                      if (!ses->chans[i].server)
++      if (SERVER_IS_CHAN(server))
++              server = server->primary_server;
++      scoped_guard(spinlock, &cifs_tcp_ses_lock) {
++              set_need_reco(server);
++              list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
++                      spin_lock(&ses->ses_lock);
++                      if (ses->ses_status == SES_EXITING) {
++                              spin_unlock(&ses->ses_lock);
+                               continue;
+-
+-                      spin_lock(&ses->chans[i].server->srv_lock);
+-                      if (ses->chans[i].server->tcpStatus != CifsExiting)
+-                              ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+-                      spin_unlock(&ses->chans[i].server->srv_lock);
++                      }
++                      spin_lock(&ses->chan_lock);
++                      for (i = 1; i < ses->chan_count; i++) {
++                              nserver = ses->chans[i].server;
++                              if (!nserver)
++                                      continue;
++                              nserver->srv_count++;
++                              list_add(&nserver->rlist, &reco);
++                      }
++                      spin_unlock(&ses->chan_lock);
++                      spin_unlock(&ses->ses_lock);
+               }
+-              spin_unlock(&ses->chan_lock);
+       }
+-      spin_unlock(&cifs_tcp_ses_lock);
++
++      list_for_each_entry_safe(server, nserver, &reco, rlist) {
++              list_del_init(&server->rlist);
++              set_need_reco(server);
++              cifs_put_tcp_session(server, 0);
++      }
+ }
+ /*
+-- 
+2.39.5
+
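The fix follows a common deadlock-avoidance recipe: never take a channel's srv_lock while ses_lock/chan_lock are held; instead pin each server with a reference, queue it on a private list, and only flag it for reconnect after the outer locks are dropped. A much-simplified single-threaded model of that reordering (iteration and reference counting are reduced to the bare minimum, names are illustrative):

    #include <pthread.h>
    #include <stdio.h>

    struct server {
        pthread_mutex_t srv_lock;
        int refcount;
        int need_reconnect;
        struct server *rlist;            /* private reconnect-list linkage */
    };

    static pthread_mutex_t ses_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct server *channels[2];

    static void signal_reconnect(void)
    {
        struct server *reco = NULL;

        pthread_mutex_lock(&ses_lock);
        for (int i = 0; i < 2; i++) {
            struct server *s = channels[i];

            if (!s)
                continue;
            s->refcount++;               /* pin it; srv_lock is NOT taken here */
            s->rlist = reco;
            reco = s;
        }
        pthread_mutex_unlock(&ses_lock);

        while (reco) {                   /* outside ses_lock: safe lock order */
            struct server *s = reco;

            reco = s->rlist;
            pthread_mutex_lock(&s->srv_lock);
            s->need_reconnect = 1;
            pthread_mutex_unlock(&s->srv_lock);
            s->refcount--;               /* cifs_put_tcp_session() in the real code */
        }
    }

    int main(void)
    {
        static struct server a = { PTHREAD_MUTEX_INITIALIZER, 1, 0, NULL };
        static struct server b = { PTHREAD_MUTEX_INITIALIZER, 1, 0, NULL };

        channels[0] = &a;
        channels[1] = &b;
        signal_reconnect();
        printf("reconnect flags: %d %d\n", a.need_reconnect, b.need_reconnect);
        return 0;
    }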
diff --git a/queue-6.12/smb-client-make-use-of-common-smbdirect_pdu.h.patch b/queue-6.12/smb-client-make-use-of-common-smbdirect_pdu.h.patch
new file mode 100644
index 0000000..77aaebd
--- /dev/null
@@ -0,0 +1,240 @@
+From d83090d1cdae0be80a41e99fd183dac4dbb78c3b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:31 +0200
+Subject: smb: client: make use of common smbdirect_pdu.h
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 64946d5be665ddac6b5bf11f5b5ff319aae0f4c6 ]
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.c | 40 ++++++++++++++++++--------------------
+ fs/smb/client/smbdirect.h | 41 ---------------------------------------
+ 2 files changed, 19 insertions(+), 62 deletions(-)
+
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index 9d8be034f103f..d506ab259e082 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -7,6 +7,7 @@
+ #include <linux/module.h>
+ #include <linux/highmem.h>
+ #include <linux/folio_queue.h>
++#include "../common/smbdirect/smbdirect_pdu.h"
+ #include "smbdirect.h"
+ #include "cifs_debug.h"
+ #include "cifsproto.h"
+@@ -50,9 +51,6 @@ struct smb_extract_to_rdma {
+ static ssize_t smb_extract_iter_to_rdma(struct iov_iter *iter, size_t len,
+                                       struct smb_extract_to_rdma *rdma);
+-/* SMBD version number */
+-#define SMBD_V1       0x0100
+-
+ /* Port numbers for SMBD transport */
+ #define SMB_PORT      445
+ #define SMBD_PORT     5445
+@@ -299,7 +297,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+       mempool_free(request, request->info->request_mempool);
+ }
+-static void dump_smbd_negotiate_resp(struct smbd_negotiate_resp *resp)
++static void dump_smbdirect_negotiate_resp(struct smbdirect_negotiate_resp *resp)
+ {
+       log_rdma_event(INFO, "resp message min_version %u max_version %u negotiated_version %u credits_requested %u credits_granted %u status %u max_readwrite_size %u preferred_send_size %u max_receive_size %u max_fragmented_size %u\n",
+                      resp->min_version, resp->max_version,
+@@ -318,15 +316,15 @@ static bool process_negotiation_response(
+               struct smbd_response *response, int packet_length)
+ {
+       struct smbd_connection *info = response->info;
+-      struct smbd_negotiate_resp *packet = smbd_response_payload(response);
++      struct smbdirect_negotiate_resp *packet = smbd_response_payload(response);
+-      if (packet_length < sizeof(struct smbd_negotiate_resp)) {
++      if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
+               log_rdma_event(ERR,
+                       "error: packet_length=%d\n", packet_length);
+               return false;
+       }
+-      if (le16_to_cpu(packet->negotiated_version) != SMBD_V1) {
++      if (le16_to_cpu(packet->negotiated_version) != SMBDIRECT_V1) {
+               log_rdma_event(ERR, "error: negotiated_version=%x\n",
+                       le16_to_cpu(packet->negotiated_version));
+               return false;
+@@ -448,7 +446,7 @@ static void smbd_post_send_credits(struct work_struct *work)
+ /* Called from softirq, when recv is done */
+ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+ {
+-      struct smbd_data_transfer *data_transfer;
++      struct smbdirect_data_transfer *data_transfer;
+       struct smbd_response *response =
+               container_of(wc->wr_cqe, struct smbd_response, cqe);
+       struct smbd_connection *info = response->info;
+@@ -474,7 +472,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+       switch (response->type) {
+       /* SMBD negotiation response */
+       case SMBD_NEGOTIATE_RESP:
+-              dump_smbd_negotiate_resp(smbd_response_payload(response));
++              dump_smbdirect_negotiate_resp(smbd_response_payload(response));
+               info->full_packet_received = true;
+               info->negotiate_done =
+                       process_negotiation_response(response, wc->byte_len);
+@@ -531,7 +529,7 @@ static void recv_done(struct ib_cq *cq, struct ib_wc *wc)
+               /* Send a KEEP_ALIVE response right away if requested */
+               info->keep_alive_requested = KEEP_ALIVE_NONE;
+               if (le16_to_cpu(data_transfer->flags) &
+-                              SMB_DIRECT_RESPONSE_REQUESTED) {
++                              SMBDIRECT_FLAG_RESPONSE_REQUESTED) {
+                       info->keep_alive_requested = KEEP_ALIVE_PENDING;
+               }
+@@ -686,7 +684,7 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+       struct ib_send_wr send_wr;
+       int rc = -ENOMEM;
+       struct smbd_request *request;
+-      struct smbd_negotiate_req *packet;
++      struct smbdirect_negotiate_req *packet;
+       request = mempool_alloc(info->request_mempool, GFP_KERNEL);
+       if (!request)
+@@ -695,8 +693,8 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+       request->info = info;
+       packet = smbd_request_payload(request);
+-      packet->min_version = cpu_to_le16(SMBD_V1);
+-      packet->max_version = cpu_to_le16(SMBD_V1);
++      packet->min_version = cpu_to_le16(SMBDIRECT_V1);
++      packet->max_version = cpu_to_le16(SMBDIRECT_V1);
+       packet->reserved = 0;
+       packet->credits_requested = cpu_to_le16(info->send_credit_target);
+       packet->preferred_send_size = cpu_to_le32(info->max_send_size);
+@@ -774,10 +772,10 @@ static int manage_credits_prior_sending(struct smbd_connection *info)
+ /*
+  * Check if we need to send a KEEP_ALIVE message
+  * The idle connection timer triggers a KEEP_ALIVE message when expires
+- * SMB_DIRECT_RESPONSE_REQUESTED is set in the message flag to have peer send
++ * SMBDIRECT_FLAG_RESPONSE_REQUESTED is set in the message flag to have peer send
+  * back a response.
+  * return value:
+- * 1 if SMB_DIRECT_RESPONSE_REQUESTED needs to be set
++ * 1 if SMBDIRECT_FLAG_RESPONSE_REQUESTED needs to be set
+  * 0: otherwise
+  */
+ static int manage_keep_alive_before_sending(struct smbd_connection *info)
+@@ -837,7 +835,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+       int header_length;
+       int data_length;
+       struct smbd_request *request;
+-      struct smbd_data_transfer *packet;
++      struct smbdirect_data_transfer *packet;
+       int new_credits = 0;
+ wait_credit:
+@@ -919,7 +917,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+       packet->flags = 0;
+       if (manage_keep_alive_before_sending(info))
+-              packet->flags |= cpu_to_le16(SMB_DIRECT_RESPONSE_REQUESTED);
++              packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
+       packet->reserved = 0;
+       if (!data_length)
+@@ -938,10 +936,10 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+                    le32_to_cpu(packet->remaining_data_length));
+       /* Map the packet to DMA */
+-      header_length = sizeof(struct smbd_data_transfer);
++      header_length = sizeof(struct smbdirect_data_transfer);
+       /* If this is a packet without payload, don't send padding */
+       if (!data_length)
+-              header_length = offsetof(struct smbd_data_transfer, padding);
++              header_length = offsetof(struct smbdirect_data_transfer, padding);
+       request->sge[0].addr = ib_dma_map_single(info->id->device,
+                                                (void *)packet,
+@@ -1432,7 +1430,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+               kmem_cache_create(
+                       name,
+                       sizeof(struct smbd_request) +
+-                              sizeof(struct smbd_data_transfer),
++                              sizeof(struct smbdirect_data_transfer),
+                       0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!info->request_cache)
+               return -ENOMEM;
+@@ -1735,7 +1733,7 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+               unsigned int size)
+ {
+       struct smbd_response *response;
+-      struct smbd_data_transfer *data_transfer;
++      struct smbdirect_data_transfer *data_transfer;
+       int to_copy, to_read, data_read, offset;
+       u32 data_length, remaining_data_length, data_offset;
+       int rc;
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index c08e3665150d7..4da0974ce7305 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -177,47 +177,6 @@ enum smbd_message_type {
+       SMBD_TRANSFER_DATA,
+ };
+-#define SMB_DIRECT_RESPONSE_REQUESTED 0x0001
+-
+-/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
+-struct smbd_negotiate_req {
+-      __le16 min_version;
+-      __le16 max_version;
+-      __le16 reserved;
+-      __le16 credits_requested;
+-      __le32 preferred_send_size;
+-      __le32 max_receive_size;
+-      __le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
+-struct smbd_negotiate_resp {
+-      __le16 min_version;
+-      __le16 max_version;
+-      __le16 negotiated_version;
+-      __le16 reserved;
+-      __le16 credits_requested;
+-      __le16 credits_granted;
+-      __le32 status;
+-      __le32 max_readwrite_size;
+-      __le32 preferred_send_size;
+-      __le32 max_receive_size;
+-      __le32 max_fragmented_size;
+-} __packed;
+-
+-/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
+-struct smbd_data_transfer {
+-      __le16 credits_requested;
+-      __le16 credits_granted;
+-      __le16 flags;
+-      __le16 reserved;
+-      __le32 remaining_data_length;
+-      __le32 data_offset;
+-      __le32 data_length;
+-      __le32 padding;
+-      __u8 buffer[];
+-} __packed;
+-
+ /* The packet fields for a registered RDMA buffer */
+ struct smbd_buffer_descriptor_v1 {
+       __le64 offset;
+-- 
+2.39.5
+
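For orientation between patches: a minimal sketch of what the rename in the patch above buys the send path, using the shared smbdirect_pdu.h names (SMBDIRECT_V1, SMBDIRECT_FLAG_RESPONSE_REQUESTED, struct smbdirect_data_transfer). The structure layout is copied from the definition removed from fs/smb/client/smbdirect.h above; the helper function and its standalone framing are illustrative assumptions, not code from the patch, and the real smbd_post_send_iter() additionally fills data_offset/remaining_data_length and DMA-maps the header.

/* Illustrative sketch only, not part of the patch. The struct is the
 * [MS-SMBD] 2.2.3 data-transfer header removed from fs/smb/client/smbdirect.h
 * above and now provided by common/smbdirect/smbdirect_pdu.h; the helper
 * below is hypothetical. */
#include <linux/types.h>
#include <linux/stddef.h>
#include <asm/byteorder.h>

#define SMBDIRECT_V1                      0x0100  /* negotiation uses cpu_to_le16(SMBDIRECT_V1) */
#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001

struct smbdirect_data_transfer {
	__le16 credits_requested;
	__le16 credits_granted;
	__le16 flags;
	__le16 reserved;
	__le32 remaining_data_length;
	__le32 data_offset;
	__le32 data_length;
	__le32 padding;
	__u8   buffer[];
} __packed;

/*
 * Hypothetical helper mirroring the header logic visible in the diff of
 * smbd_post_send_iter(): request credits, optionally ask the peer for a
 * KEEP_ALIVE response, and return how many header bytes to DMA-map.
 */
static size_t fill_data_transfer_header(struct smbdirect_data_transfer *packet,
					u16 send_credit_target,
					u16 new_credits,
					u32 data_length,
					bool response_requested)
{
	packet->credits_requested = cpu_to_le16(send_credit_target);
	packet->credits_granted = cpu_to_le16(new_credits);

	packet->flags = 0;
	if (response_requested)
		packet->flags |= cpu_to_le16(SMBDIRECT_FLAG_RESPONSE_REQUESTED);
	packet->reserved = 0;

	packet->data_length = cpu_to_le32(data_length);

	/* A packet without payload stops at the padding field. */
	if (!data_length)
		return offsetof(struct smbdirect_data_transfer, padding);
	return sizeof(struct smbdirect_data_transfer);
}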
diff --git a/queue-6.12/smb-client-make-use-of-common-smbdirect_socket.patch b/queue-6.12/smb-client-make-use-of-common-smbdirect_socket.patch
new file mode 100644 (file)
index 0000000..a7045bd
--- /dev/null
@@ -0,0 +1,866 @@
+From 21a5bb4f93832f901463c8c0fb07554d63e3a867 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:37 +0200
+Subject: smb: client: make use of common smbdirect_socket
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit c3011b9a7deaaaabdf955815d29eac39c8b75e67 ]
+
+This is the next step in the direction of a common smbdirect layer.
+Currently only structures are shared, but that will change
+over time until everything is shared.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c |   2 +-
+ fs/smb/client/smbdirect.c  | 258 ++++++++++++++++++++-----------------
+ fs/smb/client/smbdirect.h  |  12 +-
+ 3 files changed, 146 insertions(+), 126 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index e03c890de0a06..56b0b5c82dd19 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -387,7 +387,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+               seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+                       "transport status: %x",
+                       server->smbd_conn->protocol,
+-                      server->smbd_conn->transport_status);
++                      server->smbd_conn->socket.status);
+               seq_printf(m, "\nConn receive_credit_max: %x "
+                       "send_credit_target: %x max_send_size: %x",
+                       server->smbd_conn->receive_credit_max,
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index d506ab259e082..ac489df8151a1 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -163,10 +163,11 @@ static void smbd_disconnect_rdma_work(struct work_struct *work)
+ {
+       struct smbd_connection *info =
+               container_of(work, struct smbd_connection, disconnect_work);
++      struct smbdirect_socket *sc = &info->socket;
+-      if (info->transport_status == SMBD_CONNECTED) {
+-              info->transport_status = SMBD_DISCONNECTING;
+-              rdma_disconnect(info->id);
++      if (sc->status == SMBDIRECT_SOCKET_CONNECTED) {
++              sc->status = SMBDIRECT_SOCKET_DISCONNECTING;
++              rdma_disconnect(sc->rdma.cm_id);
+       }
+ }
+@@ -180,6 +181,7 @@ static int smbd_conn_upcall(
+               struct rdma_cm_id *id, struct rdma_cm_event *event)
+ {
+       struct smbd_connection *info = id->context;
++      struct smbdirect_socket *sc = &info->socket;
+       log_rdma_event(INFO, "event=%d status=%d\n",
+               event->event, event->status);
+@@ -203,7 +205,7 @@ static int smbd_conn_upcall(
+       case RDMA_CM_EVENT_ESTABLISHED:
+               log_rdma_event(INFO, "connected event=%d\n", event->event);
+-              info->transport_status = SMBD_CONNECTED;
++              sc->status = SMBDIRECT_SOCKET_CONNECTED;
+               wake_up_interruptible(&info->conn_wait);
+               break;
+@@ -211,20 +213,20 @@ static int smbd_conn_upcall(
+       case RDMA_CM_EVENT_UNREACHABLE:
+       case RDMA_CM_EVENT_REJECTED:
+               log_rdma_event(INFO, "connecting failed event=%d\n", event->event);
+-              info->transport_status = SMBD_DISCONNECTED;
++              sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+               wake_up_interruptible(&info->conn_wait);
+               break;
+       case RDMA_CM_EVENT_DEVICE_REMOVAL:
+       case RDMA_CM_EVENT_DISCONNECTED:
+               /* This happens when we fail the negotiation */
+-              if (info->transport_status == SMBD_NEGOTIATE_FAILED) {
+-                      info->transport_status = SMBD_DISCONNECTED;
++              if (sc->status == SMBDIRECT_SOCKET_NEGOTIATE_FAILED) {
++                      sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+                       wake_up(&info->conn_wait);
+                       break;
+               }
+-              info->transport_status = SMBD_DISCONNECTED;
++              sc->status = SMBDIRECT_SOCKET_DISCONNECTED;
+               wake_up_interruptible(&info->disconn_wait);
+               wake_up_interruptible(&info->wait_reassembly_queue);
+               wake_up_interruptible_all(&info->wait_send_queue);
+@@ -273,6 +275,8 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+       int i;
+       struct smbd_request *request =
+               container_of(wc->wr_cqe, struct smbd_request, cqe);
++      struct smbd_connection *info = request->info;
++      struct smbdirect_socket *sc = &info->socket;
+       log_rdma_send(INFO, "smbd_request 0x%p completed wc->status=%d\n",
+               request, wc->status);
+@@ -284,7 +288,7 @@ static void send_done(struct ib_cq *cq, struct ib_wc *wc)
+       }
+       for (i = 0; i < request->num_sge; i++)
+-              ib_dma_unmap_single(request->info->id->device,
++              ib_dma_unmap_single(sc->ib.dev,
+                       request->sge[i].addr,
+                       request->sge[i].length,
+                       DMA_TO_DEVICE);
+@@ -391,8 +395,9 @@ static void smbd_post_send_credits(struct work_struct *work)
+       struct smbd_connection *info =
+               container_of(work, struct smbd_connection,
+                       post_send_credits_work);
++      struct smbdirect_socket *sc = &info->socket;
+-      if (info->transport_status != SMBD_CONNECTED) {
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+               wake_up(&info->wait_receive_queues);
+               return;
+       }
+@@ -633,32 +638,34 @@ static int smbd_ia_open(
+               struct smbd_connection *info,
+               struct sockaddr *dstaddr, int port)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       int rc;
+-      info->id = smbd_create_id(info, dstaddr, port);
+-      if (IS_ERR(info->id)) {
+-              rc = PTR_ERR(info->id);
++      sc->rdma.cm_id = smbd_create_id(info, dstaddr, port);
++      if (IS_ERR(sc->rdma.cm_id)) {
++              rc = PTR_ERR(sc->rdma.cm_id);
+               goto out1;
+       }
++      sc->ib.dev = sc->rdma.cm_id->device;
+-      if (!frwr_is_supported(&info->id->device->attrs)) {
++      if (!frwr_is_supported(&sc->ib.dev->attrs)) {
+               log_rdma_event(ERR, "Fast Registration Work Requests (FRWR) is not supported\n");
+               log_rdma_event(ERR, "Device capability flags = %llx max_fast_reg_page_list_len = %u\n",
+-                             info->id->device->attrs.device_cap_flags,
+-                             info->id->device->attrs.max_fast_reg_page_list_len);
++                             sc->ib.dev->attrs.device_cap_flags,
++                             sc->ib.dev->attrs.max_fast_reg_page_list_len);
+               rc = -EPROTONOSUPPORT;
+               goto out2;
+       }
+       info->max_frmr_depth = min_t(int,
+               smbd_max_frmr_depth,
+-              info->id->device->attrs.max_fast_reg_page_list_len);
++              sc->ib.dev->attrs.max_fast_reg_page_list_len);
+       info->mr_type = IB_MR_TYPE_MEM_REG;
+-      if (info->id->device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
++      if (sc->ib.dev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)
+               info->mr_type = IB_MR_TYPE_SG_GAPS;
+-      info->pd = ib_alloc_pd(info->id->device, 0);
+-      if (IS_ERR(info->pd)) {
+-              rc = PTR_ERR(info->pd);
++      sc->ib.pd = ib_alloc_pd(sc->ib.dev, 0);
++      if (IS_ERR(sc->ib.pd)) {
++              rc = PTR_ERR(sc->ib.pd);
+               log_rdma_event(ERR, "ib_alloc_pd() returned %d\n", rc);
+               goto out2;
+       }
+@@ -666,8 +673,8 @@ static int smbd_ia_open(
+       return 0;
+ out2:
+-      rdma_destroy_id(info->id);
+-      info->id = NULL;
++      rdma_destroy_id(sc->rdma.cm_id);
++      sc->rdma.cm_id = NULL;
+ out1:
+       return rc;
+@@ -681,6 +688,7 @@ static int smbd_ia_open(
+  */
+ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct ib_send_wr send_wr;
+       int rc = -ENOMEM;
+       struct smbd_request *request;
+@@ -704,18 +712,18 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+       request->num_sge = 1;
+       request->sge[0].addr = ib_dma_map_single(
+-                              info->id->device, (void *)packet,
++                              sc->ib.dev, (void *)packet,
+                               sizeof(*packet), DMA_TO_DEVICE);
+-      if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++      if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+               rc = -EIO;
+               goto dma_mapping_failed;
+       }
+       request->sge[0].length = sizeof(*packet);
+-      request->sge[0].lkey = info->pd->local_dma_lkey;
++      request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+       ib_dma_sync_single_for_device(
+-              info->id->device, request->sge[0].addr,
++              sc->ib.dev, request->sge[0].addr,
+               request->sge[0].length, DMA_TO_DEVICE);
+       request->cqe.done = send_done;
+@@ -732,14 +740,14 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+               request->sge[0].length, request->sge[0].lkey);
+       atomic_inc(&info->send_pending);
+-      rc = ib_post_send(info->id->qp, &send_wr, NULL);
++      rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
+       if (!rc)
+               return 0;
+       /* if we reach here, post send failed */
+       log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+       atomic_dec(&info->send_pending);
+-      ib_dma_unmap_single(info->id->device, request->sge[0].addr,
++      ib_dma_unmap_single(sc->ib.dev, request->sge[0].addr,
+               request->sge[0].length, DMA_TO_DEVICE);
+       smbd_disconnect_rdma_connection(info);
+@@ -791,6 +799,7 @@ static int manage_keep_alive_before_sending(struct smbd_connection *info)
+ static int smbd_post_send(struct smbd_connection *info,
+               struct smbd_request *request)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct ib_send_wr send_wr;
+       int rc, i;
+@@ -799,7 +808,7 @@ static int smbd_post_send(struct smbd_connection *info,
+                       "rdma_request sge[%d] addr=0x%llx length=%u\n",
+                       i, request->sge[i].addr, request->sge[i].length);
+               ib_dma_sync_single_for_device(
+-                      info->id->device,
++                      sc->ib.dev,
+                       request->sge[i].addr,
+                       request->sge[i].length,
+                       DMA_TO_DEVICE);
+@@ -814,7 +823,7 @@ static int smbd_post_send(struct smbd_connection *info,
+       send_wr.opcode = IB_WR_SEND;
+       send_wr.send_flags = IB_SEND_SIGNALED;
+-      rc = ib_post_send(info->id->qp, &send_wr, NULL);
++      rc = ib_post_send(sc->ib.qp, &send_wr, NULL);
+       if (rc) {
+               log_rdma_send(ERR, "ib_post_send failed rc=%d\n", rc);
+               smbd_disconnect_rdma_connection(info);
+@@ -831,6 +840,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+                              struct iov_iter *iter,
+                              int *_remaining_data_length)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       int i, rc;
+       int header_length;
+       int data_length;
+@@ -842,11 +852,11 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+       /* Wait for send credits. A SMBD packet needs one credit */
+       rc = wait_event_interruptible(info->wait_send_queue,
+               atomic_read(&info->send_credits) > 0 ||
+-              info->transport_status != SMBD_CONNECTED);
++              sc->status != SMBDIRECT_SOCKET_CONNECTED);
+       if (rc)
+               goto err_wait_credit;
+-      if (info->transport_status != SMBD_CONNECTED) {
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+               log_outgoing(ERR, "disconnected not sending on wait_credit\n");
+               rc = -EAGAIN;
+               goto err_wait_credit;
+@@ -859,9 +869,9 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ wait_send_queue:
+       wait_event(info->wait_post_send,
+               atomic_read(&info->send_pending) < info->send_credit_target ||
+-              info->transport_status != SMBD_CONNECTED);
++              sc->status != SMBDIRECT_SOCKET_CONNECTED);
+-      if (info->transport_status != SMBD_CONNECTED) {
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+               log_outgoing(ERR, "disconnected not sending on wait_send_queue\n");
+               rc = -EAGAIN;
+               goto err_wait_send_queue;
+@@ -888,8 +898,8 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+                       .nr_sge         = 1,
+                       .max_sge        = SMBDIRECT_MAX_SEND_SGE,
+                       .sge            = request->sge,
+-                      .device         = info->id->device,
+-                      .local_dma_lkey = info->pd->local_dma_lkey,
++                      .device         = sc->ib.dev,
++                      .local_dma_lkey = sc->ib.pd->local_dma_lkey,
+                       .direction      = DMA_TO_DEVICE,
+               };
+@@ -941,18 +951,18 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+       if (!data_length)
+               header_length = offsetof(struct smbdirect_data_transfer, padding);
+-      request->sge[0].addr = ib_dma_map_single(info->id->device,
++      request->sge[0].addr = ib_dma_map_single(sc->ib.dev,
+                                                (void *)packet,
+                                                header_length,
+                                                DMA_TO_DEVICE);
+-      if (ib_dma_mapping_error(info->id->device, request->sge[0].addr)) {
++      if (ib_dma_mapping_error(sc->ib.dev, request->sge[0].addr)) {
+               rc = -EIO;
+               request->sge[0].addr = 0;
+               goto err_dma;
+       }
+       request->sge[0].length = header_length;
+-      request->sge[0].lkey = info->pd->local_dma_lkey;
++      request->sge[0].lkey = sc->ib.pd->local_dma_lkey;
+       rc = smbd_post_send(info, request);
+       if (!rc)
+@@ -961,7 +971,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ err_dma:
+       for (i = 0; i < request->num_sge; i++)
+               if (request->sge[i].addr)
+-                      ib_dma_unmap_single(info->id->device,
++                      ib_dma_unmap_single(sc->ib.dev,
+                                           request->sge[i].addr,
+                                           request->sge[i].length,
+                                           DMA_TO_DEVICE);
+@@ -1006,17 +1016,18 @@ static int smbd_post_send_empty(struct smbd_connection *info)
+ static int smbd_post_recv(
+               struct smbd_connection *info, struct smbd_response *response)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct ib_recv_wr recv_wr;
+       int rc = -EIO;
+       response->sge.addr = ib_dma_map_single(
+-                              info->id->device, response->packet,
++                              sc->ib.dev, response->packet,
+                               info->max_receive_size, DMA_FROM_DEVICE);
+-      if (ib_dma_mapping_error(info->id->device, response->sge.addr))
++      if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
+               return rc;
+       response->sge.length = info->max_receive_size;
+-      response->sge.lkey = info->pd->local_dma_lkey;
++      response->sge.lkey = sc->ib.pd->local_dma_lkey;
+       response->cqe.done = recv_done;
+@@ -1025,9 +1036,9 @@ static int smbd_post_recv(
+       recv_wr.sg_list = &response->sge;
+       recv_wr.num_sge = 1;
+-      rc = ib_post_recv(info->id->qp, &recv_wr, NULL);
++      rc = ib_post_recv(sc->ib.qp, &recv_wr, NULL);
+       if (rc) {
+-              ib_dma_unmap_single(info->id->device, response->sge.addr,
++              ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+                                   response->sge.length, DMA_FROM_DEVICE);
+               smbd_disconnect_rdma_connection(info);
+               log_rdma_recv(ERR, "ib_post_recv failed rc=%d\n", rc);
+@@ -1185,9 +1196,10 @@ static struct smbd_response *get_receive_buffer(struct smbd_connection *info)
+ static void put_receive_buffer(
+       struct smbd_connection *info, struct smbd_response *response)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       unsigned long flags;
+-      ib_dma_unmap_single(info->id->device, response->sge.addr,
++      ib_dma_unmap_single(sc->ib.dev, response->sge.addr,
+               response->sge.length, DMA_FROM_DEVICE);
+       spin_lock_irqsave(&info->receive_queue_lock, flags);
+@@ -1287,6 +1299,7 @@ static void idle_connection_timer(struct work_struct *work)
+ void smbd_destroy(struct TCP_Server_Info *server)
+ {
+       struct smbd_connection *info = server->smbd_conn;
++      struct smbdirect_socket *sc;
+       struct smbd_response *response;
+       unsigned long flags;
+@@ -1294,19 +1307,21 @@ void smbd_destroy(struct TCP_Server_Info *server)
+               log_rdma_event(INFO, "rdma session already destroyed\n");
+               return;
+       }
++      sc = &info->socket;
+       log_rdma_event(INFO, "destroying rdma session\n");
+-      if (info->transport_status != SMBD_DISCONNECTED) {
+-              rdma_disconnect(server->smbd_conn->id);
++      if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
++              rdma_disconnect(sc->rdma.cm_id);
+               log_rdma_event(INFO, "wait for transport being disconnected\n");
+               wait_event_interruptible(
+                       info->disconn_wait,
+-                      info->transport_status == SMBD_DISCONNECTED);
++                      sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+       }
+       log_rdma_event(INFO, "destroying qp\n");
+-      ib_drain_qp(info->id->qp);
+-      rdma_destroy_qp(info->id);
++      ib_drain_qp(sc->ib.qp);
++      rdma_destroy_qp(sc->rdma.cm_id);
++      sc->ib.qp = NULL;
+       log_rdma_event(INFO, "cancelling idle timer\n");
+       cancel_delayed_work_sync(&info->idle_timer_work);
+@@ -1353,10 +1368,10 @@ void smbd_destroy(struct TCP_Server_Info *server)
+       }
+       destroy_mr_list(info);
+-      ib_free_cq(info->send_cq);
+-      ib_free_cq(info->recv_cq);
+-      ib_dealloc_pd(info->pd);
+-      rdma_destroy_id(info->id);
++      ib_free_cq(sc->ib.send_cq);
++      ib_free_cq(sc->ib.recv_cq);
++      ib_dealloc_pd(sc->ib.pd);
++      rdma_destroy_id(sc->rdma.cm_id);
+       /* free mempools */
+       mempool_destroy(info->request_mempool);
+@@ -1365,7 +1380,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+       mempool_destroy(info->response_mempool);
+       kmem_cache_destroy(info->response_cache);
+-      info->transport_status = SMBD_DESTROYED;
++      sc->status = SMBDIRECT_SOCKET_DESTROYED;
+       destroy_workqueue(info->workqueue);
+       log_rdma_event(INFO,  "rdma session destroyed\n");
+@@ -1390,7 +1405,7 @@ int smbd_reconnect(struct TCP_Server_Info *server)
+        * This is possible if transport is disconnected and we haven't received
+        * notification from RDMA, but upper layer has detected timeout
+        */
+-      if (server->smbd_conn->transport_status == SMBD_CONNECTED) {
++      if (server->smbd_conn->socket.status == SMBDIRECT_SOCKET_CONNECTED) {
+               log_rdma_event(INFO, "disconnecting transport\n");
+               smbd_destroy(server);
+       }
+@@ -1489,6 +1504,7 @@ static struct smbd_connection *_smbd_get_connection(
+ {
+       int rc;
+       struct smbd_connection *info;
++      struct smbdirect_socket *sc;
+       struct rdma_conn_param conn_param;
+       struct ib_qp_init_attr qp_attr;
+       struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+@@ -1498,29 +1514,30 @@ static struct smbd_connection *_smbd_get_connection(
+       info = kzalloc(sizeof(struct smbd_connection), GFP_KERNEL);
+       if (!info)
+               return NULL;
++      sc = &info->socket;
+-      info->transport_status = SMBD_CONNECTING;
++      sc->status = SMBDIRECT_SOCKET_CONNECTING;
+       rc = smbd_ia_open(info, dstaddr, port);
+       if (rc) {
+               log_rdma_event(INFO, "smbd_ia_open rc=%d\n", rc);
+               goto create_id_failed;
+       }
+-      if (smbd_send_credit_target > info->id->device->attrs.max_cqe ||
+-          smbd_send_credit_target > info->id->device->attrs.max_qp_wr) {
++      if (smbd_send_credit_target > sc->ib.dev->attrs.max_cqe ||
++          smbd_send_credit_target > sc->ib.dev->attrs.max_qp_wr) {
+               log_rdma_event(ERR, "consider lowering send_credit_target = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+                              smbd_send_credit_target,
+-                             info->id->device->attrs.max_cqe,
+-                             info->id->device->attrs.max_qp_wr);
++                             sc->ib.dev->attrs.max_cqe,
++                             sc->ib.dev->attrs.max_qp_wr);
+               goto config_failed;
+       }
+-      if (smbd_receive_credit_max > info->id->device->attrs.max_cqe ||
+-          smbd_receive_credit_max > info->id->device->attrs.max_qp_wr) {
++      if (smbd_receive_credit_max > sc->ib.dev->attrs.max_cqe ||
++          smbd_receive_credit_max > sc->ib.dev->attrs.max_qp_wr) {
+               log_rdma_event(ERR, "consider lowering receive_credit_max = %d. Possible CQE overrun, device reporting max_cqe %d max_qp_wr %d\n",
+                              smbd_receive_credit_max,
+-                             info->id->device->attrs.max_cqe,
+-                             info->id->device->attrs.max_qp_wr);
++                             sc->ib.dev->attrs.max_cqe,
++                             sc->ib.dev->attrs.max_qp_wr);
+               goto config_failed;
+       }
+@@ -1531,32 +1548,30 @@ static struct smbd_connection *_smbd_get_connection(
+       info->max_receive_size = smbd_max_receive_size;
+       info->keep_alive_interval = smbd_keep_alive_interval;
+-      if (info->id->device->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+-          info->id->device->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
++      if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
++          sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+               log_rdma_event(ERR,
+                       "device %.*s max_send_sge/max_recv_sge = %d/%d too small\n",
+                       IB_DEVICE_NAME_MAX,
+-                      info->id->device->name,
+-                      info->id->device->attrs.max_send_sge,
+-                      info->id->device->attrs.max_recv_sge);
++                      sc->ib.dev->name,
++                      sc->ib.dev->attrs.max_send_sge,
++                      sc->ib.dev->attrs.max_recv_sge);
+               goto config_failed;
+       }
+-      info->send_cq = NULL;
+-      info->recv_cq = NULL;
+-      info->send_cq =
+-              ib_alloc_cq_any(info->id->device, info,
++      sc->ib.send_cq =
++              ib_alloc_cq_any(sc->ib.dev, info,
+                               info->send_credit_target, IB_POLL_SOFTIRQ);
+-      if (IS_ERR(info->send_cq)) {
+-              info->send_cq = NULL;
++      if (IS_ERR(sc->ib.send_cq)) {
++              sc->ib.send_cq = NULL;
+               goto alloc_cq_failed;
+       }
+-      info->recv_cq =
+-              ib_alloc_cq_any(info->id->device, info,
++      sc->ib.recv_cq =
++              ib_alloc_cq_any(sc->ib.dev, info,
+                               info->receive_credit_max, IB_POLL_SOFTIRQ);
+-      if (IS_ERR(info->recv_cq)) {
+-              info->recv_cq = NULL;
++      if (IS_ERR(sc->ib.recv_cq)) {
++              sc->ib.recv_cq = NULL;
+               goto alloc_cq_failed;
+       }
+@@ -1570,29 +1585,30 @@ static struct smbd_connection *_smbd_get_connection(
+       qp_attr.cap.max_inline_data = 0;
+       qp_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
+       qp_attr.qp_type = IB_QPT_RC;
+-      qp_attr.send_cq = info->send_cq;
+-      qp_attr.recv_cq = info->recv_cq;
++      qp_attr.send_cq = sc->ib.send_cq;
++      qp_attr.recv_cq = sc->ib.recv_cq;
+       qp_attr.port_num = ~0;
+-      rc = rdma_create_qp(info->id, info->pd, &qp_attr);
++      rc = rdma_create_qp(sc->rdma.cm_id, sc->ib.pd, &qp_attr);
+       if (rc) {
+               log_rdma_event(ERR, "rdma_create_qp failed %i\n", rc);
+               goto create_qp_failed;
+       }
++      sc->ib.qp = sc->rdma.cm_id->qp;
+       memset(&conn_param, 0, sizeof(conn_param));
+       conn_param.initiator_depth = 0;
+       conn_param.responder_resources =
+-              min(info->id->device->attrs.max_qp_rd_atom,
++              min(sc->ib.dev->attrs.max_qp_rd_atom,
+                   SMBD_CM_RESPONDER_RESOURCES);
+       info->responder_resources = conn_param.responder_resources;
+       log_rdma_mr(INFO, "responder_resources=%d\n",
+               info->responder_resources);
+       /* Need to send IRD/ORD in private data for iWARP */
+-      info->id->device->ops.get_port_immutable(
+-              info->id->device, info->id->port_num, &port_immutable);
++      sc->ib.dev->ops.get_port_immutable(
++              sc->ib.dev, sc->rdma.cm_id->port_num, &port_immutable);
+       if (port_immutable.core_cap_flags & RDMA_CORE_PORT_IWARP) {
+               ird_ord_hdr[0] = info->responder_resources;
+               ird_ord_hdr[1] = 1;
+@@ -1613,16 +1629,16 @@ static struct smbd_connection *_smbd_get_connection(
+       init_waitqueue_head(&info->conn_wait);
+       init_waitqueue_head(&info->disconn_wait);
+       init_waitqueue_head(&info->wait_reassembly_queue);
+-      rc = rdma_connect(info->id, &conn_param);
++      rc = rdma_connect(sc->rdma.cm_id, &conn_param);
+       if (rc) {
+               log_rdma_event(ERR, "rdma_connect() failed with %i\n", rc);
+               goto rdma_connect_failed;
+       }
+       wait_event_interruptible(
+-              info->conn_wait, info->transport_status != SMBD_CONNECTING);
++              info->conn_wait, sc->status != SMBDIRECT_SOCKET_CONNECTING);
+-      if (info->transport_status != SMBD_CONNECTED) {
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+               log_rdma_event(ERR, "rdma_connect failed port=%d\n", port);
+               goto rdma_connect_failed;
+       }
+@@ -1673,26 +1689,26 @@ static struct smbd_connection *_smbd_get_connection(
+ negotiation_failed:
+       cancel_delayed_work_sync(&info->idle_timer_work);
+       destroy_caches_and_workqueue(info);
+-      info->transport_status = SMBD_NEGOTIATE_FAILED;
++      sc->status = SMBDIRECT_SOCKET_NEGOTIATE_FAILED;
+       init_waitqueue_head(&info->conn_wait);
+-      rdma_disconnect(info->id);
++      rdma_disconnect(sc->rdma.cm_id);
+       wait_event(info->conn_wait,
+-              info->transport_status == SMBD_DISCONNECTED);
++              sc->status == SMBDIRECT_SOCKET_DISCONNECTED);
+ allocate_cache_failed:
+ rdma_connect_failed:
+-      rdma_destroy_qp(info->id);
++      rdma_destroy_qp(sc->rdma.cm_id);
+ create_qp_failed:
+ alloc_cq_failed:
+-      if (info->send_cq)
+-              ib_free_cq(info->send_cq);
+-      if (info->recv_cq)
+-              ib_free_cq(info->recv_cq);
++      if (sc->ib.send_cq)
++              ib_free_cq(sc->ib.send_cq);
++      if (sc->ib.recv_cq)
++              ib_free_cq(sc->ib.recv_cq);
+ config_failed:
+-      ib_dealloc_pd(info->pd);
+-      rdma_destroy_id(info->id);
++      ib_dealloc_pd(sc->ib.pd);
++      rdma_destroy_id(sc->rdma.cm_id);
+ create_id_failed:
+       kfree(info);
+@@ -1732,6 +1748,7 @@ struct smbd_connection *smbd_get_connection(
+ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+               unsigned int size)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct smbd_response *response;
+       struct smbdirect_data_transfer *data_transfer;
+       int to_copy, to_read, data_read, offset;
+@@ -1846,12 +1863,12 @@ static int smbd_recv_buf(struct smbd_connection *info, char *buf,
+       rc = wait_event_interruptible(
+               info->wait_reassembly_queue,
+               info->reassembly_data_length >= size ||
+-                      info->transport_status != SMBD_CONNECTED);
++                      sc->status != SMBDIRECT_SOCKET_CONNECTED);
+       /* Don't return any data if interrupted */
+       if (rc)
+               return rc;
+-      if (info->transport_status != SMBD_CONNECTED) {
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+               log_read(ERR, "disconnected\n");
+               return -ECONNABORTED;
+       }
+@@ -1869,6 +1886,7 @@ static int smbd_recv_page(struct smbd_connection *info,
+               struct page *page, unsigned int page_offset,
+               unsigned int to_read)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       int ret;
+       char *to_address;
+       void *page_address;
+@@ -1877,7 +1895,7 @@ static int smbd_recv_page(struct smbd_connection *info,
+       ret = wait_event_interruptible(
+               info->wait_reassembly_queue,
+               info->reassembly_data_length >= to_read ||
+-                      info->transport_status != SMBD_CONNECTED);
++                      sc->status != SMBDIRECT_SOCKET_CONNECTED);
+       if (ret)
+               return ret;
+@@ -1952,12 +1970,13 @@ int smbd_send(struct TCP_Server_Info *server,
+       int num_rqst, struct smb_rqst *rqst_array)
+ {
+       struct smbd_connection *info = server->smbd_conn;
++      struct smbdirect_socket *sc = &info->socket;
+       struct smb_rqst *rqst;
+       struct iov_iter iter;
+       unsigned int remaining_data_length, klen;
+       int rc, i, rqst_idx;
+-      if (info->transport_status != SMBD_CONNECTED)
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED)
+               return -EAGAIN;
+       /*
+@@ -2051,6 +2070,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ {
+       struct smbd_connection *info =
+               container_of(work, struct smbd_connection, mr_recovery_work);
++      struct smbdirect_socket *sc = &info->socket;
+       struct smbd_mr *smbdirect_mr;
+       int rc;
+@@ -2068,7 +2088,7 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+                       }
+                       smbdirect_mr->mr = ib_alloc_mr(
+-                              info->pd, info->mr_type,
++                              sc->ib.pd, info->mr_type,
+                               info->max_frmr_depth);
+                       if (IS_ERR(smbdirect_mr->mr)) {
+                               log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+@@ -2097,12 +2117,13 @@ static void smbd_mr_recovery_work(struct work_struct *work)
+ static void destroy_mr_list(struct smbd_connection *info)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct smbd_mr *mr, *tmp;
+       cancel_work_sync(&info->mr_recovery_work);
+       list_for_each_entry_safe(mr, tmp, &info->mr_list, list) {
+               if (mr->state == MR_INVALIDATED)
+-                      ib_dma_unmap_sg(info->id->device, mr->sgt.sgl,
++                      ib_dma_unmap_sg(sc->ib.dev, mr->sgt.sgl,
+                               mr->sgt.nents, mr->dir);
+               ib_dereg_mr(mr->mr);
+               kfree(mr->sgt.sgl);
+@@ -2119,6 +2140,7 @@ static void destroy_mr_list(struct smbd_connection *info)
+  */
+ static int allocate_mr_list(struct smbd_connection *info)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       int i;
+       struct smbd_mr *smbdirect_mr, *tmp;
+@@ -2134,7 +2156,7 @@ static int allocate_mr_list(struct smbd_connection *info)
+               smbdirect_mr = kzalloc(sizeof(*smbdirect_mr), GFP_KERNEL);
+               if (!smbdirect_mr)
+                       goto cleanup_entries;
+-              smbdirect_mr->mr = ib_alloc_mr(info->pd, info->mr_type,
++              smbdirect_mr->mr = ib_alloc_mr(sc->ib.pd, info->mr_type,
+                                       info->max_frmr_depth);
+               if (IS_ERR(smbdirect_mr->mr)) {
+                       log_rdma_mr(ERR, "ib_alloc_mr failed mr_type=%x max_frmr_depth=%x\n",
+@@ -2179,20 +2201,20 @@ static int allocate_mr_list(struct smbd_connection *info)
+  */
+ static struct smbd_mr *get_mr(struct smbd_connection *info)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct smbd_mr *ret;
+       int rc;
+ again:
+       rc = wait_event_interruptible(info->wait_mr,
+               atomic_read(&info->mr_ready_count) ||
+-              info->transport_status != SMBD_CONNECTED);
++              sc->status != SMBDIRECT_SOCKET_CONNECTED);
+       if (rc) {
+               log_rdma_mr(ERR, "wait_event_interruptible rc=%x\n", rc);
+               return NULL;
+       }
+-      if (info->transport_status != SMBD_CONNECTED) {
+-              log_rdma_mr(ERR, "info->transport_status=%x\n",
+-                      info->transport_status);
++      if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
++              log_rdma_mr(ERR, "sc->status=%x\n", sc->status);
+               return NULL;
+       }
+@@ -2245,6 +2267,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+                                struct iov_iter *iter,
+                                bool writing, bool need_invalidate)
+ {
++      struct smbdirect_socket *sc = &info->socket;
+       struct smbd_mr *smbdirect_mr;
+       int rc, num_pages;
+       enum dma_data_direction dir;
+@@ -2274,7 +2297,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+                   num_pages, iov_iter_count(iter), info->max_frmr_depth);
+       smbd_iter_to_mr(info, iter, &smbdirect_mr->sgt, info->max_frmr_depth);
+-      rc = ib_dma_map_sg(info->id->device, smbdirect_mr->sgt.sgl,
++      rc = ib_dma_map_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
+                          smbdirect_mr->sgt.nents, dir);
+       if (!rc) {
+               log_rdma_mr(ERR, "ib_dma_map_sg num_pages=%x dir=%x rc=%x\n",
+@@ -2310,7 +2333,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+        * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
+        * on the next ib_post_send when we actually send I/O to remote peer
+        */
+-      rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
++      rc = ib_post_send(sc->ib.qp, &reg_wr->wr, NULL);
+       if (!rc)
+               return smbdirect_mr;
+@@ -2319,7 +2342,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
+       /* If all failed, attempt to recover this MR by setting it MR_ERROR*/
+ map_mr_error:
+-      ib_dma_unmap_sg(info->id->device, smbdirect_mr->sgt.sgl,
++      ib_dma_unmap_sg(sc->ib.dev, smbdirect_mr->sgt.sgl,
+                       smbdirect_mr->sgt.nents, smbdirect_mr->dir);
+ dma_map_error:
+@@ -2357,6 +2380,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+ {
+       struct ib_send_wr *wr;
+       struct smbd_connection *info = smbdirect_mr->conn;
++      struct smbdirect_socket *sc = &info->socket;
+       int rc = 0;
+       if (smbdirect_mr->need_invalidate) {
+@@ -2370,7 +2394,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+               wr->send_flags = IB_SEND_SIGNALED;
+               init_completion(&smbdirect_mr->invalidate_done);
+-              rc = ib_post_send(info->id->qp, wr, NULL);
++              rc = ib_post_send(sc->ib.qp, wr, NULL);
+               if (rc) {
+                       log_rdma_mr(ERR, "ib_post_send failed rc=%x\n", rc);
+                       smbd_disconnect_rdma_connection(info);
+@@ -2387,7 +2411,7 @@ int smbd_deregister_mr(struct smbd_mr *smbdirect_mr)
+       if (smbdirect_mr->state == MR_INVALIDATED) {
+               ib_dma_unmap_sg(
+-                      info->id->device, smbdirect_mr->sgt.sgl,
++                      sc->ib.dev, smbdirect_mr->sgt.sgl,
+                       smbdirect_mr->sgt.nents,
+                       smbdirect_mr->dir);
+               smbdirect_mr->state = MR_READY;
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index 4da0974ce7305..ffc38a48b6140 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -15,6 +15,8 @@
+ #include <rdma/rdma_cm.h>
+ #include <linux/mempool.h>
++#include "../common/smbdirect/smbdirect_socket.h"
++
+ extern int rdma_readwrite_threshold;
+ extern int smbd_max_frmr_depth;
+ extern int smbd_keep_alive_interval;
+@@ -50,14 +52,8 @@ enum smbd_connection_status {
+  * 5. mempools for allocating packets
+  */
+ struct smbd_connection {
+-      enum smbd_connection_status transport_status;
+-
+-      /* RDMA related */
+-      struct rdma_cm_id *id;
+-      struct ib_qp_init_attr qp_attr;
+-      struct ib_pd *pd;
+-      struct ib_cq *send_cq, *recv_cq;
+-      struct ib_device_attr dev_attr;
++      struct smbdirect_socket socket;
++
+       int ri_rc;
+       struct completion ri_done;
+       wait_queue_head_t conn_wait;
+-- 
+2.39.5
+
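As a rough illustration of the pattern the patch above applies (and not the actual contents of fs/smb/common/smbdirect/smbdirect_socket.h): the per-field RDMA state in struct smbd_connection is replaced by one embedded struct smbdirect_socket, and call sites reach it through a local sc pointer. Only members and connection states that appear in the diff are shown; their order and any further fields are assumptions, and the *_sketch names are hypothetical.

/* Illustrative sketch, not the in-tree header; the real definition may
 * declare more state (e.g. the parameters member added by the next patch). */
#include <linux/types.h>
#include <rdma/rdma_cm.h>
#include <rdma/ib_verbs.h>

enum smbdirect_socket_status {
	/* states referenced by the patch; the full enum may define more */
	SMBDIRECT_SOCKET_CONNECTING,
	SMBDIRECT_SOCKET_CONNECTED,
	SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
	SMBDIRECT_SOCKET_DISCONNECTING,
	SMBDIRECT_SOCKET_DISCONNECTED,
	SMBDIRECT_SOCKET_DESTROYED,
};

struct smbdirect_socket {
	enum smbdirect_socket_status status;    /* was info->transport_status */

	struct {
		struct rdma_cm_id *cm_id;       /* was info->id */
	} rdma;

	struct {
		struct ib_device *dev;          /* cached cm_id->device */
		struct ib_pd *pd;               /* was info->pd */
		struct ib_cq *send_cq;          /* was info->send_cq */
		struct ib_cq *recv_cq;          /* was info->recv_cq */
		struct ib_qp *qp;               /* cached cm_id->qp */
	} ib;
};

/* Trimmed-down view of the client connection after the patch: the RDMA
 * handles live in the embedded socket, client-only state stays alongside. */
struct smbd_connection_sketch {
	struct smbdirect_socket socket;
	/* ... credits, queues, MR lists, work items ... */
};

/* Call sites now take a local shortcut to the embedded socket: */
static inline bool smbd_sketch_connected(struct smbd_connection_sketch *info)
{
	struct smbdirect_socket *sc = &info->socket;

	return sc->status == SMBDIRECT_SOCKET_CONNECTED;
}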
diff --git a/queue-6.12/smb-client-make-use-of-common-smbdirect_socket_param.patch b/queue-6.12/smb-client-make-use-of-common-smbdirect_socket_param.patch
new file mode 100644 (file)
index 0000000..5db10b2
--- /dev/null
@@ -0,0 +1,494 @@
+From a1fa1698297356797d7a0379b7e056744fd133ac Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:40 +0200
+Subject: smb: client: make use of common smbdirect_socket_parameters
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit cc55f65dd352bdb7bdf8db1c36fb348c294c3b66 ]
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/cifs_debug.c | 21 +++++----
+ fs/smb/client/smb2ops.c    | 14 ++++--
+ fs/smb/client/smbdirect.c  | 91 ++++++++++++++++++++++----------------
+ fs/smb/client/smbdirect.h  | 10 +----
+ 4 files changed, 77 insertions(+), 59 deletions(-)
+
+diff --git a/fs/smb/client/cifs_debug.c b/fs/smb/client/cifs_debug.c
+index 56b0b5c82dd19..c0196be0e65fc 100644
+--- a/fs/smb/client/cifs_debug.c
++++ b/fs/smb/client/cifs_debug.c
+@@ -362,6 +362,10 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+       c = 0;
+       spin_lock(&cifs_tcp_ses_lock);
+       list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
++#ifdef CONFIG_CIFS_SMB_DIRECT
++              struct smbdirect_socket_parameters *sp;
++#endif
++
+               /* channel info will be printed as a part of sessions below */
+               if (SERVER_IS_CHAN(server))
+                       continue;
+@@ -383,6 +387,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+                       seq_printf(m, "\nSMBDirect transport not available");
+                       goto skip_rdma;
+               }
++              sp = &server->smbd_conn->socket.parameters;
+               seq_printf(m, "\nSMBDirect (in hex) protocol version: %x "
+                       "transport status: %x",
+@@ -390,18 +395,18 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
+                       server->smbd_conn->socket.status);
+               seq_printf(m, "\nConn receive_credit_max: %x "
+                       "send_credit_target: %x max_send_size: %x",
+-                      server->smbd_conn->receive_credit_max,
+-                      server->smbd_conn->send_credit_target,
+-                      server->smbd_conn->max_send_size);
++                      sp->recv_credit_max,
++                      sp->send_credit_target,
++                      sp->max_send_size);
+               seq_printf(m, "\nConn max_fragmented_recv_size: %x "
+                       "max_fragmented_send_size: %x max_receive_size:%x",
+-                      server->smbd_conn->max_fragmented_recv_size,
+-                      server->smbd_conn->max_fragmented_send_size,
+-                      server->smbd_conn->max_receive_size);
++                      sp->max_fragmented_recv_size,
++                      sp->max_fragmented_send_size,
++                      sp->max_recv_size);
+               seq_printf(m, "\nConn keep_alive_interval: %x "
+                       "max_readwrite_size: %x rdma_readwrite_threshold: %x",
+-                      server->smbd_conn->keep_alive_interval,
+-                      server->smbd_conn->max_readwrite_size,
++                      sp->keepalive_interval_msec * 1000,
++                      sp->max_read_write_size,
+                       server->smbd_conn->rdma_readwrite_threshold);
+               seq_printf(m, "\nDebug count_get_receive_buffer: %x "
+                       "count_put_receive_buffer: %x count_send_empty: %x",
+diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
+index 74bcc51ccd32f..e596bc4837b68 100644
+--- a/fs/smb/client/smb2ops.c
++++ b/fs/smb/client/smb2ops.c
+@@ -504,6 +504,9 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+       wsize = min_t(unsigned int, wsize, server->max_write);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+       if (server->rdma) {
++              struct smbdirect_socket_parameters *sp =
++                      &server->smbd_conn->socket.parameters;
++
+               if (server->sign)
+                       /*
+                        * Account for SMB2 data transfer packet header and
+@@ -511,12 +514,12 @@ smb3_negotiate_wsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+                        */
+                       wsize = min_t(unsigned int,
+                               wsize,
+-                              server->smbd_conn->max_fragmented_send_size -
++                              sp->max_fragmented_send_size -
+                                       SMB2_READWRITE_PDU_HEADER_SIZE -
+                                       sizeof(struct smb2_transform_hdr));
+               else
+                       wsize = min_t(unsigned int,
+-                              wsize, server->smbd_conn->max_readwrite_size);
++                              wsize, sp->max_read_write_size);
+       }
+ #endif
+       if (!(server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
+@@ -552,6 +555,9 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+       rsize = min_t(unsigned int, rsize, server->max_read);
+ #ifdef CONFIG_CIFS_SMB_DIRECT
+       if (server->rdma) {
++              struct smbdirect_socket_parameters *sp =
++                      &server->smbd_conn->socket.parameters;
++
+               if (server->sign)
+                       /*
+                        * Account for SMB2 data transfer packet header and
+@@ -559,12 +565,12 @@ smb3_negotiate_rsize(struct cifs_tcon *tcon, struct smb3_fs_context *ctx)
+                        */
+                       rsize = min_t(unsigned int,
+                               rsize,
+-                              server->smbd_conn->max_fragmented_recv_size -
++                              sp->max_fragmented_recv_size -
+                                       SMB2_READWRITE_PDU_HEADER_SIZE -
+                                       sizeof(struct smb2_transform_hdr));
+               else
+                       rsize = min_t(unsigned int,
+-                              rsize, server->smbd_conn->max_readwrite_size);
++                              rsize, sp->max_read_write_size);
+       }
+ #endif
+diff --git a/fs/smb/client/smbdirect.c b/fs/smb/client/smbdirect.c
+index ac489df8151a1..cbc85bca006f7 100644
+--- a/fs/smb/client/smbdirect.c
++++ b/fs/smb/client/smbdirect.c
+@@ -320,6 +320,8 @@ static bool process_negotiation_response(
+               struct smbd_response *response, int packet_length)
+ {
+       struct smbd_connection *info = response->info;
++      struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct smbdirect_negotiate_resp *packet = smbd_response_payload(response);
+       if (packet_length < sizeof(struct smbdirect_negotiate_resp)) {
+@@ -349,20 +351,20 @@ static bool process_negotiation_response(
+       atomic_set(&info->receive_credits, 0);
+-      if (le32_to_cpu(packet->preferred_send_size) > info->max_receive_size) {
++      if (le32_to_cpu(packet->preferred_send_size) > sp->max_recv_size) {
+               log_rdma_event(ERR, "error: preferred_send_size=%d\n",
+                       le32_to_cpu(packet->preferred_send_size));
+               return false;
+       }
+-      info->max_receive_size = le32_to_cpu(packet->preferred_send_size);
++      sp->max_recv_size = le32_to_cpu(packet->preferred_send_size);
+       if (le32_to_cpu(packet->max_receive_size) < SMBD_MIN_RECEIVE_SIZE) {
+               log_rdma_event(ERR, "error: max_receive_size=%d\n",
+                       le32_to_cpu(packet->max_receive_size));
+               return false;
+       }
+-      info->max_send_size = min_t(int, info->max_send_size,
+-                                      le32_to_cpu(packet->max_receive_size));
++      sp->max_send_size = min_t(u32, sp->max_send_size,
++                                le32_to_cpu(packet->max_receive_size));
+       if (le32_to_cpu(packet->max_fragmented_size) <
+                       SMBD_MIN_FRAGMENTED_SIZE) {
+@@ -370,18 +372,18 @@ static bool process_negotiation_response(
+                       le32_to_cpu(packet->max_fragmented_size));
+               return false;
+       }
+-      info->max_fragmented_send_size =
++      sp->max_fragmented_send_size =
+               le32_to_cpu(packet->max_fragmented_size);
+       info->rdma_readwrite_threshold =
+-              rdma_readwrite_threshold > info->max_fragmented_send_size ?
+-              info->max_fragmented_send_size :
++              rdma_readwrite_threshold > sp->max_fragmented_send_size ?
++              sp->max_fragmented_send_size :
+               rdma_readwrite_threshold;
+-      info->max_readwrite_size = min_t(u32,
++      sp->max_read_write_size = min_t(u32,
+                       le32_to_cpu(packet->max_readwrite_size),
+                       info->max_frmr_depth * PAGE_SIZE);
+-      info->max_frmr_depth = info->max_readwrite_size / PAGE_SIZE;
++      info->max_frmr_depth = sp->max_read_write_size / PAGE_SIZE;
+       return true;
+ }
+@@ -689,6 +691,7 @@ static int smbd_ia_open(
+ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+ {
+       struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct ib_send_wr send_wr;
+       int rc = -ENOMEM;
+       struct smbd_request *request;
+@@ -704,11 +707,11 @@ static int smbd_post_send_negotiate_req(struct smbd_connection *info)
+       packet->min_version = cpu_to_le16(SMBDIRECT_V1);
+       packet->max_version = cpu_to_le16(SMBDIRECT_V1);
+       packet->reserved = 0;
+-      packet->credits_requested = cpu_to_le16(info->send_credit_target);
+-      packet->preferred_send_size = cpu_to_le32(info->max_send_size);
+-      packet->max_receive_size = cpu_to_le32(info->max_receive_size);
++      packet->credits_requested = cpu_to_le16(sp->send_credit_target);
++      packet->preferred_send_size = cpu_to_le32(sp->max_send_size);
++      packet->max_receive_size = cpu_to_le32(sp->max_recv_size);
+       packet->max_fragmented_size =
+-              cpu_to_le32(info->max_fragmented_recv_size);
++              cpu_to_le32(sp->max_fragmented_recv_size);
+       request->num_sge = 1;
+       request->sge[0].addr = ib_dma_map_single(
+@@ -800,6 +803,7 @@ static int smbd_post_send(struct smbd_connection *info,
+               struct smbd_request *request)
+ {
+       struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct ib_send_wr send_wr;
+       int rc, i;
+@@ -831,7 +835,7 @@ static int smbd_post_send(struct smbd_connection *info,
+       } else
+               /* Reset timer for idle connection after packet is sent */
+               mod_delayed_work(info->workqueue, &info->idle_timer_work,
+-                      info->keep_alive_interval*HZ);
++                      msecs_to_jiffies(sp->keepalive_interval_msec));
+       return rc;
+ }
+@@ -841,6 +845,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+                              int *_remaining_data_length)
+ {
+       struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       int i, rc;
+       int header_length;
+       int data_length;
+@@ -868,7 +873,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+ wait_send_queue:
+       wait_event(info->wait_post_send,
+-              atomic_read(&info->send_pending) < info->send_credit_target ||
++              atomic_read(&info->send_pending) < sp->send_credit_target ||
+               sc->status != SMBDIRECT_SOCKET_CONNECTED);
+       if (sc->status != SMBDIRECT_SOCKET_CONNECTED) {
+@@ -878,7 +883,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+       }
+       if (unlikely(atomic_inc_return(&info->send_pending) >
+-                              info->send_credit_target)) {
++                              sp->send_credit_target)) {
+               atomic_dec(&info->send_pending);
+               goto wait_send_queue;
+       }
+@@ -917,7 +922,7 @@ static int smbd_post_send_iter(struct smbd_connection *info,
+       /* Fill in the packet header */
+       packet = smbd_request_payload(request);
+-      packet->credits_requested = cpu_to_le16(info->send_credit_target);
++      packet->credits_requested = cpu_to_le16(sp->send_credit_target);
+       new_credits = manage_credits_prior_sending(info);
+       atomic_add(new_credits, &info->receive_credits);
+@@ -1017,16 +1022,17 @@ static int smbd_post_recv(
+               struct smbd_connection *info, struct smbd_response *response)
+ {
+       struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct ib_recv_wr recv_wr;
+       int rc = -EIO;
+       response->sge.addr = ib_dma_map_single(
+                               sc->ib.dev, response->packet,
+-                              info->max_receive_size, DMA_FROM_DEVICE);
++                              sp->max_recv_size, DMA_FROM_DEVICE);
+       if (ib_dma_mapping_error(sc->ib.dev, response->sge.addr))
+               return rc;
+-      response->sge.length = info->max_receive_size;
++      response->sge.length = sp->max_recv_size;
+       response->sge.lkey = sc->ib.pd->local_dma_lkey;
+       response->cqe.done = recv_done;
+@@ -1274,6 +1280,8 @@ static void idle_connection_timer(struct work_struct *work)
+       struct smbd_connection *info = container_of(
+                                       work, struct smbd_connection,
+                                       idle_timer_work.work);
++      struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       if (info->keep_alive_requested != KEEP_ALIVE_NONE) {
+               log_keep_alive(ERR,
+@@ -1288,7 +1296,7 @@ static void idle_connection_timer(struct work_struct *work)
+       /* Setup the next idle timeout work */
+       queue_delayed_work(info->workqueue, &info->idle_timer_work,
+-                      info->keep_alive_interval*HZ);
++                      msecs_to_jiffies(sp->keepalive_interval_msec));
+ }
+ /*
+@@ -1300,6 +1308,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+ {
+       struct smbd_connection *info = server->smbd_conn;
+       struct smbdirect_socket *sc;
++      struct smbdirect_socket_parameters *sp;
+       struct smbd_response *response;
+       unsigned long flags;
+@@ -1308,6 +1317,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+               return;
+       }
+       sc = &info->socket;
++      sp = &sc->parameters;
+       log_rdma_event(INFO, "destroying rdma session\n");
+       if (sc->status != SMBDIRECT_SOCKET_DISCONNECTED) {
+@@ -1349,7 +1359,7 @@ void smbd_destroy(struct TCP_Server_Info *server)
+       log_rdma_event(INFO, "free receive buffers\n");
+       wait_event(info->wait_receive_queues,
+               info->count_receive_queue + info->count_empty_packet_queue
+-                      == info->receive_credit_max);
++                      == sp->recv_credit_max);
+       destroy_receive_buffers(info);
+       /*
+@@ -1437,6 +1447,8 @@ static void destroy_caches_and_workqueue(struct smbd_connection *info)
+ #define MAX_NAME_LEN  80
+ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+ {
++      struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       char name[MAX_NAME_LEN];
+       int rc;
+@@ -1451,7 +1463,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+               return -ENOMEM;
+       info->request_mempool =
+-              mempool_create(info->send_credit_target, mempool_alloc_slab,
++              mempool_create(sp->send_credit_target, mempool_alloc_slab,
+                       mempool_free_slab, info->request_cache);
+       if (!info->request_mempool)
+               goto out1;
+@@ -1461,13 +1473,13 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+               kmem_cache_create(
+                       name,
+                       sizeof(struct smbd_response) +
+-                              info->max_receive_size,
++                              sp->max_recv_size,
+                       0, SLAB_HWCACHE_ALIGN, NULL);
+       if (!info->response_cache)
+               goto out2;
+       info->response_mempool =
+-              mempool_create(info->receive_credit_max, mempool_alloc_slab,
++              mempool_create(sp->recv_credit_max, mempool_alloc_slab,
+                      mempool_free_slab, info->response_cache);
+       if (!info->response_mempool)
+               goto out3;
+@@ -1477,7 +1489,7 @@ static int allocate_caches_and_workqueue(struct smbd_connection *info)
+       if (!info->workqueue)
+               goto out4;
+-      rc = allocate_receive_buffers(info, info->receive_credit_max);
++      rc = allocate_receive_buffers(info, sp->recv_credit_max);
+       if (rc) {
+               log_rdma_event(ERR, "failed to allocate receive buffers\n");
+               goto out5;
+@@ -1505,6 +1517,7 @@ static struct smbd_connection *_smbd_get_connection(
+       int rc;
+       struct smbd_connection *info;
+       struct smbdirect_socket *sc;
++      struct smbdirect_socket_parameters *sp;
+       struct rdma_conn_param conn_param;
+       struct ib_qp_init_attr qp_attr;
+       struct sockaddr_in *addr_in = (struct sockaddr_in *) dstaddr;
+@@ -1515,6 +1528,7 @@ static struct smbd_connection *_smbd_get_connection(
+       if (!info)
+               return NULL;
+       sc = &info->socket;
++      sp = &sc->parameters;
+       sc->status = SMBDIRECT_SOCKET_CONNECTING;
+       rc = smbd_ia_open(info, dstaddr, port);
+@@ -1541,12 +1555,12 @@ static struct smbd_connection *_smbd_get_connection(
+               goto config_failed;
+       }
+-      info->receive_credit_max = smbd_receive_credit_max;
+-      info->send_credit_target = smbd_send_credit_target;
+-      info->max_send_size = smbd_max_send_size;
+-      info->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
+-      info->max_receive_size = smbd_max_receive_size;
+-      info->keep_alive_interval = smbd_keep_alive_interval;
++      sp->recv_credit_max = smbd_receive_credit_max;
++      sp->send_credit_target = smbd_send_credit_target;
++      sp->max_send_size = smbd_max_send_size;
++      sp->max_fragmented_recv_size = smbd_max_fragmented_recv_size;
++      sp->max_recv_size = smbd_max_receive_size;
++      sp->keepalive_interval_msec = smbd_keep_alive_interval * 1000;
+       if (sc->ib.dev->attrs.max_send_sge < SMBDIRECT_MAX_SEND_SGE ||
+           sc->ib.dev->attrs.max_recv_sge < SMBDIRECT_MAX_RECV_SGE) {
+@@ -1561,7 +1575,7 @@ static struct smbd_connection *_smbd_get_connection(
+       sc->ib.send_cq =
+               ib_alloc_cq_any(sc->ib.dev, info,
+-                              info->send_credit_target, IB_POLL_SOFTIRQ);
++                              sp->send_credit_target, IB_POLL_SOFTIRQ);
+       if (IS_ERR(sc->ib.send_cq)) {
+               sc->ib.send_cq = NULL;
+               goto alloc_cq_failed;
+@@ -1569,7 +1583,7 @@ static struct smbd_connection *_smbd_get_connection(
+       sc->ib.recv_cq =
+               ib_alloc_cq_any(sc->ib.dev, info,
+-                              info->receive_credit_max, IB_POLL_SOFTIRQ);
++                              sp->recv_credit_max, IB_POLL_SOFTIRQ);
+       if (IS_ERR(sc->ib.recv_cq)) {
+               sc->ib.recv_cq = NULL;
+               goto alloc_cq_failed;
+@@ -1578,8 +1592,8 @@ static struct smbd_connection *_smbd_get_connection(
+       memset(&qp_attr, 0, sizeof(qp_attr));
+       qp_attr.event_handler = smbd_qp_async_error_upcall;
+       qp_attr.qp_context = info;
+-      qp_attr.cap.max_send_wr = info->send_credit_target;
+-      qp_attr.cap.max_recv_wr = info->receive_credit_max;
++      qp_attr.cap.max_send_wr = sp->send_credit_target;
++      qp_attr.cap.max_recv_wr = sp->recv_credit_max;
+       qp_attr.cap.max_send_sge = SMBDIRECT_MAX_SEND_SGE;
+       qp_attr.cap.max_recv_sge = SMBDIRECT_MAX_RECV_SGE;
+       qp_attr.cap.max_inline_data = 0;
+@@ -1654,7 +1668,7 @@ static struct smbd_connection *_smbd_get_connection(
+       init_waitqueue_head(&info->wait_send_queue);
+       INIT_DELAYED_WORK(&info->idle_timer_work, idle_connection_timer);
+       queue_delayed_work(info->workqueue, &info->idle_timer_work,
+-              info->keep_alive_interval*HZ);
++              msecs_to_jiffies(sp->keepalive_interval_msec));
+       init_waitqueue_head(&info->wait_send_pending);
+       atomic_set(&info->send_pending, 0);
+@@ -1971,6 +1985,7 @@ int smbd_send(struct TCP_Server_Info *server,
+ {
+       struct smbd_connection *info = server->smbd_conn;
+       struct smbdirect_socket *sc = &info->socket;
++      struct smbdirect_socket_parameters *sp = &sc->parameters;
+       struct smb_rqst *rqst;
+       struct iov_iter iter;
+       unsigned int remaining_data_length, klen;
+@@ -1988,10 +2003,10 @@ int smbd_send(struct TCP_Server_Info *server,
+       for (i = 0; i < num_rqst; i++)
+               remaining_data_length += smb_rqst_len(server, &rqst_array[i]);
+-      if (unlikely(remaining_data_length > info->max_fragmented_send_size)) {
++      if (unlikely(remaining_data_length > sp->max_fragmented_send_size)) {
+               /* assertion: payload never exceeds negotiated maximum */
+               log_write(ERR, "payload size %d > max size %d\n",
+-                      remaining_data_length, info->max_fragmented_send_size);
++                      remaining_data_length, sp->max_fragmented_send_size);
+               return -EINVAL;
+       }
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index 4b559a4147af1..3d552ab27e0f3 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -69,15 +69,7 @@ struct smbd_connection {
+       spinlock_t lock_new_credits_offered;
+       int new_credits_offered;
+-      /* Connection parameters defined in [MS-SMBD] 3.1.1.1 */
+-      int receive_credit_max;
+-      int send_credit_target;
+-      int max_send_size;
+-      int max_fragmented_recv_size;
+-      int max_fragmented_send_size;
+-      int max_receive_size;
+-      int keep_alive_interval;
+-      int max_readwrite_size;
++      /* dynamic connection parameters defined in [MS-SMBD] 3.1.1.1 */
+       enum keep_alive_status keep_alive_requested;
+       int protocol;
+       atomic_t send_credits;
+-- 
+2.39.5
+
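For readers following the conversion above: a minimal sketch of how client code now reads the shared connection parameters (not part of the queued patch; example_rearm_idle_timer() is a hypothetical helper, and the smbd_connection/smbdirect_socket layout is assumed from the hunks above).

    #include <linux/jiffies.h>
    #include <linux/workqueue.h>

    /* hypothetical helper, mirroring the idle-timer hunks above */
    static void example_rearm_idle_timer(struct smbd_connection *info)
    {
            struct smbdirect_socket_parameters *sp = &info->socket.parameters;

            /* keep_alive_interval (seconds) * HZ is replaced by
             * msecs_to_jiffies(keepalive_interval_msec) */
            mod_delayed_work(info->workqueue, &info->idle_timer_work,
                             msecs_to_jiffies(sp->keepalive_interval_msec));
    }
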
diff --git a/queue-6.12/smb-smbdirect-add-smbdirect.h-with-public-structures.patch b/queue-6.12/smb-smbdirect-add-smbdirect.h-with-public-structures.patch
new file mode 100644
index 0000000..41433ce
--- /dev/null
@@ -0,0 +1,54 @@
+From 606c04ee1ad88b2997fe35ae65d90b34065cf236 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:33 +0200
+Subject: smb: smbdirect: add smbdirect.h with public structures
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 7e136a718633b2c54764e185f3bfccf0763fc1dd ]
+
+Will be used in client and server in the next commits.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+CC: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/common/smbdirect/smbdirect.h | 17 +++++++++++++++++
+ 1 file changed, 17 insertions(+)
+ create mode 100644 fs/smb/common/smbdirect/smbdirect.h
+
+diff --git a/fs/smb/common/smbdirect/smbdirect.h b/fs/smb/common/smbdirect/smbdirect.h
+new file mode 100644
+index 0000000000000..eedbdf0d04337
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect.h
+@@ -0,0 +1,17 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (C) 2017, Microsoft Corporation.
++ *   Copyright (C) 2018, LG Electronics.
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__
++
++/* SMB-DIRECT buffer descriptor V1 structure [MS-SMBD] 2.2.3.1 */
++struct smbdirect_buffer_descriptor_v1 {
++      __le64 offset;
++      __le32 token;
++      __le32 length;
++} __packed;
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-smbdirect-add-smbdirect_pdu.h-with-protocol-defi.patch b/queue-6.12/smb-smbdirect-add-smbdirect_pdu.h-with-protocol-defi.patch
new file mode 100644
index 0000000..5a45d5c
--- /dev/null
@@ -0,0 +1,94 @@
+From bde16a316d50fe5ca7478e5a284b317dffe537fc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:30 +0200
+Subject: smb: smbdirect: add smbdirect_pdu.h with protocol definitions
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 00fab6cf323fa5850e6cbe283b23e605e6e97912 ]
+
+This is just a start moving into a common smbdirect layer.
+
+It will be used in the next commits...
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/common/smbdirect/smbdirect_pdu.h | 55 +++++++++++++++++++++++++
+ 1 file changed, 55 insertions(+)
+ create mode 100644 fs/smb/common/smbdirect/smbdirect_pdu.h
+
+diff --git a/fs/smb/common/smbdirect/smbdirect_pdu.h b/fs/smb/common/smbdirect/smbdirect_pdu.h
+new file mode 100644
+index 0000000000000..ae9fdb05ce231
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect_pdu.h
+@@ -0,0 +1,55 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (c) 2017 Stefan Metzmacher
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__
++
++#define SMBDIRECT_V1 0x0100
++
++/* SMBD negotiation request packet [MS-SMBD] 2.2.1 */
++struct smbdirect_negotiate_req {
++      __le16 min_version;
++      __le16 max_version;
++      __le16 reserved;
++      __le16 credits_requested;
++      __le32 preferred_send_size;
++      __le32 max_receive_size;
++      __le32 max_fragmented_size;
++} __packed;
++
++/* SMBD negotiation response packet [MS-SMBD] 2.2.2 */
++struct smbdirect_negotiate_resp {
++      __le16 min_version;
++      __le16 max_version;
++      __le16 negotiated_version;
++      __le16 reserved;
++      __le16 credits_requested;
++      __le16 credits_granted;
++      __le32 status;
++      __le32 max_readwrite_size;
++      __le32 preferred_send_size;
++      __le32 max_receive_size;
++      __le32 max_fragmented_size;
++} __packed;
++
++#define SMBDIRECT_DATA_MIN_HDR_SIZE 0x14
++#define SMBDIRECT_DATA_OFFSET       0x18
++
++#define SMBDIRECT_FLAG_RESPONSE_REQUESTED 0x0001
++
++/* SMBD data transfer packet with payload [MS-SMBD] 2.2.3 */
++struct smbdirect_data_transfer {
++      __le16 credits_requested;
++      __le16 credits_granted;
++      __le16 flags;
++      __le16 reserved;
++      __le32 remaining_data_length;
++      __le32 data_offset;
++      __le32 data_length;
++      __le32 padding;
++      __u8 buffer[];
++} __packed;
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_PDU_H__ */
+-- 
+2.39.5
+
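A small sketch of how a receiver could use the data transfer layout above to locate the payload (illustrative example_payload() only, assuming a buffer already validated against SMBDIRECT_DATA_MIN_HDR_SIZE; not taken from the kernel sources).

    #include <linux/types.h>
    /* assumes the declarations from smbdirect_pdu.h above are visible */

    static const u8 *example_payload(const u8 *recv_buf, u32 *len_out)
    {
            const struct smbdirect_data_transfer *t =
                    (const struct smbdirect_data_transfer *)recv_buf;

            *len_out = le32_to_cpu(t->data_length);
            if (!*len_out)
                    return NULL;    /* e.g. a pure credit-granting packet */

            /* data_offset counts from the start of the packet */
            return recv_buf + le32_to_cpu(t->data_offset);
    }
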
diff --git a/queue-6.12/smb-smbdirect-add-smbdirect_socket.h.patch b/queue-6.12/smb-smbdirect-add-smbdirect_socket.h.patch
new file mode 100644
index 0000000..ada57cd
--- /dev/null
@@ -0,0 +1,84 @@
+From dcc1f16267e514cb7f8e247d266d19b7b2350e8f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:36 +0200
+Subject: smb: smbdirect: add smbdirect_socket.h
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit 22234e37d7e97652cb53133009da5e14793d3c10 ]
+
+This abstracts the common smbdirect layer.
+
+It currently contains just a few things, but that
+will change over time until everything is
+in common.
+
+It will be used in client and server in the next commits.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/common/smbdirect/smbdirect_socket.h | 41 ++++++++++++++++++++++
+ 1 file changed, 41 insertions(+)
+ create mode 100644 fs/smb/common/smbdirect/smbdirect_socket.h
+
+diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
+new file mode 100644
+index 0000000000000..69a55561f91ae
+--- /dev/null
++++ b/fs/smb/common/smbdirect/smbdirect_socket.h
+@@ -0,0 +1,41 @@
++/* SPDX-License-Identifier: GPL-2.0-or-later */
++/*
++ *   Copyright (c) 2025 Stefan Metzmacher
++ */
++
++#ifndef __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
++#define __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__
++
++enum smbdirect_socket_status {
++      SMBDIRECT_SOCKET_CREATED,
++      SMBDIRECT_SOCKET_CONNECTING,
++      SMBDIRECT_SOCKET_CONNECTED,
++      SMBDIRECT_SOCKET_NEGOTIATE_FAILED,
++      SMBDIRECT_SOCKET_DISCONNECTING,
++      SMBDIRECT_SOCKET_DISCONNECTED,
++      SMBDIRECT_SOCKET_DESTROYED
++};
++
++struct smbdirect_socket {
++      enum smbdirect_socket_status status;
++
++      /* RDMA related */
++      struct {
++              struct rdma_cm_id *cm_id;
++      } rdma;
++
++      /* IB verbs related */
++      struct {
++              struct ib_pd *pd;
++              struct ib_cq *send_cq;
++              struct ib_cq *recv_cq;
++
++              /*
++               * shortcuts for rdma.cm_id->{qp,device};
++               */
++              struct ib_qp *qp;
++              struct ib_device *dev;
++      } ib;
++};
++
++#endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
+-- 
+2.39.5
+
diff --git a/queue-6.12/smb-smbdirect-introduce-smbdirect_socket_parameters.patch b/queue-6.12/smb-smbdirect-introduce-smbdirect_socket_parameters.patch
new file mode 100644
index 0000000..9cc74fd
--- /dev/null
@@ -0,0 +1,86 @@
+From a4ba74476d927b0e590f442c30d0778707fefb5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 May 2025 18:01:39 +0200
+Subject: smb: smbdirect: introduce smbdirect_socket_parameters
+
+From: Stefan Metzmacher <metze@samba.org>
+
+[ Upstream commit dce8047f4725d4469c0813ff50c4115fc2d0b628 ]
+
+This is the next step in the direction of a common smbdirect layer.
+
+Cc: Steve French <smfrench@gmail.com>
+Cc: Tom Talpey <tom@talpey.com>
+Cc: Long Li <longli@microsoft.com>
+Cc: Namjae Jeon <linkinjeon@kernel.org>
+Cc: Hyunchul Lee <hyc.lee@gmail.com>
+Cc: Meetakshi Setiya <meetakshisetiyaoss@gmail.com>
+Cc: linux-cifs@vger.kernel.org
+Cc: samba-technical@lists.samba.org
+Signed-off-by: Stefan Metzmacher <metze@samba.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 43e7e284fc77 ("cifs: Fix the smbd_response slab to allow usercopy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/smb/client/smbdirect.h                  |  1 +
+ fs/smb/common/smbdirect/smbdirect.h        | 20 ++++++++++++++++++++
+ fs/smb/common/smbdirect/smbdirect_socket.h |  2 ++
+ 3 files changed, 23 insertions(+)
+
+diff --git a/fs/smb/client/smbdirect.h b/fs/smb/client/smbdirect.h
+index ffc38a48b6140..4b559a4147af1 100644
+--- a/fs/smb/client/smbdirect.h
++++ b/fs/smb/client/smbdirect.h
+@@ -15,6 +15,7 @@
+ #include <rdma/rdma_cm.h>
+ #include <linux/mempool.h>
++#include "../common/smbdirect/smbdirect.h"
+ #include "../common/smbdirect/smbdirect_socket.h"
+ extern int rdma_readwrite_threshold;
+diff --git a/fs/smb/common/smbdirect/smbdirect.h b/fs/smb/common/smbdirect/smbdirect.h
+index eedbdf0d04337..b9a385344ff31 100644
+--- a/fs/smb/common/smbdirect/smbdirect.h
++++ b/fs/smb/common/smbdirect/smbdirect.h
+@@ -14,4 +14,24 @@ struct smbdirect_buffer_descriptor_v1 {
+       __le32 length;
+ } __packed;
++/*
++ * Connection parameters mostly from [MS-SMBD] 3.1.1.1
++ *
++ * These are setup and negotiated at the beginning of a
++ * connection and remain constant unless explicitly changed.
++ *
++ * Some values are important for the upper layer.
++ */
++struct smbdirect_socket_parameters {
++      __u16 recv_credit_max;
++      __u16 send_credit_target;
++      __u32 max_send_size;
++      __u32 max_fragmented_send_size;
++      __u32 max_recv_size;
++      __u32 max_fragmented_recv_size;
++      __u32 max_read_write_size;
++      __u32 keepalive_interval_msec;
++      __u32 keepalive_timeout_msec;
++} __packed;
++
+ #endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_H__ */
+diff --git a/fs/smb/common/smbdirect/smbdirect_socket.h b/fs/smb/common/smbdirect/smbdirect_socket.h
+index 69a55561f91ae..e5b15cc44a7ba 100644
+--- a/fs/smb/common/smbdirect/smbdirect_socket.h
++++ b/fs/smb/common/smbdirect/smbdirect_socket.h
+@@ -36,6 +36,8 @@ struct smbdirect_socket {
+               struct ib_qp *qp;
+               struct ib_device *dev;
+       } ib;
++
++      struct smbdirect_socket_parameters parameters;
+ };
+ #endif /* __FS_SMB_COMMON_SMBDIRECT_SMBDIRECT_SOCKET_H__ */
+-- 
+2.39.5
+
diff --git a/queue-6.12/um-ubd-add-missing-error-check-in-start_io_thread.patch b/queue-6.12/um-ubd-add-missing-error-check-in-start_io_thread.patch
new file mode 100644
index 0000000..560f4cf
--- /dev/null
@@ -0,0 +1,37 @@
+From 7fcf9368cade42141f55f25980f62d9710bcf8ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Jun 2025 20:44:25 +0800
+Subject: um: ubd: Add missing error check in start_io_thread()
+
+From: Tiwei Bie <tiwei.btw@antgroup.com>
+
+[ Upstream commit c55c7a85e02a7bfee20a3ffebdff7cbeb41613ef ]
+
+The subsequent call to os_set_fd_block() overwrites the previous
+return value. OR the two return values together to fix it.
+
+Fixes: f88f0bdfc32f ("um: UBD Improvements")
+Signed-off-by: Tiwei Bie <tiwei.btw@antgroup.com>
+Link: https://patch.msgid.link/20250606124428.148164-2-tiwei.btw@antgroup.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/um/drivers/ubd_user.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/um/drivers/ubd_user.c b/arch/um/drivers/ubd_user.c
+index b4f8b8e605644..592b899820d64 100644
+--- a/arch/um/drivers/ubd_user.c
++++ b/arch/um/drivers/ubd_user.c
+@@ -41,7 +41,7 @@ int start_io_thread(unsigned long sp, int *fd_out)
+       *fd_out = fds[1];
+       err = os_set_fd_block(*fd_out, 0);
+-      err = os_set_fd_block(kernel_fd, 0);
++      err |= os_set_fd_block(kernel_fd, 0);
+       if (err) {
+               printk("start_io_thread - failed to set nonblocking I/O.\n");
+               goto out_close;
+-- 
+2.39.5
+
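The pattern behind the one-line fix, spelled out as a tiny sketch (setup_a()/setup_b() are hypothetical stand-ins for the two os_set_fd_block() calls, each returning 0 or a negative error like the original).

    int setup_a(int fd);    /* hypothetical */
    int setup_b(int fd);    /* hypothetical */

    static int example_setup(int fd_a, int fd_b)
    {
            int err;

            err = setup_a(fd_a);    /* first result kept ...              */
            err |= setup_b(fd_b);   /* ... OR'ed with the second, so that */
                                    /* neither failure is silently lost   */

            /* the combined value is only ever tested for non-zero */
            return err;
    }
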
diff --git a/queue-6.12/vsock-uapi-fix-linux-vm_sockets.h-userspace-compilat.patch b/queue-6.12/vsock-uapi-fix-linux-vm_sockets.h-userspace-compilat.patch
new file mode 100644
index 0000000..1e45ffa
--- /dev/null
@@ -0,0 +1,54 @@
+From 73dba960f21f854064e9cfe89f18fb00ce40f6b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Jun 2025 12:00:53 +0200
+Subject: vsock/uapi: fix linux/vm_sockets.h userspace compilation errors
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Stefano Garzarella <sgarzare@redhat.com>
+
+[ Upstream commit 22bbc1dcd0d6785fb390c41f0dd5b5e218d23bdd ]
+
+If a userspace application just includes <linux/vm_sockets.h>, it will fail
+to build with the following errors:
+
+    /usr/include/linux/vm_sockets.h:182:39: error: invalid application of ‘sizeof’ to incomplete type ‘struct sockaddr’
+      182 |         unsigned char svm_zero[sizeof(struct sockaddr) -
+          |                                       ^~~~~~
+    /usr/include/linux/vm_sockets.h:183:39: error: ‘sa_family_t’ undeclared here (not in a function)
+      183 |                                sizeof(sa_family_t) -
+          |
+
+Include <sys/socket.h> for userspace (guarded by ifndef __KERNEL__)
+where `struct sockaddr` and `sa_family_t` are defined.
+We already do something similar in <linux/mptcp.h> and <linux/if.h>.
+
+Fixes: d021c344051a ("VSOCK: Introduce VM Sockets")
+Reported-by: Daan De Meyer <daan.j.demeyer@gmail.com>
+Signed-off-by: Stefano Garzarella <sgarzare@redhat.com>
+Link: https://patch.msgid.link/20250623100053.40979-1-sgarzare@redhat.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/uapi/linux/vm_sockets.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
+index ed07181d4eff9..e05280e415228 100644
+--- a/include/uapi/linux/vm_sockets.h
++++ b/include/uapi/linux/vm_sockets.h
+@@ -17,6 +17,10 @@
+ #ifndef _UAPI_VM_SOCKETS_H
+ #define _UAPI_VM_SOCKETS_H
++#ifndef __KERNEL__
++#include <sys/socket.h>        /* for struct sockaddr and sa_family_t */
++#endif
++
+ #include <linux/socket.h>
+ #include <linux/types.h>
+-- 
+2.39.5
+
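A minimal userspace reproducer for the build failure this patch addresses (hypothetical vm_sockets_demo.c, not part of the patch): before the change it only compiled if <sys/socket.h> was included first; afterwards the header is self-contained.

    /* vm_sockets_demo.c -- illustrative only */
    #include <linux/vm_sockets.h>   /* now pulls in <sys/socket.h> for userspace */

    int main(void)
    {
            struct sockaddr_vm svm = {
                    .svm_family = AF_VSOCK,         /* AF_VSOCK comes from the libc socket headers */
                    .svm_cid    = VMADDR_CID_HOST,  /* from vm_sockets.h */
                    .svm_port   = 1234,
            };

            return svm.svm_family == AF_VSOCK ? 0 : 1;
    }
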
diff --git a/queue-6.12/wifi-mac80211-add-link-iteration-macro-for-link-data.patch b/queue-6.12/wifi-mac80211-add-link-iteration-macro-for-link-data.patch
new file mode 100644
index 0000000..7adaaea
--- /dev/null
@@ -0,0 +1,48 @@
+From e69b8feebe4d7d54f4bb422f94e5c6315f1389d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Mar 2025 14:31:23 -0700
+Subject: wifi: mac80211: Add link iteration macro for link data
+
+From: Muna Sinada <muna.sinada@oss.qualcomm.com>
+
+[ Upstream commit f61c7b3d442bef91dd432d468d08f72eadcc3209 ]
+
+Currently, before iterating through valid links, the check that
+vif valid_links is non-zero is open-coded.
+
+Add a new macro, for_each_link_data(), which iterates over each link_id
+and checks whether it is set in vif valid_links. If the link is valid,
+the link data for that link id is accessed.
+
+Signed-off-by: Muna Sinada <muna.sinada@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250325213125.1509362-2-muna.sinada@oss.qualcomm.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Stable-dep-of: d87c3ca0f8f1 ("wifi: mac80211: finish link init before RCU publish")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/ieee80211_i.h | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index bfe0514efca37..41e69e066b386 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -1209,6 +1209,15 @@ struct ieee80211_sub_if_data *vif_to_sdata(struct ieee80211_vif *p)
+       if ((_link = wiphy_dereference((local)->hw.wiphy,               \
+                                      ___sdata->link[___link_id])))
++#define for_each_link_data(sdata, __link)                                     \
++      struct ieee80211_sub_if_data *__sdata = sdata;                          \
++      for (int __link_id = 0;                                                 \
++           __link_id < ARRAY_SIZE((__sdata)->link); __link_id++)              \
++              if ((!(__sdata)->vif.valid_links ||                             \
++                   (__sdata)->vif.valid_links & BIT(__link_id)) &&            \
++                  ((__link) = sdata_dereference((__sdata)->link[__link_id],   \
++                                                (__sdata))))
++
+ static inline int
+ ieee80211_get_mbssid_beacon_len(struct cfg80211_mbssid_elems *elems,
+                               struct cfg80211_rnr_elems *rnr_elems,
+-- 
+2.39.5
+
diff --git a/queue-6.12/wifi-mac80211-create-separate-links-for-vlan-interfa.patch b/queue-6.12/wifi-mac80211-create-separate-links-for-vlan-interfa.patch
new file mode 100644
index 0000000..165a493
--- /dev/null
@@ -0,0 +1,228 @@
+From 373ef933c8dd331da3e4a68bc8293708a00f1ad7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Mar 2025 14:31:24 -0700
+Subject: wifi: mac80211: Create separate links for VLAN interfaces
+
+From: Muna Sinada <muna.sinada@oss.qualcomm.com>
+
+[ Upstream commit 90233b0ad215efc9ea56a7c0b09021bcd4eea4ac ]
+
+Currently, MLD links for the AP_VLAN interface type are not fully
+supported.
+
+Add allocation of separate links for each VLAN interface and copy
+chanctx and chandef of AP bss to VLAN where necessary. Separate
+links are created because for Dynamic VLAN each link will have its own
+default_multicast_key.
+
+Signed-off-by: Muna Sinada <muna.sinada@oss.qualcomm.com>
+Link: https://patch.msgid.link/20250325213125.1509362-3-muna.sinada@oss.qualcomm.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Stable-dep-of: d87c3ca0f8f1 ("wifi: mac80211: finish link init before RCU publish")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/chan.c        |  3 ++
+ net/mac80211/ieee80211_i.h |  3 ++
+ net/mac80211/iface.c       | 12 ++++-
+ net/mac80211/link.c        | 90 ++++++++++++++++++++++++++++++++++++--
+ 4 files changed, 103 insertions(+), 5 deletions(-)
+
+diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
+index cca6d14084d21..282e8c13e2bfc 100644
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -2097,6 +2097,9 @@ void ieee80211_link_release_channel(struct ieee80211_link_data *link)
+ {
+       struct ieee80211_sub_if_data *sdata = link->sdata;
++      if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++              return;
++
+       lockdep_assert_wiphy(sdata->local->hw.wiphy);
+       if (rcu_access_pointer(link->conf->chanctx_conf))
+diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
+index 41e69e066b386..2f017dbbcb975 100644
+--- a/net/mac80211/ieee80211_i.h
++++ b/net/mac80211/ieee80211_i.h
+@@ -2070,6 +2070,9 @@ static inline void ieee80211_vif_clear_links(struct ieee80211_sub_if_data *sdata
+       ieee80211_vif_set_links(sdata, 0, 0);
+ }
++void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata);
++void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata);
++
+ /* tx handling */
+ void ieee80211_clear_tx_pending(struct ieee80211_local *local);
+ void ieee80211_tx_pending(struct tasklet_struct *t);
+diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
+index 7e1e561ef76c1..209d6ffa8e426 100644
+--- a/net/mac80211/iface.c
++++ b/net/mac80211/iface.c
+@@ -494,6 +494,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do
+                       break;
+               list_del_rcu(&sdata->u.mntr.list);
+               break;
++      case NL80211_IFTYPE_AP_VLAN:
++              ieee80211_apvlan_link_clear(sdata);
++              break;
+       default:
+               break;
+       }
+@@ -1268,6 +1271,8 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+               sdata->crypto_tx_tailroom_needed_cnt +=
+                       master->crypto_tx_tailroom_needed_cnt;
++              ieee80211_apvlan_link_setup(sdata);
++
+               break;
+               }
+       case NL80211_IFTYPE_AP:
+@@ -1322,7 +1327,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
+       case NL80211_IFTYPE_AP_VLAN:
+               /* no need to tell driver, but set carrier and chanctx */
+               if (sdata->bss->active) {
+-                      ieee80211_link_vlan_copy_chanctx(&sdata->deflink);
++                      struct ieee80211_link_data *link;
++
++                      for_each_link_data(sdata, link) {
++                              ieee80211_link_vlan_copy_chanctx(link);
++                      }
++
+                       netif_carrier_on(dev);
+                       ieee80211_set_vif_encap_ops(sdata);
+               } else {
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 46092fbcde90e..0525f9e44c37b 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -12,6 +12,71 @@
+ #include "key.h"
+ #include "debugfs_netdev.h"
++static void ieee80211_update_apvlan_links(struct ieee80211_sub_if_data *sdata)
++{
++      struct ieee80211_sub_if_data *vlan;
++      struct ieee80211_link_data *link;
++      u16 ap_bss_links = sdata->vif.valid_links;
++      u16 new_links, vlan_links;
++      unsigned long add;
++
++      list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list) {
++              int link_id;
++
++              if (!vlan)
++                      continue;
++
++              /* No support for 4addr with MLO yet */
++              if (vlan->wdev.use_4addr)
++                      return;
++
++              vlan_links = vlan->vif.valid_links;
++
++              new_links = ap_bss_links;
++
++              add = new_links & ~vlan_links;
++              if (!add)
++                      continue;
++
++              ieee80211_vif_set_links(vlan, add, 0);
++
++              for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
++                      link = sdata_dereference(vlan->link[link_id], vlan);
++                      ieee80211_link_vlan_copy_chanctx(link);
++              }
++      }
++}
++
++void ieee80211_apvlan_link_setup(struct ieee80211_sub_if_data *sdata)
++{
++      struct ieee80211_sub_if_data *ap_bss = container_of(sdata->bss,
++                                          struct ieee80211_sub_if_data, u.ap);
++      u16 new_links = ap_bss->vif.valid_links;
++      unsigned long add;
++      int link_id;
++
++      if (!ap_bss->vif.valid_links)
++              return;
++
++      add = new_links;
++      for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) {
++              sdata->wdev.valid_links |= BIT(link_id);
++              ether_addr_copy(sdata->wdev.links[link_id].addr,
++                              ap_bss->wdev.links[link_id].addr);
++      }
++
++      ieee80211_vif_set_links(sdata, new_links, 0);
++}
++
++void ieee80211_apvlan_link_clear(struct ieee80211_sub_if_data *sdata)
++{
++      if (!sdata->wdev.valid_links)
++              return;
++
++      sdata->wdev.valid_links = 0;
++      ieee80211_vif_clear_links(sdata);
++}
++
+ void ieee80211_link_setup(struct ieee80211_link_data *link)
+ {
+       if (link->sdata->vif.type == NL80211_IFTYPE_STATION)
+@@ -31,6 +96,17 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+       rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
+       rcu_assign_pointer(sdata->link[link_id], link);
++      if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
++              struct ieee80211_sub_if_data *ap_bss;
++              struct ieee80211_bss_conf *ap_bss_conf;
++
++              ap_bss = container_of(sdata->bss,
++                                    struct ieee80211_sub_if_data, u.ap);
++              ap_bss_conf = sdata_dereference(ap_bss->vif.link_conf[link_id],
++                                              ap_bss);
++              memcpy(link_conf, ap_bss_conf, sizeof(*link_conf));
++      }
++
+       link->sdata = sdata;
+       link->link_id = link_id;
+       link->conf = link_conf;
+@@ -51,6 +127,7 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+       if (!deflink) {
+               switch (sdata->vif.type) {
+               case NL80211_IFTYPE_AP:
++              case NL80211_IFTYPE_AP_VLAN:
+                       ether_addr_copy(link_conf->addr,
+                                       sdata->wdev.links[link_id].addr);
+                       link_conf->bssid = link_conf->addr;
+@@ -174,6 +251,7 @@ static void ieee80211_set_vif_links_bitmaps(struct ieee80211_sub_if_data *sdata,
+       switch (sdata->vif.type) {
+       case NL80211_IFTYPE_AP:
++      case NL80211_IFTYPE_AP_VLAN:
+               /* in an AP all links are always active */
+               sdata->vif.active_links = valid_links;
+@@ -275,12 +353,16 @@ static int ieee80211_vif_update_links(struct ieee80211_sub_if_data *sdata,
+               ieee80211_set_vif_links_bitmaps(sdata, new_links, dormant_links);
+               /* tell the driver */
+-              ret = drv_change_vif_links(sdata->local, sdata,
+-                                         old_links & old_active,
+-                                         new_links & sdata->vif.active_links,
+-                                         old);
++              if (sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
++                      ret = drv_change_vif_links(sdata->local, sdata,
++                                                 old_links & old_active,
++                                                 new_links & sdata->vif.active_links,
++                                                 old);
+               if (!new_links)
+                       ieee80211_debugfs_recreate_netdev(sdata, false);
++
++              if (sdata->vif.type == NL80211_IFTYPE_AP)
++                      ieee80211_update_apvlan_links(sdata);
+       }
+       if (ret) {
+-- 
+2.39.5
+
diff --git a/queue-6.12/wifi-mac80211-finish-link-init-before-rcu-publish.patch b/queue-6.12/wifi-mac80211-finish-link-init-before-rcu-publish.patch
new file mode 100644
index 0000000..47450e6
--- /dev/null
@@ -0,0 +1,48 @@
+From 84db280220fb87d1787dfd1f29092ac29920b8ae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Jun 2025 13:07:49 +0200
+Subject: wifi: mac80211: finish link init before RCU publish
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+[ Upstream commit d87c3ca0f8f1ca4c25f2ed819e954952f4d8d709 ]
+
+Since the link/conf pointers can be accessed without any
+protection other than RCU, make sure the data is actually
+set up before publishing the structures.
+
+Fixes: b2e8434f1829 ("wifi: mac80211: set up/tear down client vif links properly")
+Link: https://patch.msgid.link/20250624130749.9a308b713c74.I4a80f5eead112a38730939ea591d2e275c721256@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/link.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/mac80211/link.c b/net/mac80211/link.c
+index 0525f9e44c37b..9484449d6a347 100644
+--- a/net/mac80211/link.c
++++ b/net/mac80211/link.c
+@@ -93,9 +93,6 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+       if (link_id < 0)
+               link_id = 0;
+-      rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
+-      rcu_assign_pointer(sdata->link[link_id], link);
+-
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               struct ieee80211_sub_if_data *ap_bss;
+               struct ieee80211_bss_conf *ap_bss_conf;
+@@ -142,6 +139,9 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata,
+               ieee80211_link_debugfs_add(link);
+       }
++
++      rcu_assign_pointer(sdata->vif.link_conf[link_id], link_conf);
++      rcu_assign_pointer(sdata->link[link_id], link);
+ }
+ void ieee80211_link_stop(struct ieee80211_link_data *link)
+-- 
+2.39.5
+
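The ordering rule this fix enforces, shown as a generic sketch (example_item/example_publish() are illustrative only, not mac80211 code): an RCU-published object must be fully initialised before rcu_assign_pointer() makes it reachable, since readers may dereference it immediately.

    #include <linux/errno.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct example_item {
            int a;
            int b;
    };

    static struct example_item __rcu *example_ptr;

    static int example_publish(int a, int b)
    {
            struct example_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

            if (!item)
                    return -ENOMEM;

            item->a = a;    /* initialise every field first ... */
            item->b = b;

            rcu_assign_pointer(example_ptr, item);  /* ... then publish */
            return 0;
    }
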
diff --git a/queue-6.12/wifi-mac80211-fix-beacon-interval-calculation-overfl.patch b/queue-6.12/wifi-mac80211-fix-beacon-interval-calculation-overfl.patch
new file mode 100644
index 0000000..7a3e49a
--- /dev/null
@@ -0,0 +1,38 @@
+From a845d991ae52852e7641d4f988a752939fb68322 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 21 Jun 2025 22:32:09 +1000
+Subject: wifi: mac80211: fix beacon interval calculation overflow
+
+From: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+
+[ Upstream commit 7a3750ff0f2e8fee338a9c168f429f6c37f0e820 ]
+
+As we are converting from TU to usecs, a beacon interval of
+100 TU becomes 100*1024 usecs, which wraps a 16-bit integer.
+To fix this, change the variable to a u32.
+
+Fixes: 057d5f4ba1e4 ("mac80211: sync dtim_count to TSF")
+Signed-off-by: Lachlan Hodges <lachlan.hodges@morsemicro.com>
+Link: https://patch.msgid.link/20250621123209.511796-1-lachlan.hodges@morsemicro.com
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mac80211/util.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index a98ae563613c0..77638e965726c 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -3908,7 +3908,7 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local,
+ {
+       u64 tsf = drv_get_tsf(local, sdata);
+       u64 dtim_count = 0;
+-      u16 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
++      u32 beacon_int = sdata->vif.bss_conf.beacon_int * 1024;
+       u8 dtim_period = sdata->vif.bss_conf.dtim_period;
+       struct ps_data *ps;
+       u8 bcns_from_dtim;
+-- 
+2.39.5
+
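The wraparound is easy to check numerically; a short standalone demo (hypothetical, userspace C rather than kernel code) of the truncation the type change avoids.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint16_t wrapped = (uint16_t)(100 * 1024);  /* 102400 truncates to 36864 */
            uint32_t kept    = 100u * 1024u;            /* 102400 preserved in a u32  */

            printf("u16: %u, u32: %u\n", (unsigned)wrapped, (unsigned)kept);
            return 0;
    }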