]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 6.1
authorSasha Levin <sashal@kernel.org>
Wed, 5 Jun 2024 12:09:30 +0000 (08:09 -0400)
committerSasha Levin <sashal@kernel.org>
Wed, 5 Jun 2024 12:09:30 +0000 (08:09 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
43 files changed:
queue-6.1/af_unix-read-sk-sk_hash-under-bindlock-during-bind.patch [new file with mode: 0644]
queue-6.1/alsa-core-remove-debugfs-at-disconnection.patch [new file with mode: 0644]
queue-6.1/alsa-hda-realtek-add-quirk-for-asus-rog-g634z.patch [new file with mode: 0644]
queue-6.1/alsa-hda-realtek-adjust-g814jzr-to-use-spi-init-for-.patch [new file with mode: 0644]
queue-6.1/alsa-hda-realtek-amend-g634-quirk-to-enable-rear-spe.patch [new file with mode: 0644]
queue-6.1/alsa-jack-use-guard-for-locking.patch [new file with mode: 0644]
queue-6.1/bpf-allow-delete-from-sockmap-sockhash-only-if-updat.patch [new file with mode: 0644]
queue-6.1/bpf-fix-potential-integer-overflow-in-resolve_btfids.patch [new file with mode: 0644]
queue-6.1/dma-buf-sw-sync-don-t-enable-irq-from-sync_print_obj.patch [new file with mode: 0644]
queue-6.1/dma-mapping-benchmark-fix-node-id-validation.patch [new file with mode: 0644]
queue-6.1/dma-mapping-benchmark-handle-numa_no_node-correctly.patch [new file with mode: 0644]
queue-6.1/drm-i915-guc-avoid-field_prep-warning.patch [new file with mode: 0644]
queue-6.1/enic-validate-length-of-nl-attributes-in-enic_set_vf.patch [new file with mode: 0644]
queue-6.1/hwmon-shtc1-fix-property-misspelling.patch [new file with mode: 0644]
queue-6.1/ice-fix-accounting-if-a-vlan-already-exists.patch [new file with mode: 0644]
queue-6.1/ipvlan-dont-use-skb-sk-in-ipvlan_process_v-4-6-_outb.patch [new file with mode: 0644]
queue-6.1/kconfig-fix-comparison-to-constant-symbols-m-n.patch [new file with mode: 0644]
queue-6.1/net-dsa-microchip-fix-rgmii-error-in-ksz-dsa-driver.patch [new file with mode: 0644]
queue-6.1/net-ena-add-dynamic-recycling-mechanism-for-rx-buffe.patch [new file with mode: 0644]
queue-6.1/net-ena-fix-redundant-device-numa-node-override.patch [new file with mode: 0644]
queue-6.1/net-ena-reduce-lines-with-longer-column-width-bounda.patch [new file with mode: 0644]
queue-6.1/net-fec-add-fec_enet_deinit.patch [new file with mode: 0644]
queue-6.1/net-mlx5-lag-do-bond-only-if-slaves-agree-on-roce-st.patch [new file with mode: 0644]
queue-6.1/net-mlx5e-fix-ipsec-tunnel-mode-offload-feature-chec.patch [new file with mode: 0644]
queue-6.1/net-mlx5e-fix-udp-gso-for-encapsulated-packets.patch [new file with mode: 0644]
queue-6.1/net-mlx5e-use-rx_missed_errors-instead-of-rx_dropped.patch [new file with mode: 0644]
queue-6.1/net-phy-micrel-set-soft_reset-callback-to-genphy_sof.patch [new file with mode: 0644]
queue-6.1/net-usb-smsc95xx-fix-changing-led_sel-bit-value-upda.patch [new file with mode: 0644]
queue-6.1/netfilter-nfnetlink_queue-acquire-rcu_read_lock-in-i.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_fib-allow-from-forward-input-without-i.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_payload-move-struct-nft_payload_set-de.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_payload-rebuild-vlan-header-on-h_proto.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_payload-rebuild-vlan-header-when-neede.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_payload-restore-vlan-q-in-q-match-supp.patch [new file with mode: 0644]
queue-6.1/netfilter-nft_payload-skbuff-vlan-metadata-mangle-su.patch [new file with mode: 0644]
queue-6.1/netfilter-tproxy-bail-out-if-ip-has-been-disabled-on.patch [new file with mode: 0644]
queue-6.1/nvmet-fix-ns-enable-disable-possible-hang.patch [new file with mode: 0644]
queue-6.1/powerpc-pseries-lparcfg-drop-error-message-from-gues.patch [new file with mode: 0644]
queue-6.1/powerpc-uaccess-use-yz-asm-constraint-for-ld.patch [new file with mode: 0644]
queue-6.1/riscv-prevent-pt_regs-corruption-for-secondary-idle-.patch [new file with mode: 0644]
queue-6.1/series
queue-6.1/spi-don-t-mark-message-dma-mapped-when-no-transfer-i.patch [new file with mode: 0644]
queue-6.1/spi-stm32-don-t-warn-about-spurious-interrupts.patch [new file with mode: 0644]

diff --git a/queue-6.1/af_unix-read-sk-sk_hash-under-bindlock-during-bind.patch b/queue-6.1/af_unix-read-sk-sk_hash-under-bindlock-during-bind.patch
new file mode 100644 (file)
index 0000000..5a8ba6b
--- /dev/null
@@ -0,0 +1,132 @@
+From 14c078612fba47af4478303743f7cbc6a4d11f8c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 00:42:18 +0900
+Subject: af_unix: Read sk->sk_hash under bindlock during bind().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 51d1b25a720982324871338b1a36b197ec9bd6f0 ]
+
+syzkaller reported data-race of sk->sk_hash in unix_autobind() [0],
+and the same ones exist in unix_bind_bsd() and unix_bind_abstract().
+
+The three bind() functions prefetch sk->sk_hash locklessly and
+use it later after validating that unix_sk(sk)->addr is NULL under
+unix_sk(sk)->bindlock.
+
+The prefetched sk->sk_hash is the hash value of unbound socket set
+in unix_create1() and does not change until bind() completes.
+
+There could be a chance that sk->sk_hash changes after the lockless
+read.  However, in such a case, non-NULL unix_sk(sk)->addr is visible
+under unix_sk(sk)->bindlock, and bind() returns -EINVAL without using
+the prefetched value.
+
+The KCSAN splat is false-positive, but let's silence it by reading
+sk->sk_hash under unix_sk(sk)->bindlock.
+
+[0]:
+BUG: KCSAN: data-race in unix_autobind / unix_autobind
+
+write to 0xffff888034a9fb88 of 4 bytes by task 4468 on cpu 0:
+ __unix_set_addr_hash net/unix/af_unix.c:331 [inline]
+ unix_autobind+0x47a/0x7d0 net/unix/af_unix.c:1185
+ unix_dgram_connect+0x7e3/0x890 net/unix/af_unix.c:1373
+ __sys_connect_file+0xd7/0xe0 net/socket.c:2048
+ __sys_connect+0x114/0x140 net/socket.c:2065
+ __do_sys_connect net/socket.c:2075 [inline]
+ __se_sys_connect net/socket.c:2072 [inline]
+ __x64_sys_connect+0x40/0x50 net/socket.c:2072
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0x4f/0x110 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x46/0x4e
+
+read to 0xffff888034a9fb88 of 4 bytes by task 4465 on cpu 1:
+ unix_autobind+0x28/0x7d0 net/unix/af_unix.c:1134
+ unix_dgram_connect+0x7e3/0x890 net/unix/af_unix.c:1373
+ __sys_connect_file+0xd7/0xe0 net/socket.c:2048
+ __sys_connect+0x114/0x140 net/socket.c:2065
+ __do_sys_connect net/socket.c:2075 [inline]
+ __se_sys_connect net/socket.c:2072 [inline]
+ __x64_sys_connect+0x40/0x50 net/socket.c:2072
+ do_syscall_x64 arch/x86/entry/common.c:52 [inline]
+ do_syscall_64+0x4f/0x110 arch/x86/entry/common.c:83
+ entry_SYSCALL_64_after_hwframe+0x46/0x4e
+
+value changed: 0x000000e4 -> 0x000001e3
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 1 PID: 4465 Comm: syz-executor.0 Not tainted 6.8.0-12822-gcd51db110a7e #12
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+
+Fixes: afd20b9290e1 ("af_unix: Replace the big lock with small locks.")
+Reported-by: syzkaller <syzkaller@googlegroups.com>
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://lore.kernel.org/r/20240522154218.78088-1-kuniyu@amazon.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/unix/af_unix.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 97d22bdfdc73b..4a8ace9d32391 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -1121,8 +1121,8 @@ static struct sock *unix_find_other(struct net *net,
+ static int unix_autobind(struct sock *sk)
+ {
+-      unsigned int new_hash, old_hash = sk->sk_hash;
+       struct unix_sock *u = unix_sk(sk);
++      unsigned int new_hash, old_hash;
+       struct net *net = sock_net(sk);
+       struct unix_address *addr;
+       u32 lastnum, ordernum;
+@@ -1145,6 +1145,7 @@ static int unix_autobind(struct sock *sk)
+       addr->name->sun_family = AF_UNIX;
+       refcount_set(&addr->refcnt, 1);
++      old_hash = sk->sk_hash;
+       ordernum = get_random_u32();
+       lastnum = ordernum & 0xFFFFF;
+ retry:
+@@ -1185,8 +1186,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ {
+       umode_t mode = S_IFSOCK |
+              (SOCK_INODE(sk->sk_socket)->i_mode & ~current_umask());
+-      unsigned int new_hash, old_hash = sk->sk_hash;
+       struct unix_sock *u = unix_sk(sk);
++      unsigned int new_hash, old_hash;
+       struct net *net = sock_net(sk);
+       struct user_namespace *ns; // barf...
+       struct unix_address *addr;
+@@ -1227,6 +1228,7 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+       if (u->addr)
+               goto out_unlock;
++      old_hash = sk->sk_hash;
+       new_hash = unix_bsd_hash(d_backing_inode(dentry));
+       unix_table_double_lock(net, old_hash, new_hash);
+       u->path.mnt = mntget(parent.mnt);
+@@ -1254,8 +1256,8 @@ static int unix_bind_bsd(struct sock *sk, struct sockaddr_un *sunaddr,
+ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+                             int addr_len)
+ {
+-      unsigned int new_hash, old_hash = sk->sk_hash;
+       struct unix_sock *u = unix_sk(sk);
++      unsigned int new_hash, old_hash;
+       struct net *net = sock_net(sk);
+       struct unix_address *addr;
+       int err;
+@@ -1273,6 +1275,7 @@ static int unix_bind_abstract(struct sock *sk, struct sockaddr_un *sunaddr,
+               goto out_mutex;
+       }
++      old_hash = sk->sk_hash;
+       new_hash = unix_abstract_hash(addr->name, addr->len, sk->sk_type);
+       unix_table_double_lock(net, old_hash, new_hash);
+-- 
+2.43.0
+
diff --git a/queue-6.1/alsa-core-remove-debugfs-at-disconnection.patch b/queue-6.1/alsa-core-remove-debugfs-at-disconnection.patch
new file mode 100644 (file)
index 0000000..4e162bf
--- /dev/null
@@ -0,0 +1,134 @@
+From 51cf0cc57fb29586a835355ae5346301ad3cf236 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 May 2024 17:11:46 +0200
+Subject: ALSA: core: Remove debugfs at disconnection
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 495000a38634e640e2fd02f7e4f1512ccc92d770 ]
+
+The card-specific debugfs entries are removed at the last stage of
+card free phase, and it's performed after synchronization of the
+closes of all opened fds.  This works fine for most cases, but it can
+be potentially problematic for a hotplug device like USB-audio.  Due
+to the nature of snd_card_free_when_closed(), the card free isn't
+called immediately after the driver removal for a hotplug device, but
+it's left until the last fd is closed.  It implies that the card
+debugfs entries also remain.  Meanwhile, when a new device is inserted
+before the last close and the very same card slot is assigned, the
+driver tries to create the card debugfs root again on the very same
+path.  This conflicts with the remaining entry, and results in the
+kernel warning such as:
+  debugfs: Directory 'card0' with parent 'sound' already present!
+with the missing debugfs entry afterwards.
+
+For avoiding such conflicts, remove debugfs entries at the device
+disconnection phase instead.  The jack kctl debugfs entries get
+removed in snd_jack_dev_disconnect() instead of each kctl
+private_free.
+
+Fixes: 2d670ea2bd53 ("ALSA: jack: implement software jack injection via debugfs")
+Link: https://lore.kernel.org/r/20240524151256.32521-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/init.c |  9 +++++----
+ sound/core/jack.c | 21 ++++++++++++++-------
+ 2 files changed, 19 insertions(+), 11 deletions(-)
+
+diff --git a/sound/core/init.c b/sound/core/init.c
+index 83e45efed61ed..3f08104e9366b 100644
+--- a/sound/core/init.c
++++ b/sound/core/init.c
+@@ -541,6 +541,11 @@ int snd_card_disconnect(struct snd_card *card)
+               synchronize_irq(card->sync_irq);
+       snd_info_card_disconnect(card);
++#ifdef CONFIG_SND_DEBUG
++      debugfs_remove(card->debugfs_root);
++      card->debugfs_root = NULL;
++#endif
++
+       if (card->registered) {
+               device_del(&card->card_dev);
+               card->registered = false;
+@@ -602,10 +607,6 @@ static int snd_card_do_free(struct snd_card *card)
+               dev_warn(card->dev, "unable to free card info\n");
+               /* Not fatal error */
+       }
+-#ifdef CONFIG_SND_DEBUG
+-      debugfs_remove(card->debugfs_root);
+-      card->debugfs_root = NULL;
+-#endif
+       if (card->release_completion)
+               complete(card->release_completion);
+       if (!card->managed)
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index 191357d619131..bd795452e57bf 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -37,11 +37,15 @@ static const int jack_switch_types[SND_JACK_SWITCH_TYPES] = {
+ };
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
++static void snd_jack_remove_debugfs(struct snd_jack *jack);
++
+ static int snd_jack_dev_disconnect(struct snd_device *device)
+ {
+-#ifdef CONFIG_SND_JACK_INPUT_DEV
+       struct snd_jack *jack = device->device_data;
++      snd_jack_remove_debugfs(jack);
++
++#ifdef CONFIG_SND_JACK_INPUT_DEV
+       guard(mutex)(&jack->input_dev_lock);
+       if (!jack->input_dev)
+               return 0;
+@@ -383,10 +387,14 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+       return 0;
+ }
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+-      debugfs_remove(jack_kctl->jack_debugfs_root);
+-      jack_kctl->jack_debugfs_root = NULL;
++      struct snd_jack_kctl *jack_kctl;
++
++      list_for_each_entry(jack_kctl, &jack->kctl_list, list) {
++              debugfs_remove(jack_kctl->jack_debugfs_root);
++              jack_kctl->jack_debugfs_root = NULL;
++      }
+ }
+ #else /* CONFIG_SND_JACK_INJECTION_DEBUG */
+ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+@@ -395,7 +403,7 @@ static int snd_jack_debugfs_add_inject_node(struct snd_jack *jack,
+       return 0;
+ }
+-static void snd_jack_debugfs_clear_inject_node(struct snd_jack_kctl *jack_kctl)
++static void snd_jack_remove_debugfs(struct snd_jack *jack)
+ {
+ }
+ #endif /* CONFIG_SND_JACK_INJECTION_DEBUG */
+@@ -406,7 +414,6 @@ static void snd_jack_kctl_private_free(struct snd_kcontrol *kctl)
+       jack_kctl = kctl->private_data;
+       if (jack_kctl) {
+-              snd_jack_debugfs_clear_inject_node(jack_kctl);
+               list_del(&jack_kctl->list);
+               kfree(jack_kctl);
+       }
+@@ -499,8 +506,8 @@ int snd_jack_new(struct snd_card *card, const char *id, int type,
+               .dev_free = snd_jack_dev_free,
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
+               .dev_register = snd_jack_dev_register,
+-              .dev_disconnect = snd_jack_dev_disconnect,
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
++              .dev_disconnect = snd_jack_dev_disconnect,
+       };
+       if (initial_kctl) {
+-- 
+2.43.0
+
diff --git a/queue-6.1/alsa-hda-realtek-add-quirk-for-asus-rog-g634z.patch b/queue-6.1/alsa-hda-realtek-add-quirk-for-asus-rog-g634z.patch
new file mode 100644 (file)
index 0000000..b37e30b
--- /dev/null
@@ -0,0 +1,39 @@
+From a58bb384cae407415e5d9bdb244df1226d928b4d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Jun 2023 18:03:20 +1200
+Subject: ALSA: hda/realtek: Add quirk for ASUS ROG G634Z
+
+From: Luke D. Jones <luke@ljones.dev>
+
+[ Upstream commit 555434fd5c6b3589d9511ab6e88faf50346e19da ]
+
+Adds the required quirk to enable the Cirrus amp and correct pins
+on the ASUS ROG G634Z series.
+
+While this works if the related _DSD properties are made available, these
+aren't included in the ACPI of these laptops (yet).
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+Link: https://lore.kernel.org/r/20230619060320.1336455-1-luke@ljones.dev
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: 2be46155d792 ("ALSA: hda/realtek: Adjust G814JZR to use SPI init for amp")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 7aa961f613f88..4d7c23b20cce0 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9860,6 +9860,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+       SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
++      SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_HEADSET_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+-- 
+2.43.0
+
diff --git a/queue-6.1/alsa-hda-realtek-adjust-g814jzr-to-use-spi-init-for-.patch b/queue-6.1/alsa-hda-realtek-adjust-g814jzr-to-use-spi-init-for-.patch
new file mode 100644 (file)
index 0000000..cb82965
--- /dev/null
@@ -0,0 +1,37 @@
+From 53e0235f4039c06b4a9c943b6fcb67b46834acf1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 May 2024 21:10:32 +1200
+Subject: ALSA: hda/realtek: Adjust G814JZR to use SPI init for amp
+
+From: Luke D. Jones <luke@ljones.dev>
+
+[ Upstream commit 2be46155d792d629e8fe3188c2cde176833afe36 ]
+
+The 2024 ASUS ROG G814J model is much the same as the 2023 model
+and the 2023 16" version. We can use the same Cirrus Amp quirk.
+
+Fixes: 811dd426a9b1 ("ALSA: hda/realtek: Add quirks for Asus ROG 2024 laptops using CS35L41")
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+Link: https://lore.kernel.org/r/20240526091032.114545-1-luke@ljones.dev
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index c08bf42c602dd..3a7104f72cabd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -9893,7 +9893,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2),
+       SND_PCI_QUIRK(0x1043, 0x3a20, "ASUS G614JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a30, "ASUS G814JVR/JIR", ALC245_FIXUP_CS35L41_SPI_2),
+-      SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC245_FIXUP_CS35L41_SPI_2),
++      SND_PCI_QUIRK(0x1043, 0x3a40, "ASUS G814JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+       SND_PCI_QUIRK(0x1043, 0x3a50, "ASUS G834JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x3a60, "ASUS G634JYR/JZR", ALC245_FIXUP_CS35L41_SPI_2),
+       SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC),
+-- 
+2.43.0
+
diff --git a/queue-6.1/alsa-hda-realtek-amend-g634-quirk-to-enable-rear-spe.patch b/queue-6.1/alsa-hda-realtek-amend-g634-quirk-to-enable-rear-spe.patch
new file mode 100644 (file)
index 0000000..ec068e4
--- /dev/null
@@ -0,0 +1,61 @@
+From 193fce1c0442d339a4594496de6174eba6cd2da6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Jul 2023 16:46:17 +1200
+Subject: ALSA: hda/realtek: Amend G634 quirk to enable rear speakers
+
+From: Luke D. Jones <luke@ljones.dev>
+
+[ Upstream commit b759a5f097cd42c666f1ebca8da50ff507435fbe ]
+
+Amends the last quirk for the G634 with 0x1caf subsys to enable the rear
+speakers via pincfg.
+
+Signed-off-by: Luke D. Jones <luke@ljones.dev>
+Link: https://lore.kernel.org/r/20230704044619.19343-4-luke@ljones.dev
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Stable-dep-of: 2be46155d792 ("ALSA: hda/realtek: Adjust G814JZR to use SPI init for amp")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index 4d7c23b20cce0..c08bf42c602dd 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -7207,6 +7207,7 @@ enum {
+       ALC285_FIXUP_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_HEADSET_MIC,
++      ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS,
+       ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1,
+       ALC285_FIXUP_ASUS_I2C_HEADSET_MIC,
+       ALC280_FIXUP_HP_HEADSET_MIC,
+@@ -8214,6 +8215,15 @@ static const struct hda_fixup alc269_fixups[] = {
+               .chained = true,
+               .chain_id = ALC285_FIXUP_ASUS_SPEAKER2_TO_DAC1
+       },
++      [ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS] = {
++              .type = HDA_FIXUP_PINS,
++              .v.pins = (const struct hda_pintbl[]) {
++                      { 0x14, 0x90170120 },
++                      { }
++              },
++              .chained = true,
++              .chain_id = ALC285_FIXUP_ASUS_HEADSET_MIC
++      },
+       [ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc285_fixup_speaker2_to_dac1,
+@@ -9860,7 +9870,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+       SND_PCI_QUIRK(0x1043, 0x1c23, "Asus X55U", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+       SND_PCI_QUIRK(0x1043, 0x1c62, "ASUS GU603", ALC289_FIXUP_ASUS_GA401),
+       SND_PCI_QUIRK(0x1043, 0x1c92, "ASUS ROG Strix G15", ALC285_FIXUP_ASUS_G533Z_PINS),
+-      SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_HEADSET_MIC),
++      SND_PCI_QUIRK(0x1043, 0x1caf, "ASUS G634JYR/JZR", ALC285_FIXUP_ASUS_SPI_REAR_SPEAKERS),
+       SND_PCI_QUIRK(0x1043, 0x1ccd, "ASUS X555UB", ALC256_FIXUP_ASUS_MIC),
+       SND_PCI_QUIRK(0x1043, 0x1d1f, "ASUS ROG Strix G17 2023 (G713PV)", ALC287_FIXUP_CS35L41_I2C_2),
+       SND_PCI_QUIRK(0x1043, 0x1d42, "ASUS Zephyrus G14 2022", ALC289_FIXUP_ASUS_GA401),
+-- 
+2.43.0
+
diff --git a/queue-6.1/alsa-jack-use-guard-for-locking.patch b/queue-6.1/alsa-jack-use-guard-for-locking.patch
new file mode 100644 (file)
index 0000000..be313f4
--- /dev/null
@@ -0,0 +1,91 @@
+From b616269fe522318d0428e7ee834c54c69e35cbec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 27 Feb 2024 09:52:52 +0100
+Subject: ALSA: jack: Use guard() for locking
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit 7234795b59f7b0b14569ec46dce56300a4988067 ]
+
+We can simplify the code gracefully with new guard() macro and co for
+automatic cleanup of locks.
+
+Only the code refactoring, and no functional changes.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Link: https://lore.kernel.org/r/20240227085306.9764-11-tiwai@suse.de
+Stable-dep-of: 495000a38634 ("ALSA: core: Remove debugfs at disconnection")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/core/jack.c | 25 +++++++------------------
+ 1 file changed, 7 insertions(+), 18 deletions(-)
+
+diff --git a/sound/core/jack.c b/sound/core/jack.c
+index 03d155ed362b4..191357d619131 100644
+--- a/sound/core/jack.c
++++ b/sound/core/jack.c
+@@ -42,11 +42,9 @@ static int snd_jack_dev_disconnect(struct snd_device *device)
+ #ifdef CONFIG_SND_JACK_INPUT_DEV
+       struct snd_jack *jack = device->device_data;
+-      mutex_lock(&jack->input_dev_lock);
+-      if (!jack->input_dev) {
+-              mutex_unlock(&jack->input_dev_lock);
++      guard(mutex)(&jack->input_dev_lock);
++      if (!jack->input_dev)
+               return 0;
+-      }
+       /* If the input device is registered with the input subsystem
+        * then we need to use a different deallocator. */
+@@ -55,7 +53,6 @@ static int snd_jack_dev_disconnect(struct snd_device *device)
+       else
+               input_free_device(jack->input_dev);
+       jack->input_dev = NULL;
+-      mutex_unlock(&jack->input_dev_lock);
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+       return 0;
+ }
+@@ -94,11 +91,9 @@ static int snd_jack_dev_register(struct snd_device *device)
+       snprintf(jack->name, sizeof(jack->name), "%s %s",
+                card->shortname, jack->id);
+-      mutex_lock(&jack->input_dev_lock);
+-      if (!jack->input_dev) {
+-              mutex_unlock(&jack->input_dev_lock);
++      guard(mutex)(&jack->input_dev_lock);
++      if (!jack->input_dev)
+               return 0;
+-      }
+       jack->input_dev->name = jack->name;
+@@ -123,7 +118,6 @@ static int snd_jack_dev_register(struct snd_device *device)
+       if (err == 0)
+               jack->registered = 1;
+-      mutex_unlock(&jack->input_dev_lock);
+       return err;
+ }
+ #endif /* CONFIG_SND_JACK_INPUT_DEV */
+@@ -588,14 +582,9 @@ EXPORT_SYMBOL(snd_jack_new);
+ void snd_jack_set_parent(struct snd_jack *jack, struct device *parent)
+ {
+       WARN_ON(jack->registered);
+-      mutex_lock(&jack->input_dev_lock);
+-      if (!jack->input_dev) {
+-              mutex_unlock(&jack->input_dev_lock);
+-              return;
+-      }
+-
+-      jack->input_dev->dev.parent = parent;
+-      mutex_unlock(&jack->input_dev_lock);
++      guard(mutex)(&jack->input_dev_lock);
++      if (jack->input_dev)
++              jack->input_dev->dev.parent = parent;
+ }
+ EXPORT_SYMBOL(snd_jack_set_parent);
+-- 
+2.43.0
+
diff --git a/queue-6.1/bpf-allow-delete-from-sockmap-sockhash-only-if-updat.patch b/queue-6.1/bpf-allow-delete-from-sockmap-sockhash-only-if-updat.patch
new file mode 100644 (file)
index 0000000..be319b6
--- /dev/null
@@ -0,0 +1,79 @@
+From f6ed41aa0ade7af390e68fff82815577e4ebfb83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 May 2024 13:20:07 +0200
+Subject: bpf: Allow delete from sockmap/sockhash only if update is allowed
+
+From: Jakub Sitnicki <jakub@cloudflare.com>
+
+[ Upstream commit 98e948fb60d41447fd8d2d0c3b8637fc6b6dc26d ]
+
+We have seen an influx of syzkaller reports where a BPF program attached to
+a tracepoint triggers a locking rule violation by performing a map_delete
+on a sockmap/sockhash.
+
+We don't intend to support this artificial use scenario. Extend the
+existing verifier allowed-program-type check for updating sockmap/sockhash
+to also cover deleting from a map.
+
+From now on only BPF programs which were previously allowed to update
+sockmap/sockhash can delete from these map types.
+
+Fixes: ff9105993240 ("bpf, sockmap: Prevent lock inversion deadlock in map delete elem")
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Reported-by: syzbot+ec941d6e24f633a59172@syzkaller.appspotmail.com
+Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: syzbot+ec941d6e24f633a59172@syzkaller.appspotmail.com
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=ec941d6e24f633a59172
+Link: https://lore.kernel.org/bpf/20240527-sockmap-verify-deletes-v1-1-944b372f2101@cloudflare.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 18b3f429abe17..1d851e2f48590 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6492,7 +6492,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+       enum bpf_attach_type eatype = env->prog->expected_attach_type;
+       enum bpf_prog_type type = resolve_prog_type(env->prog);
+-      if (func_id != BPF_FUNC_map_update_elem)
++      if (func_id != BPF_FUNC_map_update_elem &&
++          func_id != BPF_FUNC_map_delete_elem)
+               return false;
+       /* It's not possible to get access to a locked struct sock in these
+@@ -6503,6 +6504,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+               if (eatype == BPF_TRACE_ITER)
+                       return true;
+               break;
++      case BPF_PROG_TYPE_SOCK_OPS:
++              /* map_update allowed only via dedicated helpers with event type checks */
++              if (func_id == BPF_FUNC_map_delete_elem)
++                      return true;
++              break;
+       case BPF_PROG_TYPE_SOCKET_FILTER:
+       case BPF_PROG_TYPE_SCHED_CLS:
+       case BPF_PROG_TYPE_SCHED_ACT:
+@@ -6598,7 +6604,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+       case BPF_MAP_TYPE_SOCKMAP:
+               if (func_id != BPF_FUNC_sk_redirect_map &&
+                   func_id != BPF_FUNC_sock_map_update &&
+-                  func_id != BPF_FUNC_map_delete_elem &&
+                   func_id != BPF_FUNC_msg_redirect_map &&
+                   func_id != BPF_FUNC_sk_select_reuseport &&
+                   func_id != BPF_FUNC_map_lookup_elem &&
+@@ -6608,7 +6613,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+       case BPF_MAP_TYPE_SOCKHASH:
+               if (func_id != BPF_FUNC_sk_redirect_hash &&
+                   func_id != BPF_FUNC_sock_hash_update &&
+-                  func_id != BPF_FUNC_map_delete_elem &&
+                   func_id != BPF_FUNC_msg_redirect_hash &&
+                   func_id != BPF_FUNC_sk_select_reuseport &&
+                   func_id != BPF_FUNC_map_lookup_elem &&
+-- 
+2.43.0
+
diff --git a/queue-6.1/bpf-fix-potential-integer-overflow-in-resolve_btfids.patch b/queue-6.1/bpf-fix-potential-integer-overflow-in-resolve_btfids.patch
new file mode 100644 (file)
index 0000000..bae661d
--- /dev/null
@@ -0,0 +1,41 @@
+From 95379d3ca4746eb0b2df465e2b7ecdb7d5f0aedc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 May 2024 09:09:31 +0200
+Subject: bpf: Fix potential integer overflow in resolve_btfids
+
+From: Friedrich Vock <friedrich.vock@gmx.de>
+
+[ Upstream commit 44382b3ed6b2787710c8ade06c0e97f5970a47c8 ]
+
+err is a 32-bit integer, but elf_update returns an off_t, which is 64-bit
+at least on 64-bit platforms. If symbols_patch is called on a binary between
+2-4GB in size, the result will be negative when cast to a 32-bit integer,
+which the code assumes means an error occurred. This can wrongly trigger
+build failures when building very large kernel images.
+
+Fixes: fbbb68de80a4 ("bpf: Add resolve_btfids tool to resolve BTF IDs in ELF object")
+Signed-off-by: Friedrich Vock <friedrich.vock@gmx.de>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20240514070931.199694-1-friedrich.vock@gmx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/bpf/resolve_btfids/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index ef0764d6891e4..82bffa7cf8659 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -728,7 +728,7 @@ static int sets_patch(struct object *obj)
+ static int symbols_patch(struct object *obj)
+ {
+-      int err;
++      off_t err;
+       if (__symbols_patch(obj, &obj->structs)  ||
+           __symbols_patch(obj, &obj->unions)   ||
+-- 
+2.43.0
+
diff --git a/queue-6.1/dma-buf-sw-sync-don-t-enable-irq-from-sync_print_obj.patch b/queue-6.1/dma-buf-sw-sync-don-t-enable-irq-from-sync_print_obj.patch
new file mode 100644 (file)
index 0000000..a946d4c
--- /dev/null
@@ -0,0 +1,55 @@
+From 90715ae323b1a8b6300e2112b3986da701c13d9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 May 2024 23:08:31 +0900
+Subject: dma-buf/sw-sync: don't enable IRQ from sync_print_obj()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit b794918961516f667b0c745aebdfebbb8a98df39 ]
+
+Since commit a6aa8fca4d79 ("dma-buf/sw-sync: Reduce irqsave/irqrestore from
+known context") by error replaced spin_unlock_irqrestore() with
+spin_unlock_irq() for both sync_debugfs_show() and sync_print_obj() despite
+sync_print_obj() is called from sync_debugfs_show(), lockdep complains
+inconsistent lock state warning.
+
+Use plain spin_{lock,unlock}() for sync_print_obj(), for
+sync_debugfs_show() is already using spin_{lock,unlock}_irq().
+
+Reported-by: syzbot <syzbot+a225ee3df7e7f9372dbe@syzkaller.appspotmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=a225ee3df7e7f9372dbe
+Fixes: a6aa8fca4d79 ("dma-buf/sw-sync: Reduce irqsave/irqrestore from known context")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/c2e46020-aaa6-4e06-bf73-f05823f913f0@I-love.SAKURA.ne.jp
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma-buf/sync_debug.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 101394f16930f..237bce21d1e72 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+       seq_printf(s, "%s: %d\n", obj->name, obj->value);
+-      spin_lock_irq(&obj->lock);
++      spin_lock(&obj->lock); /* Caller already disabled IRQ. */
+       list_for_each(pos, &obj->pt_list) {
+               struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+               sync_print_fence(s, &pt->base, false);
+       }
+-      spin_unlock_irq(&obj->lock);
++      spin_unlock(&obj->lock);
+ }
+ static void sync_print_sync_file(struct seq_file *s,
+-- 
+2.43.0
+
diff --git a/queue-6.1/dma-mapping-benchmark-fix-node-id-validation.patch b/queue-6.1/dma-mapping-benchmark-fix-node-id-validation.patch
new file mode 100644 (file)
index 0000000..86d7b73
--- /dev/null
@@ -0,0 +1,64 @@
+From fdd3bf1e30b6f114b435edeb14d4a31e7a2332f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 May 2024 14:47:03 +0300
+Subject: dma-mapping: benchmark: fix node id validation
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit 1ff05e723f7ca30644b8ec3fb093f16312e408ad ]
+
+While validating node ids in map_benchmark_ioctl(), node_possible() may
+be provided with invalid argument outside of [0,MAX_NUMNODES-1] range
+leading to:
+
+BUG: KASAN: wild-memory-access in map_benchmark_ioctl (kernel/dma/map_benchmark.c:214)
+Read of size 8 at addr 1fffffff8ccb6398 by task dma_map_benchma/971
+CPU: 7 PID: 971 Comm: dma_map_benchma Not tainted 6.9.0-rc6 #37
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
+Call Trace:
+ <TASK>
+dump_stack_lvl (lib/dump_stack.c:117)
+kasan_report (mm/kasan/report.c:603)
+kasan_check_range (mm/kasan/generic.c:189)
+variable_test_bit (arch/x86/include/asm/bitops.h:227) [inline]
+arch_test_bit (arch/x86/include/asm/bitops.h:239) [inline]
+_test_bit at (include/asm-generic/bitops/instrumented-non-atomic.h:142) [inline]
+node_state (include/linux/nodemask.h:423) [inline]
+map_benchmark_ioctl (kernel/dma/map_benchmark.c:214)
+full_proxy_unlocked_ioctl (fs/debugfs/file.c:333)
+__x64_sys_ioctl (fs/ioctl.c:890)
+do_syscall_64 (arch/x86/entry/common.c:83)
+entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+
+Compare node ids with sane bounds first. NUMA_NO_NODE is considered a
+special valid case meaning that benchmarking kthreads won't be bound to a
+cpuset of a given node.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 65789daa8087 ("dma-mapping: add benchmark support for streaming DMA APIs")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/map_benchmark.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 0520a8f4fb1df..11ad1c43833d1 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -208,7 +208,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+               }
+               if (map->bparam.node != NUMA_NO_NODE &&
+-                  !node_possible(map->bparam.node)) {
++                  (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
++                   !node_possible(map->bparam.node))) {
+                       pr_err("invalid numa node\n");
+                       return -EINVAL;
+               }
+-- 
+2.43.0
+
diff --git a/queue-6.1/dma-mapping-benchmark-handle-numa_no_node-correctly.patch b/queue-6.1/dma-mapping-benchmark-handle-numa_no_node-correctly.patch
new file mode 100644 (file)
index 0000000..544733f
--- /dev/null
@@ -0,0 +1,70 @@
+From 8ab70bdd1109a6b8c583293aa774c988b735eba8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 May 2024 14:47:04 +0300
+Subject: dma-mapping: benchmark: handle NUMA_NO_NODE correctly
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit e64746e74f717961250a155e14c156616fcd981f ]
+
+cpumask_of_node() can be called for NUMA_NO_NODE inside do_map_benchmark()
+resulting in the following sanitizer report:
+
+UBSAN: array-index-out-of-bounds in ./arch/x86/include/asm/topology.h:72:28
+index -1 is out of range for type 'cpumask [64][1]'
+CPU: 1 PID: 990 Comm: dma_map_benchma Not tainted 6.9.0-rc6 #29
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
+Call Trace:
+ <TASK>
+dump_stack_lvl (lib/dump_stack.c:117)
+ubsan_epilogue (lib/ubsan.c:232)
+__ubsan_handle_out_of_bounds (lib/ubsan.c:429)
+cpumask_of_node (arch/x86/include/asm/topology.h:72) [inline]
+do_map_benchmark (kernel/dma/map_benchmark.c:104)
+map_benchmark_ioctl (kernel/dma/map_benchmark.c:246)
+full_proxy_unlocked_ioctl (fs/debugfs/file.c:333)
+__x64_sys_ioctl (fs/ioctl.c:890)
+do_syscall_64 (arch/x86/entry/common.c:83)
+entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+
+Use cpumask_of_node() in place when binding a kernel thread to a cpuset
+of a particular node.
+
+Note that the provided node id is checked inside map_benchmark_ioctl().
+It's just a NUMA_NO_NODE case which is not handled properly later.
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 65789daa8087 ("dma-mapping: add benchmark support for streaming DMA APIs")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Acked-by: Barry Song <baohua@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/map_benchmark.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 11ad1c43833d1..af661734e8f90 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -101,7 +101,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+       struct task_struct **tsk;
+       int threads = map->bparam.threads;
+       int node = map->bparam.node;
+-      const cpumask_t *cpu_mask = cpumask_of_node(node);
+       u64 loops;
+       int ret = 0;
+       int i;
+@@ -122,7 +121,7 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+               }
+               if (node != NUMA_NO_NODE)
+-                      kthread_bind_mask(tsk[i], cpu_mask);
++                      kthread_bind_mask(tsk[i], cpumask_of_node(node));
+       }
+       /* clear the old value in the previous benchmark */
+-- 
+2.43.0
+
diff --git a/queue-6.1/drm-i915-guc-avoid-field_prep-warning.patch b/queue-6.1/drm-i915-guc-avoid-field_prep-warning.patch
new file mode 100644 (file)
index 0000000..f44ba01
--- /dev/null
@@ -0,0 +1,58 @@
+From 642d20fd11b3f1cb34ece6cd02dc0865b76a1e68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Apr 2024 09:48:09 -0700
+Subject: drm/i915/guc: avoid FIELD_PREP warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit d4f36db62396b73bed383c0b6e48d36278cafa78 ]
+
+With gcc-7 and earlier, there are lots of warnings like
+
+In file included from <command-line>:0:0:
+In function '__guc_context_policy_add_priority.isra.66',
+    inlined from '__guc_context_set_prio.isra.67' at drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3292:3,
+    inlined from 'guc_context_set_prio' at drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:3320:2:
+include/linux/compiler_types.h:399:38: error: call to '__compiletime_assert_631' declared with attribute error: FIELD_PREP: mask is not constant
+  _compiletime_assert(condition, msg, __compiletime_assert_, __COUNTER__)
+                                      ^
+...
+drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c:2422:3: note: in expansion of macro 'FIELD_PREP'
+   FIELD_PREP(GUC_KLV_0_KEY, GUC_CONTEXT_POLICIES_KLV_ID_##id) | \
+   ^~~~~~~~~~
+
+Make sure that GUC_KLV_0_KEY is an unsigned value to avoid the warning.
+
+Fixes: 77b6f79df66e ("drm/i915/guc: Update to GuC version 69.0.3")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Reviewed-by: Michal Wajdeczko <michal.wajdeczko@intel.com>
+Signed-off-by: Julia Filipchuk <julia.filipchuk@intel.com>
+Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20240430164809.482131-1-julia.filipchuk@intel.com
+(cherry picked from commit 364e039827ef628c650c21c1afe1c54d9c3296d9)
+Signed-off-by: Jani Nikula <jani.nikula@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+index 4a59478c3b5c4..bbeceb640d31e 100644
+--- a/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
++++ b/drivers/gpu/drm/i915/gt/uc/abi/guc_klvs_abi.h
+@@ -29,9 +29,9 @@
+  */
+ #define GUC_KLV_LEN_MIN                               1u
+-#define GUC_KLV_0_KEY                         (0xffff << 16)
+-#define GUC_KLV_0_LEN                         (0xffff << 0)
+-#define GUC_KLV_n_VALUE                               (0xffffffff << 0)
++#define GUC_KLV_0_KEY                         (0xffffu << 16)
++#define GUC_KLV_0_LEN                         (0xffffu << 0)
++#define GUC_KLV_n_VALUE                               (0xffffffffu << 0)
+ /**
+  * DOC: GuC Self Config KLVs
+-- 
+2.43.0
+
diff --git a/queue-6.1/enic-validate-length-of-nl-attributes-in-enic_set_vf.patch b/queue-6.1/enic-validate-length-of-nl-attributes-in-enic_set_vf.patch
new file mode 100644 (file)
index 0000000..d9502f9
--- /dev/null
@@ -0,0 +1,69 @@
+From 525ef4586b51056656f33fdbc88a2a03a39ebde0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 10:30:44 +0300
+Subject: enic: Validate length of nl attributes in enic_set_vf_port
+
+From: Roded Zats <rzats@paloaltonetworks.com>
+
+[ Upstream commit e8021b94b0412c37bcc79027c2e382086b6ce449 ]
+
+enic_set_vf_port assumes that the nl attribute IFLA_PORT_PROFILE
+is of length PORT_PROFILE_MAX and that the nl attributes
+IFLA_PORT_INSTANCE_UUID, IFLA_PORT_HOST_UUID are of length PORT_UUID_MAX.
+These attributes are validated (in the function do_setlink in rtnetlink.c)
+using the nla_policy ifla_port_policy. The policy defines IFLA_PORT_PROFILE
+as NLA_STRING, IFLA_PORT_INSTANCE_UUID as NLA_BINARY and
+IFLA_PORT_HOST_UUID as NLA_STRING. That means that the length validation
+using the policy is for the max size of the attributes and not on exact
+size so the length of these attributes might be less than the sizes that
+enic_set_vf_port expects. This might cause an out of bands
+read access in the memcpys of the data of these
+attributes in enic_set_vf_port.
+
+Fixes: f8bd909183ac ("net: Add ndo_{set|get}_vf_port support for enic dynamic vnics")
+Signed-off-by: Roded Zats <rzats@paloaltonetworks.com>
+Link: https://lore.kernel.org/r/20240522073044.33519-1-rzats@paloaltonetworks.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cisco/enic/enic_main.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index 29500d32e3626..2065c26f394db 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
+       pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+       if (port[IFLA_PORT_PROFILE]) {
++              if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
++                      memcpy(pp, &prev_pp, sizeof(*pp));
++                      return -EINVAL;
++              }
+               pp->set |= ENIC_SET_NAME;
+               memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
+                       PORT_PROFILE_MAX);
+       }
+       if (port[IFLA_PORT_INSTANCE_UUID]) {
++              if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
++                      memcpy(pp, &prev_pp, sizeof(*pp));
++                      return -EINVAL;
++              }
+               pp->set |= ENIC_SET_INSTANCE;
+               memcpy(pp->instance_uuid,
+                       nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+       }
+       if (port[IFLA_PORT_HOST_UUID]) {
++              if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
++                      memcpy(pp, &prev_pp, sizeof(*pp));
++                      return -EINVAL;
++              }
+               pp->set |= ENIC_SET_HOST;
+               memcpy(pp->host_uuid,
+                       nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
+-- 
+2.43.0
+
diff --git a/queue-6.1/hwmon-shtc1-fix-property-misspelling.patch b/queue-6.1/hwmon-shtc1-fix-property-misspelling.patch
new file mode 100644 (file)
index 0000000..e7380d4
--- /dev/null
@@ -0,0 +1,36 @@
+From a3a88bf4aca0a80d41fcd83a62760d8a5a9ef997 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 08:20:14 -0700
+Subject: hwmon: (shtc1) Fix property misspelling
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 52a2c70c3ec555e670a34dd1ab958986451d2dd2 ]
+
+The property name is "sensirion,low-precision", not
+"sensicon,low-precision".
+
+Cc: Chris Ruehl <chris.ruehl@gtsys.com.hk>
+Fixes: be7373b60df5 ("hwmon: shtc1: add support for device tree bindings")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/shtc1.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
+index 18546ebc8e9f7..0365643029aee 100644
+--- a/drivers/hwmon/shtc1.c
++++ b/drivers/hwmon/shtc1.c
+@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
+       if (np) {
+               data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
+-              data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
++              data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
+       } else {
+               if (client->dev.platform_data)
+                       data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+-- 
+2.43.0
+
diff --git a/queue-6.1/ice-fix-accounting-if-a-vlan-already-exists.patch b/queue-6.1/ice-fix-accounting-if-a-vlan-already-exists.patch
new file mode 100644 (file)
index 0000000..7cdacd5
--- /dev/null
@@ -0,0 +1,74 @@
+From 33c7022f0797713236ebcb578c874bc4ef519320 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 10:45:30 -0700
+Subject: ice: fix accounting if a VLAN already exists
+
+From: Jacob Keller <jacob.e.keller@intel.com>
+
+[ Upstream commit 82617b9a04649e83ee8731918aeadbb6e6d7cbc7 ]
+
+The ice_vsi_add_vlan() function is used to add a VLAN filter for the target
+VSI. This function prepares a filter in the switch table for the given VSI.
+If it succeeds, the vsi->num_vlan counter is incremented.
+
+It is not considered an error to add a VLAN which already exists in the
+switch table, so the function explicitly checks and ignores -EEXIST. The
+vsi->num_vlan counter is still incremented.
+
+This seems incorrect, as it means we can double-count in the case where the
+same VLAN is added twice by the caller. The actual table will have one less
+filter than the count.
+
+The ice_vsi_del_vlan() function similarly checks and handles the -ENOENT
+condition for when deleting a filter that doesn't exist. This flow only
+decrements the vsi->num_vlan if it actually deleted a filter.
+
+The vsi->num_vlan counter is used only in a few places, primarily related
+to tracking the number of non-zero VLANs. If the vsi->num_vlans gets out of
+sync, then ice_vsi_num_non_zero_vlans() will incorrectly report more VLANs
+than are present, and ice_vsi_has_non_zero_vlans() could return true
+potentially in cases where there are only VLAN 0 filters left.
+
+Fix this by only incrementing the vsi->num_vlan in the case where we
+actually added an entry, and not in the case where the entry already
+existed.
+
+Fixes: a1ffafb0b4a4 ("ice: Support configuring the device to Double VLAN Mode")
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Pucha Himasekhar Reddy <himasekharx.reddy.pucha@intel.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Link: https://lore.kernel.org/r/20240523-net-2024-05-23-intel-net-fixes-v1-2-17a923e0bb5f@intel.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+index 239266e9d5f12..80c16e04f6702 100644
+--- a/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
++++ b/drivers/net/ethernet/intel/ice/ice_vsi_vlan_lib.c
+@@ -45,14 +45,15 @@ int ice_vsi_add_vlan(struct ice_vsi *vsi, struct ice_vlan *vlan)
+               return -EINVAL;
+       err = ice_fltr_add_vlan(vsi, vlan);
+-      if (err && err != -EEXIST) {
++      if (!err)
++              vsi->num_vlan++;
++      else if (err == -EEXIST)
++              err = 0;
++      else
+               dev_err(ice_pf_to_dev(vsi->back), "Failure Adding VLAN %d on VSI %i, status %d\n",
+                       vlan->vid, vsi->vsi_num, err);
+-              return err;
+-      }
+-      vsi->num_vlan++;
+-      return 0;
++      return err;
+ }
+ /**
+-- 
+2.43.0
+
diff --git a/queue-6.1/ipvlan-dont-use-skb-sk-in-ipvlan_process_v-4-6-_outb.patch b/queue-6.1/ipvlan-dont-use-skb-sk-in-ipvlan_process_v-4-6-_outb.patch
new file mode 100644 (file)
index 0000000..3c1c9e4
--- /dev/null
@@ -0,0 +1,107 @@
+From 3da23435e1b34cedc029e92ea95ad6016f856475 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 17:56:33 +0800
+Subject: ipvlan: Dont Use skb->sk in ipvlan_process_v{4,6}_outbound
+
+From: Yue Haibing <yuehaibing@huawei.com>
+
+[ Upstream commit b3dc6e8003b500861fa307e9a3400c52e78e4d3a ]
+
+Raw packet from PF_PACKET socket ontop of an IPv6-backed ipvlan device will
+hit WARN_ON_ONCE() in sk_mc_loop() through sch_direct_xmit() path.
+
+WARNING: CPU: 2 PID: 0 at net/core/sock.c:775 sk_mc_loop+0x2d/0x70
+Modules linked in: sch_netem ipvlan rfkill cirrus drm_shmem_helper sg drm_kms_helper
+CPU: 2 PID: 0 Comm: swapper/2 Kdump: loaded Not tainted 6.9.0+ #279
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+RIP: 0010:sk_mc_loop+0x2d/0x70
+Code: fa 0f 1f 44 00 00 65 0f b7 15 f7 96 a3 4f 31 c0 66 85 d2 75 26 48 85 ff 74 1c
+RSP: 0018:ffffa9584015cd78 EFLAGS: 00010212
+RAX: 0000000000000011 RBX: ffff91e585793e00 RCX: 0000000002c6a001
+RDX: 0000000000000000 RSI: 0000000000000040 RDI: ffff91e589c0f000
+RBP: ffff91e5855bd100 R08: 0000000000000000 R09: 3d00545216f43d00
+R10: ffff91e584fdcc50 R11: 00000060dd8616f4 R12: ffff91e58132d000
+R13: ffff91e584fdcc68 R14: ffff91e5869ce800 R15: ffff91e589c0f000
+FS:  0000000000000000(0000) GS:ffff91e898100000(0000) knlGS:0000000000000000
+CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f788f7c44c0 CR3: 0000000008e1a000 CR4: 00000000000006f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+<IRQ>
+ ? __warn (kernel/panic.c:693)
+ ? sk_mc_loop (net/core/sock.c:760)
+ ? report_bug (lib/bug.c:201 lib/bug.c:219)
+ ? handle_bug (arch/x86/kernel/traps.c:239)
+ ? exc_invalid_op (arch/x86/kernel/traps.c:260 (discriminator 1))
+ ? asm_exc_invalid_op (./arch/x86/include/asm/idtentry.h:621)
+ ? sk_mc_loop (net/core/sock.c:760)
+ ip6_finish_output2 (net/ipv6/ip6_output.c:83 (discriminator 1))
+ ? nf_hook_slow (net/netfilter/core.c:626)
+ ip6_finish_output (net/ipv6/ip6_output.c:222)
+ ? __pfx_ip6_finish_output (net/ipv6/ip6_output.c:215)
+ ipvlan_xmit_mode_l3 (drivers/net/ipvlan/ipvlan_core.c:602) ipvlan
+ ipvlan_start_xmit (drivers/net/ipvlan/ipvlan_main.c:226) ipvlan
+ dev_hard_start_xmit (net/core/dev.c:3594)
+ sch_direct_xmit (net/sched/sch_generic.c:343)
+ __qdisc_run (net/sched/sch_generic.c:416)
+ net_tx_action (net/core/dev.c:5286)
+ handle_softirqs (kernel/softirq.c:555)
+ __irq_exit_rcu (kernel/softirq.c:589)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1043)
+
+The warning triggers as this:
+packet_sendmsg
+   packet_snd //skb->sk is packet sk
+      __dev_queue_xmit
+         __dev_xmit_skb //q->enqueue is not NULL
+             __qdisc_run
+               sch_direct_xmit
+                 dev_hard_start_xmit
+                   ipvlan_start_xmit
+                      ipvlan_xmit_mode_l3 //l3 mode
+                        ipvlan_process_outbound //vepa flag
+                          ipvlan_process_v6_outbound
+                            ip6_local_out
+                                __ip6_finish_output
+                                  ip6_finish_output2 //multicast packet
+                                    sk_mc_loop //sk->sk_family is AF_PACKET
+
+Call ip{6}_local_out() with NULL sk in ipvlan as other tunnels to fix this.
+
+Fixes: 2ad7bf363841 ("ipvlan: Initial check-in of the IPVLAN driver.")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Yue Haibing <yuehaibing@huawei.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20240529095633.613103-1-yuehaibing@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index d447f3076e24a..1d49771d07f4c 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -439,7 +439,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+       memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+-      err = ip_local_out(net, skb->sk, skb);
++      err = ip_local_out(net, NULL, skb);
+       if (unlikely(net_xmit_eval(err)))
+               DEV_STATS_INC(dev, tx_errors);
+       else
+@@ -494,7 +494,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+       memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+-      err = ip6_local_out(dev_net(dev), skb->sk, skb);
++      err = ip6_local_out(dev_net(dev), NULL, skb);
+       if (unlikely(net_xmit_eval(err)))
+               DEV_STATS_INC(dev, tx_errors);
+       else
+-- 
+2.43.0
+
diff --git a/queue-6.1/kconfig-fix-comparison-to-constant-symbols-m-n.patch b/queue-6.1/kconfig-fix-comparison-to-constant-symbols-m-n.patch
new file mode 100644 (file)
index 0000000..7c6bdd3
--- /dev/null
@@ -0,0 +1,128 @@
+From d779856f49330eba0feeae90ffb522f2f564e9b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 May 2024 18:22:27 +0900
+Subject: kconfig: fix comparison to constant symbols, 'm', 'n'
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit aabdc960a283ba78086b0bf66ee74326f49e218e ]
+
+Currently, comparisons to 'm' or 'n' result in incorrect output.
+
+[Test Code]
+
+    config MODULES
+            def_bool y
+            modules
+
+    config A
+            def_tristate m
+
+    config B
+            def_bool A > n
+
+CONFIG_B is unset, while CONFIG_B=y is expected.
+
+The reason for the issue is because Kconfig compares the tristate values
+as strings.
+
+Currently, the .type fields in the constant symbol definitions,
+symbol_{yes,mod,no} are unspecified, i.e., S_UNKNOWN.
+
+When expr_calc_value() evaluates 'A > n', it checks the types of 'A' and
+'n' to determine how to compare them.
+
+The left-hand side, 'A', is a tristate symbol with a value of 'm', which
+corresponds to a numeric value of 1. (Internally, 'y', 'm', and 'n' are
+represented as 2, 1, and 0, respectively.)
+
+The right-hand side, 'n', has an unknown type, so it is treated as the
+string "n" during the comparison.
+
+expr_calc_value() compares two values numerically only when both can
+have numeric values. Otherwise, they are compared as strings.
+
+    symbol    numeric value    ASCII code
+    -------------------------------------
+      y           2             0x79
+      m           1             0x6d
+      n           0             0x6e
+
+'m' is greater than 'n' if compared numerically (since 1 is greater
+than 0), but smaller than 'n' if compared as strings (since the ASCII
+code 0x6d is smaller than 0x6e).
+
+Specifying .type=S_TRISTATE for symbol_{yes,mod,no} fixes the above
+test code.
+
+Doing so, however, would cause a regression to the following test code.
+
+[Test Code 2]
+
+    config MODULES
+            def_bool n
+            modules
+
+    config A
+            def_tristate n
+
+    config B
+            def_bool A = m
+
+You would get CONFIG_B=y, while CONFIG_B should not be set.
+
+The reason is because sym_get_string_value() turns 'm' into 'n' when the
+module feature is disabled. Consequently, expr_calc_value() evaluates
+'A = n' instead of 'A = m'. This oddity has been hidden because the type
+of 'm' was previously S_UNKNOWN instead of S_TRISTATE.
+
+sym_get_string_value() should not tweak the string because the tristate
+value has already been correctly calculated. There is no reason to
+return the string "n" where its tristate value is mod.
+
+Fixes: 31847b67bec0 ("kconfig: allow use of relations other than (in)equality")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/kconfig/symbol.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index a76925b46ce63..7b1df55b01767 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -13,18 +13,21 @@
+ struct symbol symbol_yes = {
+       .name = "y",
++      .type = S_TRISTATE,
+       .curr = { "y", yes },
+       .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+ struct symbol symbol_mod = {
+       .name = "m",
++      .type = S_TRISTATE,
+       .curr = { "m", mod },
+       .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+ struct symbol symbol_no = {
+       .name = "n",
++      .type = S_TRISTATE,
+       .curr = { "n", no },
+       .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+@@ -775,8 +778,7 @@ const char *sym_get_string_value(struct symbol *sym)
+               case no:
+                       return "n";
+               case mod:
+-                      sym_calc_value(modules_sym);
+-                      return (modules_sym->curr.tri == no) ? "n" : "m";
++                      return "m";
+               case yes:
+                       return "y";
+               }
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-dsa-microchip-fix-rgmii-error-in-ksz-dsa-driver.patch b/queue-6.1/net-dsa-microchip-fix-rgmii-error-in-ksz-dsa-driver.patch
new file mode 100644 (file)
index 0000000..39427e8
--- /dev/null
@@ -0,0 +1,39 @@
+From 05cc87b631f7785f475f93321b3d20395680d2a6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 14:34:26 -0700
+Subject: net: dsa: microchip: fix RGMII error in KSZ DSA driver
+
+From: Tristram Ha <tristram.ha@microchip.com>
+
+[ Upstream commit 278d65ccdadb5f0fa0ceaf7b9cc97b305cd72822 ]
+
+The driver should return RMII interface when XMII is running in RMII mode.
+
+Fixes: 0ab7f6bf1675 ("net: dsa: microchip: ksz9477: use common xmii function")
+Signed-off-by: Tristram Ha <tristram.ha@microchip.com>
+Acked-by: Arun Ramadoss <arun.ramadoss@microchip.com>
+Acked-by: Jerry Ray <jerry.ray@microchip.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/1716932066-3342-1-git-send-email-Tristram.Ha@microchip.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/dsa/microchip/ksz_common.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c
+index dc9eea3c8ab16..f9f43897f86c1 100644
+--- a/drivers/net/dsa/microchip/ksz_common.c
++++ b/drivers/net/dsa/microchip/ksz_common.c
+@@ -2540,7 +2540,7 @@ phy_interface_t ksz_get_xmii(struct ksz_device *dev, int port, bool gbit)
+               else
+                       interface = PHY_INTERFACE_MODE_MII;
+       } else if (val == bitval[P_RMII_SEL]) {
+-              interface = PHY_INTERFACE_MODE_RGMII;
++              interface = PHY_INTERFACE_MODE_RMII;
+       } else {
+               interface = PHY_INTERFACE_MODE_RGMII;
+               if (data8 & P_RGMII_ID_EG_ENABLE)
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-ena-add-dynamic-recycling-mechanism-for-rx-buffe.patch b/queue-6.1/net-ena-add-dynamic-recycling-mechanism-for-rx-buffe.patch
new file mode 100644 (file)
index 0000000..260e3c9
--- /dev/null
@@ -0,0 +1,429 @@
+From 9590f568c220f3533f0c1bdf47f56fd90385f1f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 12:14:48 +0000
+Subject: net: ena: Add dynamic recycling mechanism for rx buffers
+
+From: David Arinzon <darinzon@amazon.com>
+
+[ Upstream commit f7d625adeb7bc6a9ec83d32d9615889969d64484 ]
+
+The current implementation allocates page-sized rx buffers.
+As traffic may consist of different types and sizes of packets,
+in various cases, buffers are not fully used.
+
+This change (Dynamic RX Buffers - DRB) uses part of the allocated rx
+page needed for the incoming packet, and returns the rest of the
+unused page to be used again as an rx buffer for future packets.
+A threshold of 2K for unused space has been set in order to declare
+whether the remainder of the page can be reused again as an rx buffer.
+
+As a page may be reused, dma_sync_single_for_cpu() is added in order
+to sync the memory to the CPU side after it was owned by the HW.
+In addition, when the rx page can no longer be reused, it is being
+unmapped using dma_unmap_page(), which implicitly syncs and then
+unmaps the entire page. In case the kernel still handles the skbs
+pointing to the previous buffers from that rx page, it may access
+garbage pointers, caused by the implicit sync overwriting them.
+The implicit dma sync is removed by replacing dma_unmap_page() with
+dma_unmap_page_attrs() with DMA_ATTR_SKIP_CPU_SYNC flag.
+
+The functionality is disabled for XDP traffic to avoid handling
+several descriptors per packet.
+
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: Shay Agroskin <shayagr@amazon.com>
+Signed-off-by: David Arinzon <darinzon@amazon.com>
+Link: https://lore.kernel.org/r/20230612121448.28829-1-darinzon@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../device_drivers/ethernet/amazon/ena.rst    |  32 +++++
+ .../net/ethernet/amazon/ena/ena_admin_defs.h  |   6 +-
+ drivers/net/ethernet/amazon/ena/ena_netdev.c  | 136 ++++++++++++------
+ drivers/net/ethernet/amazon/ena/ena_netdev.h  |   4 +
+ 4 files changed, 136 insertions(+), 42 deletions(-)
+
+diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+index 8bcb173e0353f..4914926776326 100644
+--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
++++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+@@ -205,6 +205,7 @@ Adaptive coalescing can be switched on/off through `ethtool(8)`'s
+ More information about Adaptive Interrupt Moderation (DIM) can be found in
+ Documentation/networking/net_dim.rst
++.. _`RX copybreak`:
+ RX copybreak
+ ============
+ The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
+@@ -315,3 +316,34 @@ Rx
+ - The new SKB is updated with the necessary information (protocol,
+   checksum hw verify result, etc), and then passed to the network
+   stack, using the NAPI interface function :code:`napi_gro_receive()`.
++
++Dynamic RX Buffers (DRB)
++------------------------
++
++Each RX descriptor in the RX ring is a single memory page (which is either 4KB
++or 16KB long depending on system's configurations).
++To reduce the memory allocations required when dealing with a high rate of small
++packets, the driver tries to reuse the remaining RX descriptor's space if more
++than 2KB of this page remain unused.
++
++A simple example of this mechanism is the following sequence of events:
++
++::
++
++        1. Driver allocates page-sized RX buffer and passes it to hardware
++                +----------------------+
++                |4KB RX Buffer         |
++                +----------------------+
++
++        2. A 300Bytes packet is received on this buffer
++
++        3. The driver increases the ref count on this page and returns it back to
++           HW as an RX buffer of size 4KB - 300Bytes = 3796 Bytes
++               +----+--------------------+
++               |****|3796 Bytes RX Buffer|
++               +----+--------------------+
++
++This mechanism isn't used when an XDP program is loaded, or when the
++RX packet is less than rx_copybreak bytes (in which case the packet is
++copied out of the RX buffer into the linear part of a new skb allocated
++for it and the RX buffer remains the same size, see `RX copybreak`_).
+diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+index 466ad9470d1f4..6de0d590be34f 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
++++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+@@ -869,7 +869,9 @@ struct ena_admin_host_info {
+        * 2 : interrupt_moderation
+        * 3 : rx_buf_mirroring
+        * 4 : rss_configurable_function_key
+-       * 31:5 : reserved
++       * 5 : reserved
++       * 6 : rx_page_reuse
++       * 31:7 : reserved
+        */
+       u32 driver_supported_features;
+ };
+@@ -1184,6 +1186,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
+ #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK           BIT(3)
+ #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+ #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
++#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT             6
++#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK              BIT(6)
+ /* aenq_common_desc */
+ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK               BIT(0)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 5e37b18ac3adf..6ebe0ac8e53e9 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1022,7 +1022,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+       int tailroom;
+       /* restore page offset value in case it has been changed by device */
+-      rx_info->page_offset = headroom;
++      rx_info->buf_offset = headroom;
+       /* if previous allocated page is not used */
+       if (unlikely(rx_info->page))
+@@ -1039,6 +1039,8 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+       tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+       rx_info->page = page;
++      rx_info->dma_addr = dma;
++      rx_info->page_offset = 0;
+       ena_buf = &rx_info->ena_buf;
+       ena_buf->paddr = dma + headroom;
+       ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
+@@ -1046,14 +1048,12 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+       return 0;
+ }
+-static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
+-                            struct ena_rx_buffer *rx_info)
++static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
++                                  struct ena_rx_buffer *rx_info,
++                                  unsigned long attrs)
+ {
+-      struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+-
+-      dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
+-                     ENA_PAGE_SIZE,
+-                     DMA_BIDIRECTIONAL);
++      dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
++                           DMA_BIDIRECTIONAL, attrs);
+ }
+ static void ena_free_rx_page(struct ena_ring *rx_ring,
+@@ -1067,7 +1067,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
+               return;
+       }
+-      ena_unmap_rx_buff(rx_ring, rx_info);
++      ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
+       __free_page(page);
+       rx_info->page = NULL;
+@@ -1413,14 +1413,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+       return tx_pkts;
+ }
+-static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
++static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
+ {
+       struct sk_buff *skb;
+       if (!first_frag)
+-              skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
++              skb = napi_alloc_skb(rx_ring->napi, len);
+       else
+-              skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
++              skb = napi_build_skb(first_frag, len);
+       if (unlikely(!skb)) {
+               ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+@@ -1429,24 +1429,47 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
+               netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
+                         "Failed to allocate skb. first_frag %s\n",
+                         first_frag ? "provided" : "not provided");
+-              return NULL;
+       }
+       return skb;
+ }
++static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
++                                    u16 len, int pkt_offset)
++{
++      struct ena_com_buf *ena_buf = &rx_info->ena_buf;
++
++      /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
++       * for data + headroom + tailroom.
++       */
++      if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
++              page_ref_inc(rx_info->page);
++              rx_info->page_offset += buf_len;
++              ena_buf->paddr += buf_len;
++              ena_buf->len -= buf_len;
++              return true;
++      }
++
++      return false;
++}
++
+ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+                                 struct ena_com_rx_buf_info *ena_bufs,
+                                 u32 descs,
+                                 u16 *next_to_clean)
+ {
++      int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++      bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
+       struct ena_rx_buffer *rx_info;
+       struct ena_adapter *adapter;
++      int page_offset, pkt_offset;
++      dma_addr_t pre_reuse_paddr;
+       u16 len, req_id, buf = 0;
++      bool reuse_rx_buf_page;
+       struct sk_buff *skb;
+-      void *page_addr;
+-      u32 page_offset;
+-      void *data_addr;
++      void *buf_addr;
++      int buf_offset;
++      u16 buf_len;
+       len = ena_bufs[buf].len;
+       req_id = ena_bufs[buf].req_id;
+@@ -1466,34 +1489,30 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+                 "rx_info %p page %p\n",
+                 rx_info, rx_info->page);
+-      /* save virt address of first buffer */
+-      page_addr = page_address(rx_info->page);
++      buf_offset = rx_info->buf_offset;
++      pkt_offset = buf_offset - rx_ring->rx_headroom;
+       page_offset = rx_info->page_offset;
+-      data_addr = page_addr + page_offset;
+-
+-      prefetch(data_addr);
++      buf_addr = page_address(rx_info->page) + page_offset;
+       if (len <= rx_ring->rx_copybreak) {
+-              skb = ena_alloc_skb(rx_ring, NULL);
++              skb = ena_alloc_skb(rx_ring, NULL, len);
+               if (unlikely(!skb))
+                       return NULL;
+-              netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+-                        "RX allocated small packet. len %d. data_len %d\n",
+-                        skb->len, skb->data_len);
+-
+               /* sync this buffer for CPU use */
+               dma_sync_single_for_cpu(rx_ring->dev,
+-                                      dma_unmap_addr(&rx_info->ena_buf, paddr),
++                                      dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+                                       len,
+                                       DMA_FROM_DEVICE);
+-              skb_copy_to_linear_data(skb, data_addr, len);
++              skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
+               dma_sync_single_for_device(rx_ring->dev,
+-                                         dma_unmap_addr(&rx_info->ena_buf, paddr),
++                                         dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+                                          len,
+                                          DMA_FROM_DEVICE);
+               skb_put(skb, len);
++              netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
++                        "RX allocated small packet. len %d.\n", skb->len);
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+               rx_ring->free_ids[*next_to_clean] = req_id;
+               *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
+@@ -1501,14 +1520,28 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+               return skb;
+       }
+-      ena_unmap_rx_buff(rx_ring, rx_info);
++      buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
++
++      pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
++
++      /* If XDP isn't loaded try to reuse part of the RX buffer */
++      reuse_rx_buf_page = !is_xdp_loaded &&
++                          ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
+-      skb = ena_alloc_skb(rx_ring, page_addr);
++      dma_sync_single_for_cpu(rx_ring->dev,
++                              pre_reuse_paddr + pkt_offset,
++                              len,
++                              DMA_FROM_DEVICE);
++
++      if (!reuse_rx_buf_page)
++              ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
++
++      skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
+       if (unlikely(!skb))
+               return NULL;
+       /* Populate skb's linear part */
+-      skb_reserve(skb, page_offset);
++      skb_reserve(skb, buf_offset);
+       skb_put(skb, len);
+       skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+@@ -1517,7 +1550,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+                         "RX skb updated. len %d. data_len %d\n",
+                         skb->len, skb->data_len);
+-              rx_info->page = NULL;
++              if (!reuse_rx_buf_page)
++                      rx_info->page = NULL;
+               rx_ring->free_ids[*next_to_clean] = req_id;
+               *next_to_clean =
+@@ -1532,10 +1566,28 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+               rx_info = &rx_ring->rx_buffer_info[req_id];
+-              ena_unmap_rx_buff(rx_ring, rx_info);
++              /* rx_info->buf_offset includes rx_ring->rx_headroom */
++              buf_offset = rx_info->buf_offset;
++              pkt_offset = buf_offset - rx_ring->rx_headroom;
++              buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
++              page_offset = rx_info->page_offset;
++
++              pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
++
++              reuse_rx_buf_page = !is_xdp_loaded &&
++                                  ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
++
++              dma_sync_single_for_cpu(rx_ring->dev,
++                                      pre_reuse_paddr + pkt_offset,
++                                      len,
++                                      DMA_FROM_DEVICE);
++
++              if (!reuse_rx_buf_page)
++                      ena_unmap_rx_buff_attrs(rx_ring, rx_info,
++                                              DMA_ATTR_SKIP_CPU_SYNC);
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+-                              rx_info->page_offset, len, ENA_PAGE_SIZE);
++                              page_offset + buf_offset, len, buf_len);
+       } while (1);
+@@ -1641,14 +1693,14 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u
+       rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+       xdp_prepare_buff(xdp, page_address(rx_info->page),
+-                       rx_info->page_offset,
++                       rx_info->buf_offset,
+                        rx_ring->ena_bufs[0].len, false);
+       ret = ena_xdp_execute(rx_ring, xdp);
+       /* The xdp program might expand the headers */
+       if (ret == ENA_XDP_PASS) {
+-              rx_info->page_offset = xdp->data - xdp->data_hard_start;
++              rx_info->buf_offset = xdp->data - xdp->data_hard_start;
+               rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+       }
+@@ -1703,7 +1755,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+               /* First descriptor might have an offset set by the device */
+               rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+-              rx_info->page_offset += ena_rx_ctx.pkt_offset;
++              rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+               netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+                         "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+@@ -1733,8 +1785,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+                                * from RX side.
+                                */
+                               if (xdp_verdict & ENA_XDP_FORWARDED) {
+-                                      ena_unmap_rx_buff(rx_ring,
+-                                                        &rx_ring->rx_buffer_info[req_id]);
++                                      ena_unmap_rx_buff_attrs(rx_ring,
++                                                              &rx_ring->rx_buffer_info[req_id],
++                                                              0);
+                                       rx_ring->rx_buffer_info[req_id].page = NULL;
+                               }
+                       }
+@@ -3216,7 +3269,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
+               ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
+               ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
+               ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
+-              ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
++              ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
++              ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+       rc = ena_com_set_host_attributes(ena_dev);
+       if (rc) {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 2cb141079474c..73bfd7229c6aa 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -51,6 +51,8 @@
+ #define ENA_DEFAULT_RING_SIZE (1024)
+ #define ENA_MIN_RING_SIZE     (256)
++#define ENA_MIN_RX_BUF_SIZE (2048)
++
+ #define ENA_MIN_NUM_IO_QUEUES (1)
+ #define ENA_TX_WAKEUP_THRESH          (MAX_SKB_FRAGS + 2)
+@@ -175,7 +177,9 @@ struct ena_tx_buffer {
+ struct ena_rx_buffer {
+       struct sk_buff *skb;
+       struct page *page;
++      dma_addr_t dma_addr;
+       u32 page_offset;
++      u32 buf_offset;
+       struct ena_com_buf ena_buf;
+ } ____cacheline_aligned;
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-ena-fix-redundant-device-numa-node-override.patch b/queue-6.1/net-ena-fix-redundant-device-numa-node-override.patch
new file mode 100644 (file)
index 0000000..ff3c6c2
--- /dev/null
@@ -0,0 +1,85 @@
+From 2a405a6dcb1ea71cd59b4a6d4d9820853b41366c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 20:09:12 +0300
+Subject: net: ena: Fix redundant device NUMA node override
+
+From: Shay Agroskin <shayagr@amazon.com>
+
+[ Upstream commit 2dc8b1e7177d4f49f492ce648440caf2de0c3616 ]
+
+The driver overrides the NUMA node id of the device regardless of
+whether it knows its correct value (often setting it to -1 even though
+the node id is advertised in 'struct device'). This can lead to
+suboptimal configurations.
+
+This patch fixes this behavior and makes the shared memory allocation
+functions use the NUMA node id advertised by the underlying device.
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Shay Agroskin <shayagr@amazon.com>
+Link: https://lore.kernel.org/r/20240528170912.1204417-1-shayagr@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index e733419dd3f49..276f6a8631fb1 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+                             struct ena_com_io_sq *io_sq)
+ {
+       size_t size;
+-      int dev_node = 0;
+       memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+       size = io_sq->desc_entry_size * io_sq->q_depth;
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+-              dev_node = dev_to_node(ena_dev->dmadev);
+-              set_dev_node(ena_dev->dmadev, ctx->numa_node);
+               io_sq->desc_addr.virt_addr =
+                       dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+                                          GFP_KERNEL);
+-              set_dev_node(ena_dev->dmadev, dev_node);
+               if (!io_sq->desc_addr.virt_addr) {
+                       io_sq->desc_addr.virt_addr =
+                               dma_alloc_coherent(ena_dev->dmadev, size,
+@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+               size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+                       io_sq->bounce_buf_ctrl.buffers_num;
+-              dev_node = dev_to_node(ena_dev->dmadev);
+-              set_dev_node(ena_dev->dmadev, ctx->numa_node);
+               io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+-              set_dev_node(ena_dev->dmadev, dev_node);
+               if (!io_sq->bounce_buf_ctrl.base_buffer)
+                       io_sq->bounce_buf_ctrl.base_buffer =
+                               devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+                             struct ena_com_io_cq *io_cq)
+ {
+       size_t size;
+-      int prev_node = 0;
+       memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+       size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+-      prev_node = dev_to_node(ena_dev->dmadev);
+-      set_dev_node(ena_dev->dmadev, ctx->numa_node);
+       io_cq->cdesc_addr.virt_addr =
+               dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+-      set_dev_node(ena_dev->dmadev, prev_node);
+       if (!io_cq->cdesc_addr.virt_addr) {
+               io_cq->cdesc_addr.virt_addr =
+                       dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-ena-reduce-lines-with-longer-column-width-bounda.patch b/queue-6.1/net-ena-reduce-lines-with-longer-column-width-bounda.patch
new file mode 100644 (file)
index 0000000..884f6f3
--- /dev/null
@@ -0,0 +1,1265 @@
+From 2921b4cf786aa4a72dca033cc0c68c26a8edf42e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jan 2024 09:53:53 +0000
+Subject: net: ena: Reduce lines with longer column width boundary
+
+From: David Arinzon <darinzon@amazon.com>
+
+[ Upstream commit 50613650c3d6255cef13a129ccaa919ca73a6743 ]
+
+This patch reduces some of the lines by removing newlines
+where more variables or print strings can be pushed back
+to the previous line while still adhering to the styling
+guidelines.
+
+Signed-off-by: David Arinzon <darinzon@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c     | 315 +++++++-----------
+ drivers/net/ethernet/amazon/ena/ena_eth_com.c |  49 ++-
+ drivers/net/ethernet/amazon/ena/ena_eth_com.h |  15 +-
+ drivers/net/ethernet/amazon/ena/ena_netdev.c  |  32 +-
+ 4 files changed, 151 insertions(+), 260 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 4db689372980e..e733419dd3f49 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
+       struct ena_com_admin_sq *sq = &admin_queue->sq;
+       u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+-      sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+-                                       &sq->dma_addr, GFP_KERNEL);
++      sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
+       if (!sq->entries) {
+               netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
+       struct ena_com_admin_cq *cq = &admin_queue->cq;
+       u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+-      cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+-                                       &cq->dma_addr, GFP_KERNEL);
++      cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
+       if (!cq->entries) {
+               netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+       ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+       size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+-      aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+-                                         &aenq->dma_addr, GFP_KERNEL);
++      aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
+       if (!aenq->entries) {
+               netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+       aenq_caps = 0;
+       aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+-      aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
+-                    << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+-                   ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
++      aenq_caps |=
++              (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
++              ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+       writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+       if (unlikely(!aenq_handlers)) {
+-              netdev_err(ena_dev->net_device,
+-                         "AENQ handlers pointer is NULL\n");
++              netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
+               return -EINVAL;
+       }
+@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
+       }
+       if (unlikely(!admin_queue->comp_ctx)) {
+-              netdev_err(admin_queue->ena_dev->net_device,
+-                         "Completion context is NULL\n");
++              netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
+               return NULL;
+       }
+       if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
+-              netdev_err(admin_queue->ena_dev->net_device,
+-                         "Completion context is occupied\n");
++              netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
+               return NULL;
+       }
+@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
+       /* In case of queue FULL */
+       cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
+       if (cnt >= admin_queue->q_depth) {
+-              netdev_dbg(admin_queue->ena_dev->net_device,
+-                         "Admin queue is full.\n");
++              netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
+               admin_queue->stats.out_of_space++;
+               return ERR_PTR(-ENOSPC);
+       }
+@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
+       struct ena_comp_ctx *comp_ctx;
+       u16 i;
+-      admin_queue->comp_ctx =
+-              devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
++      admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+       if (unlikely(!admin_queue->comp_ctx)) {
+               netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+               return -ENOMEM;
+@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+               dev_node = dev_to_node(ena_dev->dmadev);
+               set_dev_node(ena_dev->dmadev, ctx->numa_node);
+               io_sq->desc_addr.virt_addr =
+-                      dma_alloc_coherent(ena_dev->dmadev, size,
+-                                         &io_sq->desc_addr.phys_addr,
++                      dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+                                          GFP_KERNEL);
+               set_dev_node(ena_dev->dmadev, dev_node);
+               if (!io_sq->desc_addr.virt_addr) {
+                       io_sq->desc_addr.virt_addr =
+                               dma_alloc_coherent(ena_dev->dmadev, size,
+-                                                 &io_sq->desc_addr.phys_addr,
+-                                                 GFP_KERNEL);
++                                                 &io_sq->desc_addr.phys_addr, GFP_KERNEL);
+               }
+               if (!io_sq->desc_addr.virt_addr) {
+-                      netdev_err(ena_dev->net_device,
+-                                 "Memory allocation failed\n");
++                      netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+                       return -ENOMEM;
+               }
+       }
+@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+               dev_node = dev_to_node(ena_dev->dmadev);
+               set_dev_node(ena_dev->dmadev, ctx->numa_node);
+-              io_sq->bounce_buf_ctrl.base_buffer =
+-                      devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
++              io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+               set_dev_node(ena_dev->dmadev, dev_node);
+               if (!io_sq->bounce_buf_ctrl.base_buffer)
+                       io_sq->bounce_buf_ctrl.base_buffer =
+                               devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+               if (!io_sq->bounce_buf_ctrl.base_buffer) {
+-                      netdev_err(ena_dev->net_device,
+-                                 "Bounce buffer memory allocation failed\n");
++                      netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
+                       return -ENOMEM;
+               }
+@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+       prev_node = dev_to_node(ena_dev->dmadev);
+       set_dev_node(ena_dev->dmadev, ctx->numa_node);
+       io_cq->cdesc_addr.virt_addr =
+-              dma_alloc_coherent(ena_dev->dmadev, size,
+-                                 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
++              dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+       set_dev_node(ena_dev->dmadev, prev_node);
+       if (!io_cq->cdesc_addr.virt_addr) {
+               io_cq->cdesc_addr.virt_addr =
+-                      dma_alloc_coherent(ena_dev->dmadev, size,
+-                                         &io_cq->cdesc_addr.phys_addr,
++                      dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+                                          GFP_KERNEL);
+       }
+@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+                                       u8 comp_status)
+ {
+       if (unlikely(comp_status != 0))
+-              netdev_err(admin_queue->ena_dev->net_device,
+-                         "Admin command failed[%u]\n", comp_status);
++              netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
++                         comp_status);
+       switch (comp_status) {
+       case ENA_ADMIN_SUCCESS:
+@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+       }
+       if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+-              netdev_err(admin_queue->ena_dev->net_device,
+-                         "Command was aborted\n");
++              netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
+               spin_lock_irqsave(&admin_queue->q_lock, flags);
+               admin_queue->stats.aborted_cmd++;
+               spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+               goto err;
+       }
+-      WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+-           comp_ctx->status);
++      WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
+       ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
+ err:
+@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+                                           sizeof(resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to set LLQ configurations: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
+       return ret;
+ }
+@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                       llq_default_cfg->llq_header_location;
+       } else {
+               netdev_err(ena_dev->net_device,
+-                         "Invalid header location control, supported: 0x%x\n",
+-                         supported_feat);
++                         "Invalid header location control, supported: 0x%x\n", supported_feat);
+               return -EINVAL;
+       }
+@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                       netdev_err(ena_dev->net_device,
+                                  "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+-                                 llq_default_cfg->llq_stride_ctrl,
+-                                 supported_feat, llq_info->desc_stride_ctrl);
++                                 llq_default_cfg->llq_stride_ctrl, supported_feat,
++                                 llq_info->desc_stride_ctrl);
+               }
+       } else {
+               llq_info->desc_stride_ctrl = 0;
+@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                       llq_info->desc_list_entry_size = 256;
+               } else {
+                       netdev_err(ena_dev->net_device,
+-                                 "Invalid entry_size_ctrl, supported: 0x%x\n",
+-                                 supported_feat);
++                                 "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+                       return -EINVAL;
+               }
+@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+               netdev_err(ena_dev->net_device,
+                          "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+-                         llq_default_cfg->llq_num_decs_before_header,
+-                         supported_feat, llq_info->descs_num_before_header);
++                         llq_default_cfg->llq_num_decs_before_header, supported_feat,
++                         llq_info->descs_num_before_header);
+       }
+       /* Check for accelerated queue supported */
+       llq_accel_mode_get = llq_features->accel_mode.u.get;
+@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+       rc = ena_com_set_llq(ena_dev);
+       if (rc)
+-              netdev_err(ena_dev->net_device,
+-                         "Cannot set LLQ configuration: %d\n", rc);
++              netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
+       return rc;
+ }
+@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+       int ret;
+       wait_for_completion_timeout(&comp_ctx->wait_event,
+-                                  usecs_to_jiffies(
+-                                          admin_queue->completion_timeout));
++                                  usecs_to_jiffies(admin_queue->completion_timeout));
+       /* In case the command wasn't completed find out the root cause.
+        * There might be 2 kinds of errors
+@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+               if (comp_ctx->status == ENA_CMD_COMPLETED) {
+                       netdev_err(admin_queue->ena_dev->net_device,
+                                  "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+-                                 comp_ctx->cmd_opcode,
+-                                 admin_queue->auto_polling ? "ON" : "OFF");
++                                 comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+                       /* Check if fallback to polling is enabled */
+                       if (admin_queue->auto_polling)
+                               admin_queue->polling = true;
+@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+       if (unlikely(i == timeout)) {
+               netdev_err(ena_dev->net_device,
+                          "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
+-                         mmio_read->seq_num, offset, read_resp->req_id,
+-                         read_resp->reg_off);
++                         mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
+               ret = ENA_MMIO_READ_TIMEOUT;
+               goto err;
+       }
+       if (read_resp->reg_off != offset) {
+-              netdev_err(ena_dev->net_device,
+-                         "Read failure: wrong offset provided\n");
++              netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
+               ret = ENA_MMIO_READ_TIMEOUT;
+       } else {
+               ret = read_resp->reg_val;
+@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+                                           sizeof(destroy_resp));
+       if (unlikely(ret && (ret != -ENODEV)))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to destroy io sq error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
+       return ret;
+ }
+@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+       if (io_cq->cdesc_addr.virt_addr) {
+               size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+-              dma_free_coherent(ena_dev->dmadev, size,
+-                                io_cq->cdesc_addr.virt_addr,
++              dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
+                                 io_cq->cdesc_addr.phys_addr);
+               io_cq->cdesc_addr.virt_addr = NULL;
+@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+       if (io_sq->desc_addr.virt_addr) {
+               size = io_sq->desc_entry_size * io_sq->q_depth;
+-              dma_free_coherent(ena_dev->dmadev, size,
+-                                io_sq->desc_addr.virt_addr,
++              dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
+                                 io_sq->desc_addr.phys_addr);
+               io_sq->desc_addr.virt_addr = NULL;
+@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+               val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+               if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+-                      netdev_err(ena_dev->net_device,
+-                                 "Reg read timeout occurred\n");
++                      netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+                       return -ETIME;
+               }
+@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+       int ret;
+       if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+-              netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+-                         feature_id);
++              netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
+               return -EOPNOTSUPP;
+       }
+@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+       if (unlikely(ret))
+               netdev_err(ena_dev->net_device,
+-                         "Failed to submit get_feature command %d error: %d\n",
+-                         feature_id, ret);
++                         "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
+       return ret;
+ }
+@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+       struct ena_rss *rss = &ena_dev->rss;
+-      if (!ena_com_check_supported_feature_id(ena_dev,
+-                                              ENA_ADMIN_RSS_HASH_FUNCTION))
++      if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+               return -EOPNOTSUPP;
+-      rss->hash_key =
+-              dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+-                                 &rss->hash_key_dma_addr, GFP_KERNEL);
++      rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
++                                         &rss->hash_key_dma_addr, GFP_KERNEL);
+       if (unlikely(!rss->hash_key))
+               return -ENOMEM;
+@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+       struct ena_rss *rss = &ena_dev->rss;
+       if (rss->hash_key)
+-              dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+-                                rss->hash_key, rss->hash_key_dma_addr);
++              dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
++                                rss->hash_key_dma_addr);
+       rss->hash_key = NULL;
+ }
+@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+ {
+       struct ena_rss *rss = &ena_dev->rss;
+-      rss->hash_ctrl =
+-              dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+-                                 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
++      rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
++                                          &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+       if (unlikely(!rss->hash_ctrl))
+               return -ENOMEM;
+@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+       struct ena_rss *rss = &ena_dev->rss;
+       if (rss->hash_ctrl)
+-              dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+-                                rss->hash_ctrl, rss->hash_ctrl_dma_addr);
++              dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
++                                rss->hash_ctrl_dma_addr);
+       rss->hash_ctrl = NULL;
+ }
+@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+       tbl_size = (1ULL << log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+-      rss->rss_ind_tbl =
+-              dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+-                                 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
++      rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
++                                            GFP_KERNEL);
+       if (unlikely(!rss->rss_ind_tbl))
+               goto mem_err1;
+       tbl_size = (1ULL << log_size) * sizeof(u16);
+-      rss->host_rss_ind_tbl =
+-              devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
++      rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+       if (unlikely(!rss->host_rss_ind_tbl))
+               goto mem_err2;
+@@ -1197,8 +1161,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+       tbl_size = (1ULL << log_size) *
+               sizeof(struct ena_admin_rss_ind_table_entry);
+-      dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+-                        rss->rss_ind_tbl_dma_addr);
++      dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
+       rss->rss_ind_tbl = NULL;
+ mem_err1:
+       rss->tbl_log_size = 0;
+@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+                                          &create_cmd.sq_ba,
+                                          io_sq->desc_addr.phys_addr);
+               if (unlikely(ret)) {
+-                      netdev_err(ena_dev->net_device,
+-                                 "Memory address set failed\n");
++                      netdev_err(ena_dev->net_device, "Memory address set failed\n");
+                       return ret;
+               }
+       }
+@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+                                           (struct ena_admin_acq_entry *)&cmd_completion,
+                                           sizeof(cmd_completion));
+       if (unlikely(ret)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to create IO SQ. error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
+               return ret;
+       }
+@@ -1292,8 +1253,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+                       cmd_completion.llq_descriptors_offset);
+       }
+-      netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
+-                 io_sq->idx, io_sq->q_depth);
++      netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+       return ret;
+ }
+@@ -1420,8 +1380,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+                                           (struct ena_admin_acq_entry *)&cmd_completion,
+                                           sizeof(cmd_completion));
+       if (unlikely(ret)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to create IO CQ. error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
+               return ret;
+       }
+@@ -1440,8 +1399,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+                       (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+                       cmd_completion.numa_node_register_offset);
+-      netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
+-                 io_cq->idx, io_cq->q_depth);
++      netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+       return ret;
+ }
+@@ -1451,8 +1409,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+                           struct ena_com_io_cq **io_cq)
+ {
+       if (qid >= ENA_TOTAL_NUM_QUEUES) {
+-              netdev_err(ena_dev->net_device,
+-                         "Invalid queue number %d but the max is %d\n", qid,
++              netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
+                          ENA_TOTAL_NUM_QUEUES);
+               return -EINVAL;
+       }
+@@ -1492,8 +1449,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+       spin_lock_irqsave(&admin_queue->q_lock, flags);
+       while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+               spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+-              ena_delay_exponential_backoff_us(exp++,
+-                                               ena_dev->ena_min_poll_delay_us);
++              ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+               spin_lock_irqsave(&admin_queue->q_lock, flags);
+       }
+       spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -1519,8 +1475,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+                                           sizeof(destroy_resp));
+       if (unlikely(ret && (ret != -ENODEV)))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to destroy IO CQ. error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
+       return ret;
+ }
+@@ -1588,8 +1543,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+                                           sizeof(resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to config AENQ ret: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
+       return ret;
+ }
+@@ -1610,8 +1564,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+       netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
+       if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+-              netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
+-                         width);
++              netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
+               return -EINVAL;
+       }
+@@ -1633,19 +1586,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
+       ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+                                         ENA_REGS_CONTROLLER_VERSION_OFF);
+-      if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+-                   (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
++      if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+               netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+               return -ETIME;
+       }
+       dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
+-               (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+-                       ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
++               (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+                ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+-      dev_info(ena_dev->dmadev,
+-               "ENA controller version: %d.%d.%d implementation version %d\n",
++      dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
+                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+                        ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+@@ -1694,20 +1644,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+       size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+       if (sq->entries)
+-              dma_free_coherent(ena_dev->dmadev, size, sq->entries,
+-                                sq->dma_addr);
++              dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
+       sq->entries = NULL;
+       size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+       if (cq->entries)
+-              dma_free_coherent(ena_dev->dmadev, size, cq->entries,
+-                                cq->dma_addr);
++              dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
+       cq->entries = NULL;
+       size = ADMIN_AENQ_SIZE(aenq->q_depth);
+       if (ena_dev->aenq.entries)
+-              dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
+-                                aenq->dma_addr);
++              dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
+       aenq->entries = NULL;
+ }
+@@ -1733,10 +1680,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+       struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+       spin_lock_init(&mmio_read->lock);
+-      mmio_read->read_resp =
+-              dma_alloc_coherent(ena_dev->dmadev,
+-                                 sizeof(*mmio_read->read_resp),
+-                                 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
++      mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
++                                                &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+       if (unlikely(!mmio_read->read_resp))
+               goto err;
+@@ -1767,8 +1712,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+       writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+       writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+-      dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+-                        mmio_read->read_resp, mmio_read->read_resp_dma_addr);
++      dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
++                        mmio_read->read_resp_dma_addr);
+       mmio_read->read_resp = NULL;
+ }
+@@ -1800,8 +1745,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
+       }
+       if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Device isn't ready, abort com init\n");
++              netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
+               return -ENODEV;
+       }
+@@ -1878,8 +1822,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+       int ret;
+       if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+-              netdev_err(ena_dev->net_device,
+-                         "Qid (%d) is bigger than max num of queues (%d)\n",
++              netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+                          ctx->qid, ENA_TOTAL_NUM_QUEUES);
+               return -EINVAL;
+       }
+@@ -1905,8 +1848,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+       if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+               /* header length is limited to 8 bits */
+-              io_sq->tx_max_header_size =
+-                      min_t(u32, ena_dev->tx_max_header_size, SZ_256);
++              io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+       ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+       if (ret)
+@@ -1938,8 +1880,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+       struct ena_com_io_cq *io_cq;
+       if (qid >= ENA_TOTAL_NUM_QUEUES) {
+-              netdev_err(ena_dev->net_device,
+-                         "Qid (%d) is bigger than max num of queues (%d)\n",
++              netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+                          qid, ENA_TOTAL_NUM_QUEUES);
+               return;
+       }
+@@ -1983,8 +1924,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+               if (rc)
+                       return rc;
+-              if (get_resp.u.max_queue_ext.version !=
+-                  ENA_FEATURE_MAX_QUEUE_EXT_VER)
++              if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+                       return -EINVAL;
+               memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+@@ -2025,18 +1965,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
+       if (!rc)
+-              memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+-                     sizeof(get_resp.u.hw_hints));
++              memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
+       else if (rc == -EOPNOTSUPP)
+-              memset(&get_feat_ctx->hw_hints, 0x0,
+-                     sizeof(get_feat_ctx->hw_hints));
++              memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+       else
+               return rc;
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
+       if (!rc)
+-              memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+-                     sizeof(get_resp.u.llq));
++              memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
+       else if (rc == -EOPNOTSUPP)
+               memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+       else
+@@ -2084,8 +2021,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+       aenq_common = &aenq_e->aenq_common_desc;
+       /* Go over all the events */
+-      while ((READ_ONCE(aenq_common->flags) &
+-              ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
++      while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+               /* Make sure the phase bit (ownership) is as expected before
+                * reading the rest of the descriptor.
+                */
+@@ -2094,8 +2030,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+               timestamp = (u64)aenq_common->timestamp_low |
+                       ((u64)aenq_common->timestamp_high << 32);
+-              netdev_dbg(ena_dev->net_device,
+-                         "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
++              netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+                          aenq_common->group, aenq_common->syndrome, timestamp);
+               /* Handle specific event*/
+@@ -2124,8 +2059,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+       /* write the aenq doorbell after all AENQ descriptors were read */
+       mb();
+-      writel_relaxed((u32)aenq->head,
+-                     ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
++      writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ }
+ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+@@ -2137,15 +2071,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+       stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+       cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+-      if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+-                   (cap == ENA_MMIO_READ_TIMEOUT))) {
++      if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
+               netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
+               return -ETIME;
+       }
+       if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+-              netdev_err(ena_dev->net_device,
+-                         "Device isn't ready, can't reset device\n");
++              netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
+               return -EINVAL;
+       }
+@@ -2168,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+       rc = wait_for_reset_state(ena_dev, timeout,
+                                 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+       if (rc != 0) {
+-              netdev_err(ena_dev->net_device,
+-                         "Reset indication didn't turn on\n");
++              netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
+               return rc;
+       }
+@@ -2177,8 +2108,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+       writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+       rc = wait_for_reset_state(ena_dev, timeout, 0);
+       if (rc != 0) {
+-              netdev_err(ena_dev->net_device,
+-                         "Reset indication didn't turn off\n");
++              netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
+               return rc;
+       }
+@@ -2215,8 +2145,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+                                            sizeof(*get_resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to get stats. error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+       return ret;
+ }
+@@ -2228,8 +2157,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+       int ret;
+       if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Capability %d isn't supported\n",
++              netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+                          ENA_ADMIN_ENI_STATS);
+               return -EOPNOTSUPP;
+       }
+@@ -2266,8 +2194,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+       int ret;
+       if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+-              netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+-                         ENA_ADMIN_MTU);
++              netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
+               return -EOPNOTSUPP;
+       }
+@@ -2286,8 +2213,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+                                           sizeof(resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to set mtu %d. error: %d\n", mtu, ret);
++              netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
+       return ret;
+ }
+@@ -2301,8 +2227,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+       ret = ena_com_get_feature(ena_dev, &resp,
+                                 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
+       if (unlikely(ret)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to get offload capabilities %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
+               return ret;
+       }
+@@ -2320,8 +2245,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+       struct ena_admin_get_feat_resp get_resp;
+       int ret;
+-      if (!ena_com_check_supported_feature_id(ena_dev,
+-                                              ENA_ADMIN_RSS_HASH_FUNCTION)) {
++      if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
+               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+                          ENA_ADMIN_RSS_HASH_FUNCTION);
+               return -EOPNOTSUPP;
+@@ -2334,8 +2258,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+               return ret;
+       if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
+-              netdev_err(ena_dev->net_device,
+-                         "Func hash %d isn't supported by device, abort\n",
++              netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
+                          rss->hash_func);
+               return -EOPNOTSUPP;
+       }
+@@ -2365,8 +2288,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+       if (unlikely(ret)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to set hash function %d. error: %d\n",
++              netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
+                          rss->hash_func, ret);
+               return -EINVAL;
+       }
+@@ -2398,16 +2320,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+               return rc;
+       if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
+-              netdev_err(ena_dev->net_device,
+-                         "Flow hash function %d isn't supported\n", func);
++              netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
+               return -EOPNOTSUPP;
+       }
+       if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+               if (key_len != sizeof(hash_key->key)) {
+                       netdev_err(ena_dev->net_device,
+-                                 "key len (%u) doesn't equal the supported size (%zu)\n",
+-                                 key_len, sizeof(hash_key->key));
++                                 "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
++                                 sizeof(hash_key->key));
+                       return -EINVAL;
+               }
+               memcpy(hash_key->key, key, key_len);
+@@ -2495,8 +2416,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+       struct ena_admin_set_feat_resp resp;
+       int ret;
+-      if (!ena_com_check_supported_feature_id(ena_dev,
+-                                              ENA_ADMIN_RSS_HASH_INPUT)) {
++      if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
+               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+                          ENA_ADMIN_RSS_HASH_INPUT);
+               return -EOPNOTSUPP;
+@@ -2527,8 +2447,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to set hash input. error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
+       return ret;
+ }
+@@ -2605,8 +2524,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+       int rc;
+       if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+-              netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
+-                         proto);
++              netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
+               return -EINVAL;
+       }
+@@ -2658,8 +2576,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+       struct ena_admin_set_feat_resp resp;
+       int ret;
+-      if (!ena_com_check_supported_feature_id(
+-                  ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
++      if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+               netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+                          ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
+               return -EOPNOTSUPP;
+@@ -2699,8 +2616,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+                                           sizeof(resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to set indirect table. error: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
+       return ret;
+ }
+@@ -2779,9 +2695,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+ {
+       struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+-      host_attr->host_info =
+-              dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+-                                 &host_attr->host_info_dma_addr, GFP_KERNEL);
++      host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
++                                                &host_attr->host_info_dma_addr, GFP_KERNEL);
+       if (unlikely(!host_attr->host_info))
+               return -ENOMEM;
+@@ -2827,8 +2742,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+       if (host_attr->debug_area_virt_addr) {
+               dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
+-                                host_attr->debug_area_virt_addr,
+-                                host_attr->debug_area_dma_addr);
++                                host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
+               host_attr->debug_area_virt_addr = NULL;
+       }
+ }
+@@ -2877,8 +2791,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+                                           sizeof(resp));
+       if (unlikely(ret))
+-              netdev_err(ena_dev->net_device,
+-                         "Failed to set host attributes: %d\n", ret);
++              netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
+       return ret;
+ }
+@@ -2896,8 +2809,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
+                                                         u32 *intr_moder_interval)
+ {
+       if (!intr_delay_resolution) {
+-              netdev_err(ena_dev->net_device,
+-                         "Illegal interrupt delay granularity value\n");
++              netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
+               return -EFAULT;
+       }
+@@ -2935,14 +2847,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+       if (rc) {
+               if (rc == -EOPNOTSUPP) {
+-                      netdev_dbg(ena_dev->net_device,
+-                                 "Feature %d isn't supported\n",
++                      netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+                                  ENA_ADMIN_INTERRUPT_MODERATION);
+                       rc = 0;
+               } else {
+                       netdev_err(ena_dev->net_device,
+-                                 "Failed to get interrupt moderation admin cmd. rc: %d\n",
+-                                 rc);
++                                 "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
+               }
+               /* no moderation supported, disable adaptive support */
+@@ -2990,8 +2900,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+               (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
+       if (unlikely(ena_dev->tx_max_header_size == 0)) {
+-              netdev_err(ena_dev->net_device,
+-                         "The size of the LLQ entry is smaller than needed\n");
++              netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
+               return -EINVAL;
+       }
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index f9f886289b970..933e619b3a313 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+       cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+                       + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+-      desc_phase = (READ_ONCE(cdesc->status) &
+-                    ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
++      desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+                    ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+       if (desc_phase != expected_phase)
+@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+               io_sq->entries_in_tx_burst_left--;
+               netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                         "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+-                         io_sq->qid, io_sq->entries_in_tx_burst_left);
++                         "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
++                         io_sq->entries_in_tx_burst_left);
+       }
+       /* Make sure everything was written into the bounce buffer before
+@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+       wmb();
+       /* The line is completed. Copy it to dev */
+-      __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+-                       bounce_buffer, (llq_info->desc_list_entry_size) / 8);
++      __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
++                       (llq_info->desc_list_entry_size) / 8);
+       io_sq->tail++;
+@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+       header_offset =
+               llq_info->descs_num_before_header * io_sq->desc_entry_size;
+-      if (unlikely((header_offset + header_len) >
+-                   llq_info->desc_list_entry_size)) {
++      if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
+               netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+                          "Trying to write header larger than llq entry can accommodate\n");
+               return -EFAULT;
+       }
+       if (unlikely(!bounce_buffer)) {
+-              netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                         "Bounce buffer is NULL\n");
++              netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+               return -EFAULT;
+       }
+@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+       bounce_buffer = pkt_ctrl->curr_bounce_buf;
+       if (unlikely(!bounce_buffer)) {
+-              netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                         "Bounce buffer is NULL\n");
++              netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+               return NULL;
+       }
+@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+               ena_com_cq_inc_head(io_cq);
+               count++;
+-              last = (READ_ONCE(cdesc->status) &
+-                      ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
++              last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+                      ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+       } while (!last);
+@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+       netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+                  "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+-                 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+-                 ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+-                 ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
++                 ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
++                 ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ }
+ /*****************************************************************************/
+@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+       if (unlikely(header_len > io_sq->tx_max_header_size)) {
+               netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                         "Header size is too large %d max header: %d\n",
+-                         header_len, io_sq->tx_max_header_size);
++                         "Header size is too large %d max header: %d\n", header_len,
++                         io_sq->tx_max_header_size);
+               return -EINVAL;
+       }
+-      if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+-                   !buffer_to_push)) {
++      if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
+               netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+                          "Push header wasn't provided in LLQ mode\n");
+               return -EINVAL;
+@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+       }
+       netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-                 "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+-                 nb_hw_desc);
++                 "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
+       if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+               netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-                         "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+-                         ena_rx_ctx->max_bufs);
++                         "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
+               return -ENOSPC;
+       }
+@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+       io_sq->next_to_comp += nb_hw_desc;
+       netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-                 "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+-                 io_sq->qid, io_sq->next_to_comp);
++                 "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
++                 io_sq->next_to_comp);
+       /* Get rx flags from the last pkt */
+       ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
+@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+       desc->req_id = req_id;
+       netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                 "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+-                 __func__, io_sq->qid, req_id);
++                 "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
++                 req_id);
+       desc->buff_addr_lo = (u32)ena_buf->paddr;
+       desc->buff_addr_hi =
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+index 689313ee25a80..07029eee78caf 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+@@ -141,8 +141,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+       }
+       netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                 "Queue: %d num_descs: %d num_entries_needed: %d\n",
+-                 io_sq->qid, num_descs, num_entries_needed);
++                 "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
++                 num_entries_needed);
+       return num_entries_needed > io_sq->entries_in_tx_burst_left;
+ }
+@@ -153,15 +153,14 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+       u16 tail = io_sq->tail;
+       netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                 "Write submission queue doorbell for queue: %d tail: %d\n",
+-                 io_sq->qid, tail);
++                 "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
+       writel(tail, io_sq->db_addr);
+       if (is_llq_max_tx_burst_exists(io_sq)) {
+               netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+-                         "Reset available entries in tx burst for queue %d to %d\n",
+-                         io_sq->qid, max_entries_in_tx_burst);
++                         "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
++                         max_entries_in_tx_burst);
+               io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+       }
+@@ -244,8 +243,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+       *req_id = READ_ONCE(cdesc->req_id);
+       if (unlikely(*req_id >= io_cq->q_depth)) {
+-              netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+-                         "Invalid req id %d\n", cdesc->req_id);
++              netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
++                         cdesc->req_id);
+               return -EINVAL;
+       }
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 6ebe0ac8e53e9..c4a396f25921c 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -164,11 +164,9 @@ static int ena_xmit_common(struct net_device *dev,
+       if (unlikely(rc)) {
+               netif_err(adapter, tx_queued, dev,
+                         "Failed to prepare tx bufs\n");
+-              ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+-                                &ring->syncp);
++              ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
+               if (rc != -ENOMEM)
+-                      ena_reset_device(adapter,
+-                                       ENA_REGS_RESET_DRIVER_INVALID_STATE);
++                      ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+               return rc;
+       }
+@@ -992,8 +990,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
+        */
+       page = dev_alloc_page();
+       if (!page) {
+-              ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+-                                &rx_ring->syncp);
++              ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
+               return ERR_PTR(-ENOSPC);
+       }
+@@ -1052,8 +1049,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
+                                   struct ena_rx_buffer *rx_info,
+                                   unsigned long attrs)
+ {
+-      dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
+-                           DMA_BIDIRECTIONAL, attrs);
++      dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
++                           attrs);
+ }
+ static void ena_free_rx_page(struct ena_ring *rx_ring,
+@@ -1344,8 +1341,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+                                               &req_id);
+               if (rc) {
+                       if (unlikely(rc == -EINVAL))
+-                              handle_invalid_req_id(tx_ring, req_id, NULL,
+-                                                    false);
++                              handle_invalid_req_id(tx_ring, req_id, NULL, false);
+                       break;
+               }
+@@ -1583,8 +1579,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+                                       DMA_FROM_DEVICE);
+               if (!reuse_rx_buf_page)
+-                      ena_unmap_rx_buff_attrs(rx_ring, rx_info,
+-                                              DMA_ATTR_SKIP_CPU_SYNC);
++                      ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+               skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+                               page_offset + buf_offset, len, buf_len);
+@@ -1849,8 +1844,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+       adapter = netdev_priv(rx_ring->netdev);
+       if (rc == -ENOSPC) {
+-              ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+-                                &rx_ring->syncp);
++              ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
+               ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+       } else {
+               ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+@@ -2395,8 +2389,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
+       if (!ena_dev->rss.tbl_log_size) {
+               rc = ena_rss_init_default(adapter);
+               if (rc && (rc != -EOPNOTSUPP)) {
+-                      netif_err(adapter, ifup, adapter->netdev,
+-                                "Failed to init RSS rc: %d\n", rc);
++                      netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
+                       return rc;
+               }
+       }
+@@ -3313,8 +3306,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
+       rc = ena_com_set_host_attributes(adapter->ena_dev);
+       if (rc) {
+               if (rc == -EOPNOTSUPP)
+-                      netif_warn(adapter, drv, adapter->netdev,
+-                                 "Cannot set host attributes\n");
++                      netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
+               else
+                       netif_err(adapter, drv, adapter->netdev,
+                                 "Cannot set host attributes\n");
+@@ -4186,8 +4178,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
+               }
+       }
+-      rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
+-                                      ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
++      rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
++                                      0xFFFFFFFF);
+       if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+               dev_err(dev, "Cannot fill hash function\n");
+               goto err_fill_indir;
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-fec-add-fec_enet_deinit.patch b/queue-6.1/net-fec-add-fec_enet_deinit.patch
new file mode 100644 (file)
index 0000000..767b103
--- /dev/null
@@ -0,0 +1,63 @@
+From 424a6817be5f166fef18803e08decfbb66bf2a5c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 May 2024 13:05:28 +0800
+Subject: net:fec: Add fec_enet_deinit()
+
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+
+[ Upstream commit bf0497f53c8535f99b72041529d3f7708a6e2c0d ]
+
+When fec_probe() fails or fec_drv_remove() needs to release the
+fec queue and remove a NAPI context, therefore add a function
+corresponding to fec_enet_init() and call fec_enet_deinit() which
+does the opposite to release memory and remove a NAPI context.
+
+Fixes: 59d0f7465644 ("net: fec: init multi queue date structure")
+Signed-off-by: Xiaolei Wang <xiaolei.wang@windriver.com>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20240524050528.4115581-1-xiaolei.wang@windriver.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index ebff14b0837d9..0a3df468316e5 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3732,6 +3732,14 @@ static int fec_enet_init(struct net_device *ndev)
+       return ret;
+ }
++static void fec_enet_deinit(struct net_device *ndev)
++{
++      struct fec_enet_private *fep = netdev_priv(ndev);
++
++      netif_napi_del(&fep->napi);
++      fec_enet_free_queue(ndev);
++}
++
+ #ifdef CONFIG_OF
+ static int fec_reset_phy(struct platform_device *pdev)
+ {
+@@ -4136,6 +4144,7 @@ fec_probe(struct platform_device *pdev)
+       fec_enet_mii_remove(fep);
+ failed_mii_init:
+ failed_irq:
++      fec_enet_deinit(ndev);
+ failed_init:
+       fec_ptp_stop(pdev);
+ failed_reset:
+@@ -4199,6 +4208,7 @@ fec_drv_remove(struct platform_device *pdev)
+       pm_runtime_put_noidle(&pdev->dev);
+       pm_runtime_disable(&pdev->dev);
++      fec_enet_deinit(ndev);
+       free_netdev(ndev);
+       return 0;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-mlx5-lag-do-bond-only-if-slaves-agree-on-roce-st.patch b/queue-6.1/net-mlx5-lag-do-bond-only-if-slaves-agree-on-roce-st.patch
new file mode 100644 (file)
index 0000000..2f6a6f6
--- /dev/null
@@ -0,0 +1,74 @@
+From 31fb117ad131a50c7d2e4cd4450d6da11a979f27 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 22:26:52 +0300
+Subject: net/mlx5: Lag, do bond only if slaves agree on roce state
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maher Sanalla <msanalla@nvidia.com>
+
+[ Upstream commit 51ef9305b8f40946d65c40368ffb4c14636d369a ]
+
+Currently, the driver does not enforce that lag bond slaves must have
+matching roce capabilities. Yet, in mlx5_do_bond(), the driver attempts
+to enable roce on all vports of the bond slaves, causing the following
+syndrome when one slave has no roce fw support:
+
+mlx5_cmd_out_err:809:(pid 25427): MODIFY_NIC_VPORT_CONTEXT(0×755) op_mod(0×0)
+failed, status bad parameter(0×3), syndrome (0xc1f678), err(-22)
+
+Thus, create HW lag only if bond's slaves agree on roce state,
+either all slaves have roce support resulting in a roce lag bond,
+or none do, resulting in a raw eth bond.
+
+Fixes: 7907f23adc18 ("net/mlx5: Implement RoCE LAG feature")
+Signed-off-by: Maher Sanalla <msanalla@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+index 01c0e1ee918d8..a283d8ae466b6 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c
+@@ -696,6 +696,7 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+       struct mlx5_core_dev *dev;
+       u8 mode;
+ #endif
++      bool roce_support;
+       int i;
+       for (i = 0; i < ldev->ports; i++)
+@@ -722,6 +723,11 @@ static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev)
+               if (mlx5_sriov_is_enabled(ldev->pf[i].dev))
+                       return false;
+ #endif
++      roce_support = mlx5_get_roce_state(ldev->pf[MLX5_LAG_P1].dev);
++      for (i = 1; i < ldev->ports; i++)
++              if (mlx5_get_roce_state(ldev->pf[i].dev) != roce_support)
++                      return false;
++
+       return true;
+ }
+@@ -884,8 +890,10 @@ static void mlx5_do_bond(struct mlx5_lag *ldev)
+               } else if (roce_lag) {
+                       dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+                       mlx5_rescan_drivers_locked(dev0);
+-                      for (i = 1; i < ldev->ports; i++)
+-                              mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++                      for (i = 1; i < ldev->ports; i++) {
++                              if (mlx5_get_roce_state(ldev->pf[i].dev))
++                                      mlx5_nic_vport_enable_roce(ldev->pf[i].dev);
++                      }
+               } else if (shared_fdb) {
+                       dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV;
+                       mlx5_rescan_drivers_locked(dev0);
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-mlx5e-fix-ipsec-tunnel-mode-offload-feature-chec.patch b/queue-6.1/net-mlx5e-fix-ipsec-tunnel-mode-offload-feature-chec.patch
new file mode 100644 (file)
index 0000000..56e77bf
--- /dev/null
@@ -0,0 +1,55 @@
+From 8803ec4e34d218aa151df2fa4b25c18f2be6a5e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 22:26:56 +0300
+Subject: net/mlx5e: Fix IPsec tunnel mode offload feature check
+
+From: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+
+[ Upstream commit 9a52f6d44f4521773b4699b4ed34b8e21d5a175c ]
+
+Remove faulty check disabling checksum offload and GSO for offload of
+simple IPsec tunnel L4 traffic. Comment previously describing the deleted
+code incorrectly claimed the check prevented double tunnel (or three layers
+of ip headers).
+
+Fixes: f1267798c980 ("net/mlx5: Fix checksum issue of VXLAN and IPsec crypto offload")
+Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h    | 17 +++++------------
+ 1 file changed, 5 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+index 1878a70b9031d..43ccdf0e6cff8 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+@@ -97,18 +97,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+               if (!x || !x->xso.offload_handle)
+                       goto out_disable;
+-              if (xo->inner_ipproto) {
+-                      /* Cannot support tunnel packet over IPsec tunnel mode
+-                       * because we cannot offload three IP header csum
+-                       */
+-                      if (x->props.mode == XFRM_MODE_TUNNEL)
+-                              goto out_disable;
+-
+-                      /* Only support UDP or TCP L4 checksum */
+-                      if (xo->inner_ipproto != IPPROTO_UDP &&
+-                          xo->inner_ipproto != IPPROTO_TCP)
+-                              goto out_disable;
+-              }
++              /* Only support UDP or TCP L4 checksum */
++              if (xo->inner_ipproto &&
++                  xo->inner_ipproto != IPPROTO_UDP &&
++                  xo->inner_ipproto != IPPROTO_TCP)
++                      goto out_disable;
+               return features;
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-mlx5e-fix-udp-gso-for-encapsulated-packets.patch b/queue-6.1/net-mlx5e-fix-udp-gso-for-encapsulated-packets.patch
new file mode 100644 (file)
index 0000000..d5c23fe
--- /dev/null
@@ -0,0 +1,68 @@
+From a4ef3c347dee02e67eeb66eddef8e6848ef8fafc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 22:26:59 +0300
+Subject: net/mlx5e: Fix UDP GSO for encapsulated packets
+
+From: Gal Pressman <gal@nvidia.com>
+
+[ Upstream commit 83fea49f2711fc90c0d115b0ed04046b45155b65 ]
+
+When the skb is encapsulated, adjust the inner UDP header instead of the
+outer one, and account for UDP header (instead of TCP) in the inline
+header size calculation.
+
+Fixes: 689adf0d4892 ("net/mlx5e: Add UDP GSO support")
+Reported-by: Jason Baron <jbaron@akamai.com>
+Closes: https://lore.kernel.org/netdev/c42961cb-50b9-4a9a-bd43-87fe48d88d29@akamai.com/
+Signed-off-by: Gal Pressman <gal@nvidia.com>
+Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
+Reviewed-by: Boris Pismenny <borisp@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h   | 8 +++++++-
+ drivers/net/ethernet/mellanox/mlx5/core/en_tx.c           | 6 +++++-
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+index 07187028f0d35..1445a9a46baea 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/en_accel.h
+@@ -102,8 +102,14 @@ static inline void
+ mlx5e_udp_gso_handle_tx_skb(struct sk_buff *skb)
+ {
+       int payload_len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr);
++      struct udphdr *udphdr;
+-      udp_hdr(skb)->len = htons(payload_len);
++      if (skb->encapsulation)
++              udphdr = (struct udphdr *)skb_inner_transport_header(skb);
++      else
++              udphdr = udp_hdr(skb);
++
++      udphdr->len = htons(payload_len);
+ }
+ struct mlx5e_accel_tx_state {
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+index a6d7e2cfcd0e1..e6e792a38a640 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+@@ -153,7 +153,11 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop)
+       *hopbyhop = 0;
+       if (skb->encapsulation) {
+-              ihs = skb_inner_tcp_all_headers(skb);
++              if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
++                      ihs = skb_inner_transport_offset(skb) +
++                            sizeof(struct udphdr);
++              else
++                      ihs = skb_inner_tcp_all_headers(skb);
+               stats->tso_inner_packets++;
+               stats->tso_inner_bytes += skb->len - ihs;
+       } else {
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-mlx5e-use-rx_missed_errors-instead-of-rx_dropped.patch b/queue-6.1/net-mlx5e-use-rx_missed_errors-instead-of-rx_dropped.patch
new file mode 100644 (file)
index 0000000..baf8b05
--- /dev/null
@@ -0,0 +1,46 @@
+From 9e0702ce7c4c61ec6a7f2f0c117d03285318eb31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 22:26:58 +0300
+Subject: net/mlx5e: Use rx_missed_errors instead of rx_dropped for reporting
+ buffer exhaustion
+
+From: Carolina Jubran <cjubran@nvidia.com>
+
+[ Upstream commit 5c74195d5dd977e97556e6fa76909b831c241230 ]
+
+Previously, the driver incorrectly used rx_dropped to report device
+buffer exhaustion.
+
+According to the documentation, rx_dropped should not be used to count
+packets dropped due to buffer exhaustion, which is the purpose of
+rx_missed_errors.
+
+Use rx_missed_errors as intended for counting packets dropped due to
+buffer exhaustion.
+
+Fixes: 269e6b3af3bf ("net/mlx5e: Report additional error statistics in get stats ndo")
+Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index e7d396434da36..e2f134e1d9fcf 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3666,7 +3666,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+               mlx5e_fold_sw_stats64(priv, stats);
+       }
+-      stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
++      stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+       stats->rx_length_errors =
+               PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-phy-micrel-set-soft_reset-callback-to-genphy_sof.patch b/queue-6.1/net-phy-micrel-set-soft_reset-callback-to-genphy_sof.patch
new file mode 100644 (file)
index 0000000..69b9f28
--- /dev/null
@@ -0,0 +1,46 @@
+From ab6d4066bcadc24e4bba97ad99ae80275a6979df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 08:54:06 +0200
+Subject: net: phy: micrel: set soft_reset callback to genphy_soft_reset for
+ KSZ8061
+
+From: Mathieu Othacehe <othacehe@gnu.org>
+
+[ Upstream commit 128d54fbcb14b8717ecf596d3dbded327b9980b3 ]
+
+Following a similar reinstate for the KSZ8081 and KSZ9031.
+
+Older kernels would use the genphy_soft_reset if the PHY did not implement
+a .soft_reset.
+
+The KSZ8061 errata described here:
+https://ww1.microchip.com/downloads/en/DeviceDoc/KSZ8061-Errata-DS80000688B.pdf
+and worked around with 232ba3a51c ("net: phy: Micrel KSZ8061: link failure after cable connect")
+is back again without this soft reset.
+
+Fixes: 6e2d85ec0559 ("net: phy: Stop with excessive soft reset")
+Tested-by: Karim Ben Houcine <karim.benhoucine@landisgyr.com>
+Signed-off-by: Mathieu Othacehe <othacehe@gnu.org>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/micrel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index 2cbb1d1830bbd..98c6d0caf8faf 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -3245,6 +3245,7 @@ static struct phy_driver ksphy_driver[] = {
+       /* PHY_BASIC_FEATURES */
+       .probe          = kszphy_probe,
+       .config_init    = ksz8061_config_init,
++      .soft_reset     = genphy_soft_reset,
+       .config_intr    = kszphy_config_intr,
+       .handle_interrupt = kszphy_handle_interrupt,
+       .suspend        = kszphy_suspend,
+-- 
+2.43.0
+
diff --git a/queue-6.1/net-usb-smsc95xx-fix-changing-led_sel-bit-value-upda.patch b/queue-6.1/net-usb-smsc95xx-fix-changing-led_sel-bit-value-upda.patch
new file mode 100644 (file)
index 0000000..c120838
--- /dev/null
@@ -0,0 +1,68 @@
+From 3c5c27f0692a174b973cfc15c1c2a87103533bc0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 14:23:14 +0530
+Subject: net: usb: smsc95xx: fix changing LED_SEL bit value updated from
+ EEPROM
+
+From: Parthiban Veerasooran <Parthiban.Veerasooran@microchip.com>
+
+[ Upstream commit 52a2f0608366a629d43dacd3191039c95fef74ba ]
+
+LED Select (LED_SEL) bit in the LED General Purpose IO Configuration
+register is used to determine the functionality of external LED pins
+(Speed Indicator, Link and Activity Indicator, Full Duplex Link
+Indicator). The default value for this bit is 0 when no EEPROM is
+present. If a EEPROM is present, the default value is the value of the
+LED Select bit in the Configuration Flags of the EEPROM. A USB Reset or
+Lite Reset (LRST) will cause this bit to be restored to the image value
+last loaded from EEPROM, or to be set to 0 if no EEPROM is present.
+
+While configuring the dual purpose GPIO/LED pins to LED outputs in the
+LED General Purpose IO Configuration register, the LED_SEL bit is changed
+as 0 and resulting the configured value from the EEPROM is cleared. The
+issue is fixed by using read-modify-write approach.
+
+Fixes: f293501c61c5 ("smsc95xx: configure LED outputs")
+Signed-off-by: Parthiban Veerasooran <Parthiban.Veerasooran@microchip.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Woojung Huh <woojung.huh@microchip.com>
+Link: https://lore.kernel.org/r/20240523085314.167650-1-Parthiban.Veerasooran@microchip.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/smsc95xx.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index cbea246664795..8e82184be5e7d 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -879,7 +879,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev)
+ static int smsc95xx_reset(struct usbnet *dev)
+ {
+       struct smsc95xx_priv *pdata = dev->driver_priv;
+-      u32 read_buf, write_buf, burst_cap;
++      u32 read_buf, burst_cap;
+       int ret = 0, timeout;
+       netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
+@@ -1003,10 +1003,13 @@ static int smsc95xx_reset(struct usbnet *dev)
+               return ret;
+       netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
++      ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
++      if (ret < 0)
++              return ret;
+       /* Configure GPIO pins as LED outputs */
+-      write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+-              LED_GPIO_CFG_FDX_LED;
+-      ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
++      read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
++                  LED_GPIO_CFG_FDX_LED;
++      ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
+       if (ret < 0)
+               return ret;
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nfnetlink_queue-acquire-rcu_read_lock-in-i.patch b/queue-6.1/netfilter-nfnetlink_queue-acquire-rcu_read_lock-in-i.patch
new file mode 100644 (file)
index 0000000..1bcd6aa
--- /dev/null
@@ -0,0 +1,79 @@
+From e766ae70b6e803aebb5800fd3785c6c78ca10228 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 May 2024 13:23:39 +0000
+Subject: netfilter: nfnetlink_queue: acquire rcu_read_lock() in
+ instance_destroy_rcu()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit dc21c6cc3d6986d938efbf95de62473982c98dec ]
+
+syzbot reported that nf_reinject() could be called without rcu_read_lock() :
+
+WARNING: suspicious RCU usage
+6.9.0-rc7-syzkaller-02060-g5c1672705a1a #0 Not tainted
+
+net/netfilter/nfnetlink_queue.c:263 suspicious rcu_dereference_check() usage!
+
+other info that might help us debug this:
+
+rcu_scheduler_active = 2, debug_locks = 1
+2 locks held by syz-executor.4/13427:
+  #0: ffffffff8e334f60 (rcu_callback){....}-{0:0}, at: rcu_lock_acquire include/linux/rcupdate.h:329 [inline]
+  #0: ffffffff8e334f60 (rcu_callback){....}-{0:0}, at: rcu_do_batch kernel/rcu/tree.c:2190 [inline]
+  #0: ffffffff8e334f60 (rcu_callback){....}-{0:0}, at: rcu_core+0xa86/0x1830 kernel/rcu/tree.c:2471
+  #1: ffff88801ca92958 (&inst->lock){+.-.}-{2:2}, at: spin_lock_bh include/linux/spinlock.h:356 [inline]
+  #1: ffff88801ca92958 (&inst->lock){+.-.}-{2:2}, at: nfqnl_flush net/netfilter/nfnetlink_queue.c:405 [inline]
+  #1: ffff88801ca92958 (&inst->lock){+.-.}-{2:2}, at: instance_destroy_rcu+0x30/0x220 net/netfilter/nfnetlink_queue.c:172
+
+stack backtrace:
+CPU: 0 PID: 13427 Comm: syz-executor.4 Not tainted 6.9.0-rc7-syzkaller-02060-g5c1672705a1a #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024
+Call Trace:
+ <IRQ>
+  __dump_stack lib/dump_stack.c:88 [inline]
+  dump_stack_lvl+0x241/0x360 lib/dump_stack.c:114
+  lockdep_rcu_suspicious+0x221/0x340 kernel/locking/lockdep.c:6712
+  nf_reinject net/netfilter/nfnetlink_queue.c:323 [inline]
+  nfqnl_reinject+0x6ec/0x1120 net/netfilter/nfnetlink_queue.c:397
+  nfqnl_flush net/netfilter/nfnetlink_queue.c:410 [inline]
+  instance_destroy_rcu+0x1ae/0x220 net/netfilter/nfnetlink_queue.c:172
+  rcu_do_batch kernel/rcu/tree.c:2196 [inline]
+  rcu_core+0xafd/0x1830 kernel/rcu/tree.c:2471
+  handle_softirqs+0x2d6/0x990 kernel/softirq.c:554
+  __do_softirq kernel/softirq.c:588 [inline]
+  invoke_softirq kernel/softirq.c:428 [inline]
+  __irq_exit_rcu+0xf4/0x1c0 kernel/softirq.c:637
+  irq_exit_rcu+0x9/0x30 kernel/softirq.c:649
+  instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
+  sysvec_apic_timer_interrupt+0xa6/0xc0 arch/x86/kernel/apic/apic.c:1043
+ </IRQ>
+ <TASK>
+
+Fixes: 9872bec773c2 ("[NETFILTER]: nfnetlink: use RCU for queue instances hash")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nfnetlink_queue.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 87a9009d5234d..5bc342cb13767 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -167,7 +167,9 @@ instance_destroy_rcu(struct rcu_head *head)
+       struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
+                                                  rcu);
++      rcu_read_lock();
+       nfqnl_flush(inst, NULL, 0);
++      rcu_read_unlock();
+       kfree(inst);
+       module_put(THIS_MODULE);
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nft_fib-allow-from-forward-input-without-i.patch b/queue-6.1/netfilter-nft_fib-allow-from-forward-input-without-i.patch
new file mode 100644 (file)
index 0000000..c58284d
--- /dev/null
@@ -0,0 +1,45 @@
+From 8565f546b907e08ec12b976a3fe5fc08b629b743 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 10:25:05 -0400
+Subject: netfilter: nft_fib: allow from forward/input without iif selector
+
+From: Eric Garver <eric@garver.life>
+
+[ Upstream commit e8ded22ef0f4831279c363c264cd41cd9d59ca9e ]
+
+This removes the restriction of needing iif selector in the
+forward/input hooks for fib lookups when requested result is
+oif/oifname.
+
+Removing this restriction allows "loose" lookups from the forward hooks.
+
+Fixes: be8be04e5ddb ("netfilter: nft_fib: reverse path filter for policy-based routing on iif")
+Signed-off-by: Eric Garver <eric@garver.life>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_fib.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/net/netfilter/nft_fib.c b/net/netfilter/nft_fib.c
+index 5748415f74d0b..0f17ace972276 100644
+--- a/net/netfilter/nft_fib.c
++++ b/net/netfilter/nft_fib.c
+@@ -34,11 +34,9 @@ int nft_fib_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
+       switch (priv->result) {
+       case NFT_FIB_RESULT_OIF:
+       case NFT_FIB_RESULT_OIFNAME:
+-              hooks = (1 << NF_INET_PRE_ROUTING);
+-              if (priv->flags & NFTA_FIB_F_IIF) {
+-                      hooks |= (1 << NF_INET_LOCAL_IN) |
+-                               (1 << NF_INET_FORWARD);
+-              }
++              hooks = (1 << NF_INET_PRE_ROUTING) |
++                      (1 << NF_INET_LOCAL_IN) |
++                      (1 << NF_INET_FORWARD);
+               break;
+       case NFT_FIB_RESULT_ADDRTYPE:
+               if (priv->flags & NFTA_FIB_F_IIF)
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nft_payload-move-struct-nft_payload_set-de.patch b/queue-6.1/netfilter-nft_payload-move-struct-nft_payload_set-de.patch
new file mode 100644 (file)
index 0000000..af72323
--- /dev/null
@@ -0,0 +1,66 @@
+From 8829dd1ff01edbbdad75b7106f905f7eb1f4b318 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 23:55:06 +0200
+Subject: netfilter: nft_payload: move struct nft_payload_set definition where
+ it belongs
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit ac1f8c049319847b1b4c6b387fdb2e3f7fb84ffc ]
+
+Not required to expose this header in nf_tables_core.h, move it to where
+it is used, ie. nft_payload.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 33c563ebf8d3 ("netfilter: nft_payload: skbuff vlan metadata mangle support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables_core.h | 10 ----------
+ net/netfilter/nft_payload.c            | 10 ++++++++++
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 1223af68cd9a4..990c3767a3509 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -66,16 +66,6 @@ struct nft_payload {
+       u8                      dreg;
+ };
+-struct nft_payload_set {
+-      enum nft_payload_bases  base:8;
+-      u8                      offset;
+-      u8                      len;
+-      u8                      sreg;
+-      u8                      csum_type;
+-      u8                      csum_offset;
+-      u8                      csum_flags;
+-};
+-
+ extern const struct nft_expr_ops nft_payload_fast_ops;
+ extern const struct nft_expr_ops nft_bitwise_fast_ops;
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 70d8d87848fc0..e36627a654244 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -656,6 +656,16 @@ static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+       return 0;
+ }
++struct nft_payload_set {
++      enum nft_payload_bases  base:8;
++      u8                      offset;
++      u8                      len;
++      u8                      sreg;
++      u8                      csum_type;
++      u8                      csum_offset;
++      u8                      csum_flags;
++};
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+                                struct nft_regs *regs,
+                                const struct nft_pktinfo *pkt)
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nft_payload-rebuild-vlan-header-on-h_proto.patch b/queue-6.1/netfilter-nft_payload-rebuild-vlan-header-on-h_proto.patch
new file mode 100644 (file)
index 0000000..d6f7c1c
--- /dev/null
@@ -0,0 +1,66 @@
+From 5be3888e02f1c3ac12426e52783868e689e10a91 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Sep 2023 10:42:10 +0200
+Subject: netfilter: nft_payload: rebuild vlan header on h_proto access
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit af84f9e447a65b4b9f79e7e5d69e19039b431c56 ]
+
+nft can perform merging of adjacent payload requests.
+This means that:
+
+ether saddr 00:11 ... ether type 8021ad ...
+
+is a single payload expression, for 8 bytes, starting at the
+ethernet source offset.
+
+Check that offset+length is fully within the source/destination mac
+addersses.
+
+This bug prevents 'ether type' from matching the correct h_proto in case
+vlan tag got stripped.
+
+Fixes: de6843be3082 ("netfilter: nft_payload: rebuild vlan header when needed")
+Reported-by: David Ward <david.ward@ll.mit.edu>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Stable-dep-of: 33c563ebf8d3 ("netfilter: nft_payload: skbuff vlan metadata mangle support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 74777a687eb5f..eaa629c6d7da6 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -109,6 +109,17 @@ static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+       return pkt->inneroff;
+ }
++static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++{
++      unsigned int len = priv->offset + priv->len;
++
++      /* data past ether src/dst requested, copy needed */
++      if (len > offsetof(struct ethhdr, h_proto))
++              return true;
++
++      return false;
++}
++
+ void nft_payload_eval(const struct nft_expr *expr,
+                     struct nft_regs *regs,
+                     const struct nft_pktinfo *pkt)
+@@ -127,7 +138,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+                       goto err;
+               if (skb_vlan_tag_present(skb) &&
+-                  priv->offset >= offsetof(struct ethhdr, h_proto)) {
++                  nft_payload_need_vlan_copy(priv)) {
+                       if (!nft_payload_copy_vlan(dest, skb,
+                                                  priv->offset, priv->len))
+                               goto err;
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nft_payload-rebuild-vlan-header-when-neede.patch b/queue-6.1/netfilter-nft_payload-rebuild-vlan-header-when-neede.patch
new file mode 100644 (file)
index 0000000..b34e99b
--- /dev/null
@@ -0,0 +1,36 @@
+From 1c770538323d7502d9d52e9d18d8febdd104af19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 09:38:42 +0200
+Subject: netfilter: nft_payload: rebuild vlan header when needed
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit de6843be3082d416eaf2a00b72dad95c784ca980 ]
+
+Skip rebuilding the vlan header when accessing destination and source
+mac address.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 33c563ebf8d3 ("netfilter: nft_payload: skbuff vlan metadata mangle support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index e36627a654244..74777a687eb5f 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -126,7 +126,8 @@ void nft_payload_eval(const struct nft_expr *expr,
+               if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
+                       goto err;
+-              if (skb_vlan_tag_present(skb)) {
++              if (skb_vlan_tag_present(skb) &&
++                  priv->offset >= offsetof(struct ethhdr, h_proto)) {
+                       if (!nft_payload_copy_vlan(dest, skb,
+                                                  priv->offset, priv->len))
+                               goto err;
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nft_payload-restore-vlan-q-in-q-match-supp.patch b/queue-6.1/netfilter-nft_payload-restore-vlan-q-in-q-match-supp.patch
new file mode 100644 (file)
index 0000000..66db319
--- /dev/null
@@ -0,0 +1,74 @@
+From f4e2e2604d2ce5569ab53bfa3c78a80bafacda6d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 May 2024 23:02:24 +0200
+Subject: netfilter: nft_payload: restore vlan q-in-q match support
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit aff5c01fa1284d606f8e7cbdaafeef2511bb46c1 ]
+
+Revert f6ae9f120dad ("netfilter: nft_payload: add C-VLAN support").
+
+f41f72d09ee1 ("netfilter: nft_payload: simplify vlan header handling")
+already allows to match on inner vlan tags by subtract the vlan header
+size to the payload offset which has been popped and stored in skbuff
+metadata fields.
+
+Fixes: f6ae9f120dad ("netfilter: nft_payload: add C-VLAN support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index f44f2eaf32172..70d8d87848fc0 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -44,36 +44,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+       int mac_off = skb_mac_header(skb) - skb->data;
+       u8 *vlanh, *dst_u8 = (u8 *) d;
+       struct vlan_ethhdr veth;
+-      u8 vlan_hlen = 0;
+-
+-      if ((skb->protocol == htons(ETH_P_8021AD) ||
+-           skb->protocol == htons(ETH_P_8021Q)) &&
+-          offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+-              vlan_hlen += VLAN_HLEN;
+       vlanh = (u8 *) &veth;
+-      if (offset < VLAN_ETH_HLEN + vlan_hlen) {
++      if (offset < VLAN_ETH_HLEN) {
+               u8 ethlen = len;
+-              if (vlan_hlen &&
+-                  skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+-                      return false;
+-              else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
++              if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+                       return false;
+-              if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+-                      ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
++              if (offset + len > VLAN_ETH_HLEN)
++                      ethlen -= offset + len - VLAN_ETH_HLEN;
+-              memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
++              memcpy(dst_u8, vlanh + offset, ethlen);
+               len -= ethlen;
+               if (len == 0)
+                       return true;
+               dst_u8 += ethlen;
+-              offset = ETH_HLEN + vlan_hlen;
++              offset = ETH_HLEN;
+       } else {
+-              offset -= VLAN_HLEN + vlan_hlen;
++              offset -= VLAN_HLEN;
+       }
+       return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-nft_payload-skbuff-vlan-metadata-mangle-su.patch b/queue-6.1/netfilter-nft_payload-skbuff-vlan-metadata-mangle-su.patch
new file mode 100644 (file)
index 0000000..b1616f7
--- /dev/null
@@ -0,0 +1,147 @@
+From fcee422f065cdd7c76168ee5902634be2b3b5733 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 May 2024 22:50:34 +0200
+Subject: netfilter: nft_payload: skbuff vlan metadata mangle support
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 33c563ebf8d3deed7d8addd20d77398ac737ef9a ]
+
+Userspace assumes vlan header is present at a given offset, but vlan
+offload allows to store this in metadata fields of the skbuff. Hence
+mangling vlan results in a garbled packet. Handle this transparently by
+adding a parser to the kernel.
+
+If vlan metadata is present and payload offset is over 12 bytes (source
+and destination mac address fields), then subtract vlan header present
+in vlan metadata, otherwise mangle vlan metadata based on offset and
+length, extracting data from the source register.
+
+This is similar to:
+
+  8cfd23e67401 ("netfilter: nft_payload: work around vlan header stripping")
+
+to deal with vlan payload mangling.
+
+Fixes: 7ec3f7b47b8d ("netfilter: nft_payload: add packet mangling support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 72 +++++++++++++++++++++++++++++++++----
+ 1 file changed, 65 insertions(+), 7 deletions(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index eaa629c6d7da6..1b001dd2bc9ad 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -109,12 +109,12 @@ static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+       return pkt->inneroff;
+ }
+-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
+ {
+-      unsigned int len = priv->offset + priv->len;
++      unsigned int boundary = offset + len;
+       /* data past ether src/dst requested, copy needed */
+-      if (len > offsetof(struct ethhdr, h_proto))
++      if (boundary > offsetof(struct ethhdr, h_proto))
+               return true;
+       return false;
+@@ -138,7 +138,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+                       goto err;
+               if (skb_vlan_tag_present(skb) &&
+-                  nft_payload_need_vlan_copy(priv)) {
++                  nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+                       if (!nft_payload_copy_vlan(dest, skb,
+                                                  priv->offset, priv->len))
+                               goto err;
+@@ -678,21 +678,79 @@ struct nft_payload_set {
+       u8                      csum_flags;
+ };
++/* This is not struct vlan_hdr. */
++struct nft_payload_vlan_hdr {
++      __be16                  h_vlan_proto;
++      __be16                  h_vlan_TCI;
++};
++
++static bool
++nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
++                   int *vlan_hlen)
++{
++      struct nft_payload_vlan_hdr *vlanh;
++      __be16 vlan_proto;
++      u16 vlan_tci;
++
++      if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
++              *vlan_hlen = VLAN_HLEN;
++              return true;
++      }
++
++      switch (offset) {
++      case offsetof(struct vlan_ethhdr, h_vlan_proto):
++              if (len == 2) {
++                      vlan_proto = nft_reg_load_be16(src);
++                      skb->vlan_proto = vlan_proto;
++              } else if (len == 4) {
++                      vlanh = (struct nft_payload_vlan_hdr *)src;
++                      __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
++                                             ntohs(vlanh->h_vlan_TCI));
++              } else {
++                      return false;
++              }
++              break;
++      case offsetof(struct vlan_ethhdr, h_vlan_TCI):
++              if (len != 2)
++                      return false;
++
++              vlan_tci = ntohs(nft_reg_load_be16(src));
++              skb->vlan_tci = vlan_tci;
++              break;
++      default:
++              return false;
++      }
++
++      return true;
++}
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+                                struct nft_regs *regs,
+                                const struct nft_pktinfo *pkt)
+ {
+       const struct nft_payload_set *priv = nft_expr_priv(expr);
+-      struct sk_buff *skb = pkt->skb;
+       const u32 *src = &regs->data[priv->sreg];
+-      int offset, csum_offset;
++      int offset, csum_offset, vlan_hlen = 0;
++      struct sk_buff *skb = pkt->skb;
+       __wsum fsum, tsum;
+       switch (priv->base) {
+       case NFT_PAYLOAD_LL_HEADER:
+               if (!skb_mac_header_was_set(skb))
+                       goto err;
+-              offset = skb_mac_header(skb) - skb->data;
++
++              if (skb_vlan_tag_present(skb) &&
++                  nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
++                      if (!nft_payload_set_vlan(src, skb,
++                                                priv->offset, priv->len,
++                                                &vlan_hlen))
++                              goto err;
++
++                      if (!vlan_hlen)
++                              return;
++              }
++
++              offset = skb_mac_header(skb) - skb->data - vlan_hlen;
+               break;
+       case NFT_PAYLOAD_NETWORK_HEADER:
+               offset = skb_network_offset(skb);
+-- 
+2.43.0
+
diff --git a/queue-6.1/netfilter-tproxy-bail-out-if-ip-has-been-disabled-on.patch b/queue-6.1/netfilter-tproxy-bail-out-if-ip-has-been-disabled-on.patch
new file mode 100644 (file)
index 0000000..a7dda5c
--- /dev/null
@@ -0,0 +1,45 @@
+From d6aef109a4e48a71e890ccc60d1d427c8f36ae2a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 May 2024 12:27:15 +0200
+Subject: netfilter: tproxy: bail out if IP has been disabled on the device
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 21a673bddc8fd4873c370caf9ae70ffc6d47e8d3 ]
+
+syzbot reports:
+general protection fault, probably for non-canonical address 0xdffffc0000000003: 0000 [#1] PREEMPT SMP KASAN PTI
+KASAN: null-ptr-deref in range [0x0000000000000018-0x000000000000001f]
+[..]
+RIP: 0010:nf_tproxy_laddr4+0xb7/0x340 net/ipv4/netfilter/nf_tproxy_ipv4.c:62
+Call Trace:
+ nft_tproxy_eval_v4 net/netfilter/nft_tproxy.c:56 [inline]
+ nft_tproxy_eval+0xa9a/0x1a00 net/netfilter/nft_tproxy.c:168
+
+__in_dev_get_rcu() can return NULL, so check for this.
+
+Reported-and-tested-by: syzbot+b94a6818504ea90d7661@syzkaller.appspotmail.com
+Fixes: cc6eb4338569 ("tproxy: use the interface primary IP address as a default value for --on-ip")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_tproxy_ipv4.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index 69e3317996043..73e66a088e25e 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
+       laddr = 0;
+       indev = __in_dev_get_rcu(skb->dev);
++      if (!indev)
++              return daddr;
+       in_dev_for_each_ifa_rcu(ifa, indev) {
+               if (ifa->ifa_flags & IFA_F_SECONDARY)
+-- 
+2.43.0
+
diff --git a/queue-6.1/nvmet-fix-ns-enable-disable-possible-hang.patch b/queue-6.1/nvmet-fix-ns-enable-disable-possible-hang.patch
new file mode 100644 (file)
index 0000000..98c0a95
--- /dev/null
@@ -0,0 +1,59 @@
+From c7217d8f2b52426c5fe515ebcb9ae973dc215af2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 23:20:28 +0300
+Subject: nvmet: fix ns enable/disable possible hang
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit f97914e35fd98b2b18fb8a092e0a0799f73afdfe ]
+
+When disabling an nvmet namespace, there is a period where the
+subsys->lock is released, as the ns disable waits for backend IO to
+complete, and the ns percpu ref to be properly killed. The original
+intent was to avoid taking the subsystem lock for a prolong period as
+other processes may need to acquire it (for example new incoming
+connections).
+
+However, it opens up a window where another process may come in and
+enable the ns, (re)intiailizing the ns percpu_ref, causing the disable
+sequence to hang.
+
+Solve this by taking the global nvmet_config_sem over the entire configfs
+enable/disable sequence.
+
+Fixes: a07b4970f464 ("nvmet: add a generic NVMe target")
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/configfs.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 40c1c3db5d7cd..2e87718aa194d 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -537,10 +537,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
+       if (strtobool(page, &enable))
+               return -EINVAL;
++      /*
++       * take a global nvmet_config_sem because the disable routine has a
++       * window where it releases the subsys-lock, giving a chance to
++       * a parallel enable to concurrently execute causing the disable to
++       * have a misaccounting of the ns percpu_ref.
++       */
++      down_write(&nvmet_config_sem);
+       if (enable)
+               ret = nvmet_ns_enable(ns);
+       else
+               nvmet_ns_disable(ns);
++      up_write(&nvmet_config_sem);
+       return ret ? ret : count;
+ }
+-- 
+2.43.0
+
diff --git a/queue-6.1/powerpc-pseries-lparcfg-drop-error-message-from-gues.patch b/queue-6.1/powerpc-pseries-lparcfg-drop-error-message-from-gues.patch
new file mode 100644 (file)
index 0000000..8748b4a
--- /dev/null
@@ -0,0 +1,41 @@
+From 91c9c207d22615d6beeff4b35930e81cc4d315d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 May 2024 14:29:54 -0500
+Subject: powerpc/pseries/lparcfg: drop error message from guest name lookup
+
+From: Nathan Lynch <nathanl@linux.ibm.com>
+
+[ Upstream commit 12870ae3818e39ea65bf710f645972277b634f72 ]
+
+It's not an error or exceptional situation when the hosting
+environment does not expose a name for the LP/guest via RTAS or the
+device tree. This happens with qemu when run without the '-name'
+option. The message also lacks a newline. Remove it.
+
+Signed-off-by: Nathan Lynch <nathanl@linux.ibm.com>
+Fixes: eddaa9a40275 ("powerpc/pseries: read the lpar name from the firmware")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240524-lparcfg-updates-v2-1-62e2e9d28724@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/platforms/pseries/lparcfg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
+index ca10a3682c46e..a0364028f5ef1 100644
+--- a/arch/powerpc/platforms/pseries/lparcfg.c
++++ b/arch/powerpc/platforms/pseries/lparcfg.c
+@@ -393,8 +393,8 @@ static int read_dt_lpar_name(struct seq_file *m)
+ static void read_lpar_name(struct seq_file *m)
+ {
+-      if (read_rtas_lpar_name(m) && read_dt_lpar_name(m))
+-              pr_err_once("Error can't get the LPAR name");
++      if (read_rtas_lpar_name(m))
++              read_dt_lpar_name(m);
+ }
+ #define SPLPAR_CHARACTERISTICS_TOKEN 20
+-- 
+2.43.0
+
diff --git a/queue-6.1/powerpc-uaccess-use-yz-asm-constraint-for-ld.patch b/queue-6.1/powerpc-uaccess-use-yz-asm-constraint-for-ld.patch
new file mode 100644 (file)
index 0000000..4e2d583
--- /dev/null
@@ -0,0 +1,64 @@
+From 974321b78f1a28f4776f6bbdf5e9609453e11d0b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 22:30:29 +1000
+Subject: powerpc/uaccess: Use YZ asm constraint for ld
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 50934945d54238d2d6d8db4b7c1d4c90d2696c57 ]
+
+The 'ld' instruction requires a 4-byte aligned displacement because it
+is a DS-form instruction. But the "m" asm constraint doesn't enforce
+that.
+
+Add a special case of __get_user_asm2_goto() so that the "YZ" constraint
+can be used for "ld".
+
+The "Z" constraint is documented in the GCC manual PowerPC machine
+constraints, and specifies a "memory operand accessed with indexed or
+indirect addressing". "Y" is not documented in the manual but specifies
+a "memory operand for a DS-form instruction". Using both allows the
+compiler to generate a DS-form "ld" or X-form "ldx" as appropriate.
+
+The change has to be conditional on CONFIG_PPC_KERNEL_PREFIXED because
+the "Y" constraint does not guarantee 4-byte alignment when prefixed
+instructions are enabled.
+
+No build errors have been reported due to this, but the possibility is
+there depending on compiler code generation decisions.
+
+Fixes: c20beffeec3c ("powerpc/uaccess: Use flexible addressing with __put_user()/__get_user()")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240529123029.146953-2-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/uaccess.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index 45d4c9cf3f3a2..60eead5d720a3 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -141,8 +141,19 @@ do {                                                              \
+               : label)
+ #ifdef __powerpc64__
++#ifdef CONFIG_PPC_KERNEL_PREFIXED
+ #define __get_user_asm2_goto(x, addr, label)                  \
+       __get_user_asm_goto(x, addr, label, "ld")
++#else
++#define __get_user_asm2_goto(x, addr, label)                  \
++      asm_goto_output(                                        \
++              "1:     ld%U1%X1 %0, %1 # get_user\n"           \
++              EX_TABLE(1b, %l2)                               \
++              : "=r" (x)                                      \
++              : DS_FORM_CONSTRAINT (*addr)                    \
++              :                                               \
++              : label)
++#endif // CONFIG_PPC_KERNEL_PREFIXED
+ #else /* __powerpc64__ */
+ #define __get_user_asm2_goto(x, addr, label)                  \
+       asm_goto_output(                                        \
+-- 
+2.43.0
+
diff --git a/queue-6.1/riscv-prevent-pt_regs-corruption-for-secondary-idle-.patch b/queue-6.1/riscv-prevent-pt_regs-corruption-for-secondary-idle-.patch
new file mode 100644 (file)
index 0000000..796ab99
--- /dev/null
@@ -0,0 +1,63 @@
+From 98ccbf5a2e96b66c8fcbb1b7a1ff68992262f00e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 11:43:23 +0300
+Subject: riscv: prevent pt_regs corruption for secondary idle threads
+
+From: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
+
+[ Upstream commit a638b0461b58aa3205cd9d5f14d6f703d795b4af ]
+
+Top of the kernel thread stack should be reserved for pt_regs. However
+this is not the case for the idle threads of the secondary boot harts.
+Their stacks overlap with their pt_regs, so both may get corrupted.
+
+Similar issue has been fixed for the primary hart, see c7cdd96eca28
+("riscv: prevent stack corruption by reserving task_pt_regs(p) early").
+However that fix was not propagated to the secondary harts. The problem
+has been noticed in some CPU hotplug tests with V enabled. The function
+smp_callin stored several registers on stack, corrupting top of pt_regs
+structure including status field. As a result, kernel attempted to save
+or restore inexistent V context.
+
+Fixes: 9a2451f18663 ("RISC-V: Avoid using per cpu array for ordered booting")
+Fixes: 2875fe056156 ("RISC-V: Add cpu_ops and modify default booting method")
+Signed-off-by: Sergey Matyukevich <sergey.matyukevich@syntacore.com>
+Reviewed-by: Alexandre Ghiti <alexghiti@rivosinc.com>
+Link: https://lore.kernel.org/r/20240523084327.2013211-1-geomatsi@gmail.com
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/cpu_ops_sbi.c      | 2 +-
+ arch/riscv/kernel/cpu_ops_spinwait.c | 3 +--
+ 2 files changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
+index efa0f0816634c..93cbc38d18057 100644
+--- a/arch/riscv/kernel/cpu_ops_sbi.c
++++ b/arch/riscv/kernel/cpu_ops_sbi.c
+@@ -72,7 +72,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+       /* Make sure tidle is updated */
+       smp_mb();
+       bdata->task_ptr = tidle;
+-      bdata->stack_ptr = task_stack_page(tidle) + THREAD_SIZE;
++      bdata->stack_ptr = task_pt_regs(tidle);
+       /* Make sure boot data is updated */
+       smp_mb();
+       hsm_data = __pa(bdata);
+diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
+index d98d19226b5f5..691e0c5366d2b 100644
+--- a/arch/riscv/kernel/cpu_ops_spinwait.c
++++ b/arch/riscv/kernel/cpu_ops_spinwait.c
+@@ -34,8 +34,7 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
+       /* Make sure tidle is updated */
+       smp_mb();
+-      WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid],
+-                 task_stack_page(tidle) + THREAD_SIZE);
++      WRITE_ONCE(__cpu_spinwait_stack_pointer[hartid], task_pt_regs(tidle));
+       WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
+ }
+-- 
+2.43.0
+
index 526cbaec16d76fb99e59f52c0a7126fdb2c2c486..0967af74bf78bc50436f8b82b8fa7be7ccd8302b 100644 (file)
@@ -425,3 +425,45 @@ nfc-nci-fix-kcov-check-in-nci_rx_work.patch
 nfc-nci-fix-handling-of-zero-length-payload-packets-.patch
 drivers-xen-improve-the-late-xenstore-init-protocol.patch
 ice-interpret-.set_channels-input-differently.patch
+netfilter-nfnetlink_queue-acquire-rcu_read_lock-in-i.patch
+netfilter-nft_payload-restore-vlan-q-in-q-match-supp.patch
+spi-don-t-mark-message-dma-mapped-when-no-transfer-i.patch
+dma-mapping-benchmark-fix-node-id-validation.patch
+dma-mapping-benchmark-handle-numa_no_node-correctly.patch
+nvmet-fix-ns-enable-disable-possible-hang.patch
+net-phy-micrel-set-soft_reset-callback-to-genphy_sof.patch
+net-mlx5-lag-do-bond-only-if-slaves-agree-on-roce-st.patch
+net-mlx5e-fix-ipsec-tunnel-mode-offload-feature-chec.patch
+net-mlx5e-use-rx_missed_errors-instead-of-rx_dropped.patch
+net-mlx5e-fix-udp-gso-for-encapsulated-packets.patch
+dma-buf-sw-sync-don-t-enable-irq-from-sync_print_obj.patch
+bpf-fix-potential-integer-overflow-in-resolve_btfids.patch
+alsa-jack-use-guard-for-locking.patch
+alsa-core-remove-debugfs-at-disconnection.patch
+alsa-hda-realtek-add-quirk-for-asus-rog-g634z.patch
+alsa-hda-realtek-amend-g634-quirk-to-enable-rear-spe.patch
+alsa-hda-realtek-adjust-g814jzr-to-use-spi-init-for-.patch
+enic-validate-length-of-nl-attributes-in-enic_set_vf.patch
+af_unix-read-sk-sk_hash-under-bindlock-during-bind.patch
+net-usb-smsc95xx-fix-changing-led_sel-bit-value-upda.patch
+bpf-allow-delete-from-sockmap-sockhash-only-if-updat.patch
+net-fec-add-fec_enet_deinit.patch
+ice-fix-accounting-if-a-vlan-already-exists.patch
+netfilter-nft_payload-move-struct-nft_payload_set-de.patch
+netfilter-nft_payload-rebuild-vlan-header-when-neede.patch
+netfilter-nft_payload-rebuild-vlan-header-on-h_proto.patch
+netfilter-nft_payload-skbuff-vlan-metadata-mangle-su.patch
+netfilter-tproxy-bail-out-if-ip-has-been-disabled-on.patch
+netfilter-nft_fib-allow-from-forward-input-without-i.patch
+kconfig-fix-comparison-to-constant-symbols-m-n.patch
+drm-i915-guc-avoid-field_prep-warning.patch
+spi-stm32-don-t-warn-about-spurious-interrupts.patch
+net-dsa-microchip-fix-rgmii-error-in-ksz-dsa-driver.patch
+net-ena-add-dynamic-recycling-mechanism-for-rx-buffe.patch
+net-ena-reduce-lines-with-longer-column-width-bounda.patch
+net-ena-fix-redundant-device-numa-node-override.patch
+ipvlan-dont-use-skb-sk-in-ipvlan_process_v-4-6-_outb.patch
+powerpc-pseries-lparcfg-drop-error-message-from-gues.patch
+powerpc-uaccess-use-yz-asm-constraint-for-ld.patch
+hwmon-shtc1-fix-property-misspelling.patch
+riscv-prevent-pt_regs-corruption-for-secondary-idle-.patch
diff --git a/queue-6.1/spi-don-t-mark-message-dma-mapped-when-no-transfer-i.patch b/queue-6.1/spi-don-t-mark-message-dma-mapped-when-no-transfer-i.patch
new file mode 100644 (file)
index 0000000..5c1d499
--- /dev/null
@@ -0,0 +1,48 @@
+From a542f0d88ec4712c0628d12146ba69064f33d781 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 20:09:49 +0300
+Subject: spi: Don't mark message DMA mapped when no transfer in it is
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 9f788ba457b45b0ce422943fcec9fa35c4587764 ]
+
+There is no need to set the DMA mapped flag of the message if it has
+no mapped transfers. Moreover, it may give the code a chance to take
+the wrong paths, i.e. to exercise DMA related APIs on unmapped data.
+Make __spi_map_msg() to bail earlier on the above mentioned cases.
+
+Fixes: 99adef310f68 ("spi: Provide core support for DMA mapping transfers")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://msgid.link/r/20240522171018.3362521-2-andriy.shevchenko@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 1018feff468c9..50fe5aa450f84 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1147,6 +1147,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+       else
+               rx_dev = ctlr->dev.parent;
++      ret = -ENOMSG;
+       list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               /* The sync is done before each transfer. */
+               unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;
+@@ -1176,6 +1177,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+                       }
+               }
+       }
++      /* No transfer has been mapped, bail out with success */
++      if (ret)
++              return 0;
+       ctlr->cur_rx_dma_dev = rx_dev;
+       ctlr->cur_tx_dma_dev = tx_dev;
+-- 
+2.43.0
+
diff --git a/queue-6.1/spi-stm32-don-t-warn-about-spurious-interrupts.patch b/queue-6.1/spi-stm32-don-t-warn-about-spurious-interrupts.patch
new file mode 100644 (file)
index 0000000..d076a16
--- /dev/null
@@ -0,0 +1,43 @@
+From 790911d23faa282b3c0f8e5907888729f9bcdd39 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 12:52:42 +0200
+Subject: spi: stm32: Don't warn about spurious interrupts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 95d7c452a26564ef0c427f2806761b857106d8c4 ]
+
+The dev_warn to notify about a spurious interrupt was introduced with
+the reasoning that these are unexpected. However spurious interrupts
+tend to trigger continously and the error message on the serial console
+prevents that the core's detection of spurious interrupts kicks in
+(which disables the irq) and just floods the console.
+
+Fixes: c64e7efe46b7 ("spi: stm32: make spurious and overrun interrupts visible")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://msgid.link/r/20240521105241.62400-2-u.kleine-koenig@pengutronix.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-stm32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 12241815510d4..c37d557f7d03c 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -884,7 +884,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+               mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
+       if (!(sr & mask)) {
+-              dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
++              dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+                        sr, ier);
+               spin_unlock_irqrestore(&spi->lock, flags);
+               return IRQ_NONE;
+-- 
+2.43.0
+