--- /dev/null
+From 25962dbf6c6ff498c922c8000ee3604257ebe4f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 May 2024 13:20:07 +0200
+Subject: bpf: Allow delete from sockmap/sockhash only if update is allowed
+
+From: Jakub Sitnicki <jakub@cloudflare.com>
+
+[ Upstream commit 98e948fb60d41447fd8d2d0c3b8637fc6b6dc26d ]
+
+We have seen an influx of syzkaller reports where a BPF program attached to
+a tracepoint triggers a locking rule violation by performing a map_delete
+on a sockmap/sockhash.
+
+We don't intend to support this artificial use scenario. Extend the
+existing verifier allowed-program-type check for updating sockmap/sockhash
+to also cover deleting from a map.
+
+From now on only BPF programs which were previously allowed to update
+sockmap/sockhash can delete from these map types.
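+
+For illustration, a minimal sketch (not part of this patch; the map,
+section and program names are made up) of the kind of tracepoint
+program that the verifier now rejects at load time:
+
+  #include <linux/bpf.h>
+  #include <bpf/bpf_helpers.h>
+
+  struct {
+          __uint(type, BPF_MAP_TYPE_SOCKHASH);
+          __uint(max_entries, 16);
+          __type(key, __u32);
+          __type(value, __u64);
+  } demo_sock_hash SEC(".maps");
+
+  SEC("tracepoint/syscalls/sys_enter_write")
+  int demo_delete(void *ctx)
+  {
+          __u32 key = 0;
+
+          /* Tracepoint programs are not in the allowed set, so the
+           * verifier now refuses this delete just as it already
+           * refused updates.
+           */
+          return bpf_map_delete_elem(&demo_sock_hash, &key);
+  }
+
+  char _license[] SEC("license") = "GPL";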
+
+Fixes: ff9105993240 ("bpf, sockmap: Prevent lock inversion deadlock in map delete elem")
+Reported-by: Tetsuo Handa <penguin-kernel@i-love.sakura.ne.jp>
+Reported-by: syzbot+ec941d6e24f633a59172@syzkaller.appspotmail.com
+Signed-off-by: Jakub Sitnicki <jakub@cloudflare.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Tested-by: syzbot+ec941d6e24f633a59172@syzkaller.appspotmail.com
+Acked-by: John Fastabend <john.fastabend@gmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=ec941d6e24f633a59172
+Link: https://lore.kernel.org/bpf/20240527-sockmap-verify-deletes-v1-1-944b372f2101@cloudflare.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 94d952967fbf9..07ca1157f97cf 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -5568,7 +5568,8 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ enum bpf_attach_type eatype = env->prog->expected_attach_type;
+ enum bpf_prog_type type = resolve_prog_type(env->prog);
+
+- if (func_id != BPF_FUNC_map_update_elem)
++ if (func_id != BPF_FUNC_map_update_elem &&
++ func_id != BPF_FUNC_map_delete_elem)
+ return false;
+
+ /* It's not possible to get access to a locked struct sock in these
+@@ -5579,6 +5580,11 @@ static bool may_update_sockmap(struct bpf_verifier_env *env, int func_id)
+ if (eatype == BPF_TRACE_ITER)
+ return true;
+ break;
++ case BPF_PROG_TYPE_SOCK_OPS:
++ /* map_update allowed only via dedicated helpers with event type checks */
++ if (func_id == BPF_FUNC_map_delete_elem)
++ return true;
++ break;
+ case BPF_PROG_TYPE_SOCKET_FILTER:
+ case BPF_PROG_TYPE_SCHED_CLS:
+ case BPF_PROG_TYPE_SCHED_ACT:
+@@ -5666,7 +5672,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ case BPF_MAP_TYPE_SOCKMAP:
+ if (func_id != BPF_FUNC_sk_redirect_map &&
+ func_id != BPF_FUNC_sock_map_update &&
+- func_id != BPF_FUNC_map_delete_elem &&
+ func_id != BPF_FUNC_msg_redirect_map &&
+ func_id != BPF_FUNC_sk_select_reuseport &&
+ func_id != BPF_FUNC_map_lookup_elem &&
+@@ -5676,7 +5681,6 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
+ case BPF_MAP_TYPE_SOCKHASH:
+ if (func_id != BPF_FUNC_sk_redirect_hash &&
+ func_id != BPF_FUNC_sock_hash_update &&
+- func_id != BPF_FUNC_map_delete_elem &&
+ func_id != BPF_FUNC_msg_redirect_hash &&
+ func_id != BPF_FUNC_sk_select_reuseport &&
+ func_id != BPF_FUNC_map_lookup_elem &&
+--
+2.43.0
+
--- /dev/null
+From 7dae606a4aca43edd676ee1f91c7864b03b7eda4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 May 2024 09:09:31 +0200
+Subject: bpf: Fix potential integer overflow in resolve_btfids
+
+From: Friedrich Vock <friedrich.vock@gmx.de>
+
+[ Upstream commit 44382b3ed6b2787710c8ade06c0e97f5970a47c8 ]
+
+err is a 32-bit integer, but elf_update returns an off_t, which is 64-bit
+at least on 64-bit platforms. If symbols_patch is called on a binary between
+2-4GB in size, the result will be negative when cast to a 32-bit integer,
+which the code assumes means an error occurred. This can wrongly trigger
+build failures when building very large kernel images.
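+
+For illustration, a small userspace sketch (not kernel code) of the
+truncation, assuming a 64-bit off_t as on 64-bit platforms:
+
+  #include <stdio.h>
+  #include <sys/types.h>
+
+  int main(void)
+  {
+          off_t ret = 3221225472LL; /* ~3 GiB, a plausible elf_update() result */
+          int err = ret;            /* truncated to 32 bits: becomes negative */
+
+          if (err < 0)
+                  printf("spuriously treated as an error: %d\n", err);
+          return 0;
+  }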
+
+Fixes: fbbb68de80a4 ("bpf: Add resolve_btfids tool to resolve BTF IDs in ELF object")
+Signed-off-by: Friedrich Vock <friedrich.vock@gmx.de>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Acked-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20240514070931.199694-1-friedrich.vock@gmx.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/bpf/resolve_btfids/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c
+index 45e0d640618ac..55ca620b56918 100644
+--- a/tools/bpf/resolve_btfids/main.c
++++ b/tools/bpf/resolve_btfids/main.c
+@@ -643,7 +643,7 @@ static int sets_patch(struct object *obj)
+
+ static int symbols_patch(struct object *obj)
+ {
+- int err;
++ off_t err;
+
+ if (__symbols_patch(obj, &obj->structs) ||
+ __symbols_patch(obj, &obj->unions) ||
+--
+2.43.0
+
--- /dev/null
+From db9d33fa5317d3a2032b6ef4584b5f7b93161ffa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 5 May 2024 23:08:31 +0900
+Subject: dma-buf/sw-sync: don't enable IRQ from sync_print_obj()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+
+[ Upstream commit b794918961516f667b0c745aebdfebbb8a98df39 ]
+
+Commit a6aa8fca4d79 ("dma-buf/sw-sync: Reduce irqsave/irqrestore from
+known context") mistakenly replaced spin_unlock_irqrestore() with
+spin_unlock_irq() in both sync_debugfs_show() and sync_print_obj(), even
+though sync_print_obj() is called from sync_debugfs_show(). As a result,
+lockdep complains with an inconsistent lock state warning.
+
+Use plain spin_{lock,unlock}() in sync_print_obj(), since
+sync_debugfs_show() already uses spin_{lock,unlock}_irq().
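+
+A minimal sketch of the locking pattern, with illustrative names rather
+than the actual sync_debug.c symbols:
+
+  #include <linux/list.h>
+  #include <linux/seq_file.h>
+  #include <linux/spinlock.h>
+
+  static DEFINE_SPINLOCK(demo_list_lock);
+
+  struct demo_obj {
+          spinlock_t lock;
+          struct list_head pt_list;
+  };
+
+  static void demo_print_obj(struct seq_file *s, struct demo_obj *obj)
+  {
+          /* The caller already disabled IRQs; spin_unlock_irq() here
+           * would re-enable them while demo_list_lock is still held.
+           */
+          spin_lock(&obj->lock);
+          /* ... walk obj->pt_list ... */
+          spin_unlock(&obj->lock);
+  }
+
+  static int demo_show(struct seq_file *s, struct demo_obj *obj)
+  {
+          spin_lock_irq(&demo_list_lock);   /* outer lock disables IRQs */
+          demo_print_obj(s, obj);
+          spin_unlock_irq(&demo_list_lock); /* IRQs re-enabled only here */
+          return 0;
+  }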
+
+Reported-by: syzbot <syzbot+a225ee3df7e7f9372dbe@syzkaller.appspotmail.com>
+Closes: https://syzkaller.appspot.com/bug?extid=a225ee3df7e7f9372dbe
+Fixes: a6aa8fca4d79 ("dma-buf/sw-sync: Reduce irqsave/irqrestore from known context")
+Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/c2e46020-aaa6-4e06-bf73-f05823f913f0@I-love.SAKURA.ne.jp
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma-buf/sync_debug.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
+index 101394f16930f..237bce21d1e72 100644
+--- a/drivers/dma-buf/sync_debug.c
++++ b/drivers/dma-buf/sync_debug.c
+@@ -110,12 +110,12 @@ static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
+
+- spin_lock_irq(&obj->lock);
++ spin_lock(&obj->lock); /* Caller already disabled IRQ. */
+ list_for_each(pos, &obj->pt_list) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+ sync_print_fence(s, &pt->base, false);
+ }
+- spin_unlock_irq(&obj->lock);
++ spin_unlock(&obj->lock);
+ }
+
+ static void sync_print_sync_file(struct seq_file *s,
+--
+2.43.0
+
--- /dev/null
+From 28b30a5dd3a9e6d31003035281ce75f3226baca3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 May 2024 14:47:03 +0300
+Subject: dma-mapping: benchmark: fix node id validation
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit 1ff05e723f7ca30644b8ec3fb093f16312e408ad ]
+
+While validating node ids in map_benchmark_ioctl(), node_possible() may
+be given an invalid argument outside of the [0, MAX_NUMNODES-1] range,
+leading to:
+
+BUG: KASAN: wild-memory-access in map_benchmark_ioctl (kernel/dma/map_benchmark.c:214)
+Read of size 8 at addr 1fffffff8ccb6398 by task dma_map_benchma/971
+CPU: 7 PID: 971 Comm: dma_map_benchma Not tainted 6.9.0-rc6 #37
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
+Call Trace:
+ <TASK>
+dump_stack_lvl (lib/dump_stack.c:117)
+kasan_report (mm/kasan/report.c:603)
+kasan_check_range (mm/kasan/generic.c:189)
+variable_test_bit (arch/x86/include/asm/bitops.h:227) [inline]
+arch_test_bit (arch/x86/include/asm/bitops.h:239) [inline]
+_test_bit at (include/asm-generic/bitops/instrumented-non-atomic.h:142) [inline]
+node_state (include/linux/nodemask.h:423) [inline]
+map_benchmark_ioctl (kernel/dma/map_benchmark.c:214)
+full_proxy_unlocked_ioctl (fs/debugfs/file.c:333)
+__x64_sys_ioctl (fs/ioctl.c:890)
+do_syscall_64 (arch/x86/entry/common.c:83)
+entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+
+Compare node ids with sane bounds first. NUMA_NO_NODE is considered a
+special valid case meaning that benchmarking kthreads won't be bound to a
+cpuset of a given node.
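+
+A sketch of the resulting check (the helper name is made up; the logic
+mirrors the diff below):
+
+  #include <linux/numa.h>
+  #include <linux/nodemask.h>
+
+  static bool demo_node_is_valid(int node)
+  {
+          if (node == NUMA_NO_NODE)             /* "no binding" is allowed */
+                  return true;
+          if (node < 0 || node >= MAX_NUMNODES) /* reject out-of-range ids */
+                  return false;
+          return node_possible(node);           /* node is a safe bit index */
+  }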
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 65789daa8087 ("dma-mapping: add benchmark support for streaming DMA APIs")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Reviewed-by: Robin Murphy <robin.murphy@arm.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/map_benchmark.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 9b9af1bd6be31..130feaa8de7fc 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -231,7 +231,8 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
+ }
+
+ if (map->bparam.node != NUMA_NO_NODE &&
+- !node_possible(map->bparam.node)) {
++ (map->bparam.node < 0 || map->bparam.node >= MAX_NUMNODES ||
++ !node_possible(map->bparam.node))) {
+ pr_err("invalid numa node\n");
+ return -EINVAL;
+ }
+--
+2.43.0
+
--- /dev/null
+From b5e3c4ef65d3e5b2a806e3821ba1dbb52b6f37fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 4 May 2024 14:47:04 +0300
+Subject: dma-mapping: benchmark: handle NUMA_NO_NODE correctly
+
+From: Fedor Pchelkin <pchelkin@ispras.ru>
+
+[ Upstream commit e64746e74f717961250a155e14c156616fcd981f ]
+
+cpumask_of_node() can be called for NUMA_NO_NODE inside do_map_benchmark()
+resulting in the following sanitizer report:
+
+UBSAN: array-index-out-of-bounds in ./arch/x86/include/asm/topology.h:72:28
+index -1 is out of range for type 'cpumask [64][1]'
+CPU: 1 PID: 990 Comm: dma_map_benchma Not tainted 6.9.0-rc6 #29
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
+Call Trace:
+ <TASK>
+dump_stack_lvl (lib/dump_stack.c:117)
+ubsan_epilogue (lib/ubsan.c:232)
+__ubsan_handle_out_of_bounds (lib/ubsan.c:429)
+cpumask_of_node (arch/x86/include/asm/topology.h:72) [inline]
+do_map_benchmark (kernel/dma/map_benchmark.c:104)
+map_benchmark_ioctl (kernel/dma/map_benchmark.c:246)
+full_proxy_unlocked_ioctl (fs/debugfs/file.c:333)
+__x64_sys_ioctl (fs/ioctl.c:890)
+do_syscall_64 (arch/x86/entry/common.c:83)
+entry_SYSCALL_64_after_hwframe (arch/x86/entry/entry_64.S:130)
+
+Call cpumask_of_node() in place, at the point where a kernel thread is
+actually bound to the cpuset of a particular node.
+
+Note that the provided node id is checked inside map_benchmark_ioctl();
+it is only the NUMA_NO_NODE case that was not handled properly later.
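+
+A sketch of the intended pattern (the helper name is made up; the logic
+mirrors the diff below):
+
+  #include <linux/kthread.h>
+  #include <linux/numa.h>
+  #include <linux/topology.h>
+
+  static void demo_bind_kthread(struct task_struct *tsk, int node)
+  {
+          /* Translate the node id into a cpumask only when a real node
+           * was requested, so cpumask_of_node() never sees NUMA_NO_NODE.
+           */
+          if (node != NUMA_NO_NODE)
+                  kthread_bind_mask(tsk, cpumask_of_node(node));
+  }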
+
+Found by Linux Verification Center (linuxtesting.org).
+
+Fixes: 65789daa8087 ("dma-mapping: add benchmark support for streaming DMA APIs")
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Acked-by: Barry Song <baohua@kernel.org>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/dma/map_benchmark.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/dma/map_benchmark.c b/kernel/dma/map_benchmark.c
+index 130feaa8de7fc..b7f8bb7a1e5c5 100644
+--- a/kernel/dma/map_benchmark.c
++++ b/kernel/dma/map_benchmark.c
+@@ -124,7 +124,6 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ struct task_struct **tsk;
+ int threads = map->bparam.threads;
+ int node = map->bparam.node;
+- const cpumask_t *cpu_mask = cpumask_of_node(node);
+ u64 loops;
+ int ret = 0;
+ int i;
+@@ -145,7 +144,7 @@ static int do_map_benchmark(struct map_benchmark_data *map)
+ }
+
+ if (node != NUMA_NO_NODE)
+- kthread_bind_mask(tsk[i], cpu_mask);
++ kthread_bind_mask(tsk[i], cpumask_of_node(node));
+ }
+
+ /* clear the old value in the previous benchmark */
+--
+2.43.0
+
--- /dev/null
+From 6098ba633d02a5e7777aa32910146f9e7bf892b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 10:30:44 +0300
+Subject: enic: Validate length of nl attributes in enic_set_vf_port
+
+From: Roded Zats <rzats@paloaltonetworks.com>
+
+[ Upstream commit e8021b94b0412c37bcc79027c2e382086b6ce449 ]
+
+enic_set_vf_port assumes that the nl attribute IFLA_PORT_PROFILE
+is of length PORT_PROFILE_MAX and that the nl attributes
+IFLA_PORT_INSTANCE_UUID, IFLA_PORT_HOST_UUID are of length PORT_UUID_MAX.
+These attributes are validated (in the function do_setlink in rtnetlink.c)
+using the nla_policy ifla_port_policy. The policy defines IFLA_PORT_PROFILE
+as NLA_STRING, IFLA_PORT_INSTANCE_UUID as NLA_BINARY and
+IFLA_PORT_HOST_UUID as NLA_STRING. That means the policy-based length
+validation only checks the maximum size of the attributes, not their
+exact size, so these attributes might be shorter than enic_set_vf_port
+expects. This can cause an out-of-bounds read in the memcpys of the
+data of these attributes in enic_set_vf_port.
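+
+The resulting pattern, sketched with a made-up helper (PORT_PROFILE_MAX
+comes from the uapi if_link.h header):
+
+  #include <linux/errno.h>
+  #include <linux/if_link.h>
+  #include <linux/string.h>
+  #include <net/netlink.h>
+
+  static int demo_copy_profile(const struct nlattr *attr,
+                               char *dst /* PORT_PROFILE_MAX bytes */)
+  {
+          /* The nla_policy only bounds the maximum length, so the exact
+           * length must be re-checked before a fixed-size memcpy().
+           */
+          if (nla_len(attr) != PORT_PROFILE_MAX)
+                  return -EINVAL;
+          memcpy(dst, nla_data(attr), PORT_PROFILE_MAX);
+          return 0;
+  }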
+
+Fixes: f8bd909183ac ("net: Add ndo_{set|get}_vf_port support for enic dynamic vnics")
+Signed-off-by: Roded Zats <rzats@paloaltonetworks.com>
+Link: https://lore.kernel.org/r/20240522073044.33519-1-rzats@paloaltonetworks.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/cisco/enic/enic_main.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
+index d0a8f7106958b..52bc164a1cfbc 100644
+--- a/drivers/net/ethernet/cisco/enic/enic_main.c
++++ b/drivers/net/ethernet/cisco/enic/enic_main.c
+@@ -1117,18 +1117,30 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
+ pp->request = nla_get_u8(port[IFLA_PORT_REQUEST]);
+
+ if (port[IFLA_PORT_PROFILE]) {
++ if (nla_len(port[IFLA_PORT_PROFILE]) != PORT_PROFILE_MAX) {
++ memcpy(pp, &prev_pp, sizeof(*pp));
++ return -EINVAL;
++ }
+ pp->set |= ENIC_SET_NAME;
+ memcpy(pp->name, nla_data(port[IFLA_PORT_PROFILE]),
+ PORT_PROFILE_MAX);
+ }
+
+ if (port[IFLA_PORT_INSTANCE_UUID]) {
++ if (nla_len(port[IFLA_PORT_INSTANCE_UUID]) != PORT_UUID_MAX) {
++ memcpy(pp, &prev_pp, sizeof(*pp));
++ return -EINVAL;
++ }
+ pp->set |= ENIC_SET_INSTANCE;
+ memcpy(pp->instance_uuid,
+ nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
+ }
+
+ if (port[IFLA_PORT_HOST_UUID]) {
++ if (nla_len(port[IFLA_PORT_HOST_UUID]) != PORT_UUID_MAX) {
++ memcpy(pp, &prev_pp, sizeof(*pp));
++ return -EINVAL;
++ }
+ pp->set |= ENIC_SET_HOST;
+ memcpy(pp->host_uuid,
+ nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
+--
+2.43.0
+
--- /dev/null
+From 6ea34ffe3a6c85d0c1e118f34e2dbee41e5cbcb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 May 2024 08:20:14 -0700
+Subject: hwmon: (shtc1) Fix property misspelling
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit 52a2c70c3ec555e670a34dd1ab958986451d2dd2 ]
+
+The property name is "sensirion,low-precision", not
+"sensicon,low-precision".
+
+Cc: Chris Ruehl <chris.ruehl@gtsys.com.hk>
+Fixes: be7373b60df5 ("hwmon: shtc1: add support for device tree bindings")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/shtc1.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/shtc1.c b/drivers/hwmon/shtc1.c
+index 18546ebc8e9f7..0365643029aee 100644
+--- a/drivers/hwmon/shtc1.c
++++ b/drivers/hwmon/shtc1.c
+@@ -238,7 +238,7 @@ static int shtc1_probe(struct i2c_client *client)
+
+ if (np) {
+ data->setup.blocking_io = of_property_read_bool(np, "sensirion,blocking-io");
+- data->setup.high_precision = !of_property_read_bool(np, "sensicon,low-precision");
++ data->setup.high_precision = !of_property_read_bool(np, "sensirion,low-precision");
+ } else {
+ if (client->dev.platform_data)
+ data->setup = *(struct shtc1_platform_data *)dev->platform_data;
+--
+2.43.0
+
--- /dev/null
+From ff7c1599dd5756ba746aa33cba107d281977bde1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 17:56:33 +0800
+Subject: ipvlan: Dont Use skb->sk in ipvlan_process_v{4,6}_outbound
+
+From: Yue Haibing <yuehaibing@huawei.com>
+
+[ Upstream commit b3dc6e8003b500861fa307e9a3400c52e78e4d3a ]
+
+A raw packet from a PF_PACKET socket on top of an IPv6-backed ipvlan device
+will hit the WARN_ON_ONCE() in sk_mc_loop() through the sch_direct_xmit() path.
+
+WARNING: CPU: 2 PID: 0 at net/core/sock.c:775 sk_mc_loop+0x2d/0x70
+Modules linked in: sch_netem ipvlan rfkill cirrus drm_shmem_helper sg drm_kms_helper
+CPU: 2 PID: 0 Comm: swapper/2 Kdump: loaded Not tainted 6.9.0+ #279
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+RIP: 0010:sk_mc_loop+0x2d/0x70
+Code: fa 0f 1f 44 00 00 65 0f b7 15 f7 96 a3 4f 31 c0 66 85 d2 75 26 48 85 ff 74 1c
+RSP: 0018:ffffa9584015cd78 EFLAGS: 00010212
+RAX: 0000000000000011 RBX: ffff91e585793e00 RCX: 0000000002c6a001
+RDX: 0000000000000000 RSI: 0000000000000040 RDI: ffff91e589c0f000
+RBP: ffff91e5855bd100 R08: 0000000000000000 R09: 3d00545216f43d00
+R10: ffff91e584fdcc50 R11: 00000060dd8616f4 R12: ffff91e58132d000
+R13: ffff91e584fdcc68 R14: ffff91e5869ce800 R15: ffff91e589c0f000
+FS: 0000000000000000(0000) GS:ffff91e898100000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f788f7c44c0 CR3: 0000000008e1a000 CR4: 00000000000006f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+<IRQ>
+ ? __warn (kernel/panic.c:693)
+ ? sk_mc_loop (net/core/sock.c:760)
+ ? report_bug (lib/bug.c:201 lib/bug.c:219)
+ ? handle_bug (arch/x86/kernel/traps.c:239)
+ ? exc_invalid_op (arch/x86/kernel/traps.c:260 (discriminator 1))
+ ? asm_exc_invalid_op (./arch/x86/include/asm/idtentry.h:621)
+ ? sk_mc_loop (net/core/sock.c:760)
+ ip6_finish_output2 (net/ipv6/ip6_output.c:83 (discriminator 1))
+ ? nf_hook_slow (net/netfilter/core.c:626)
+ ip6_finish_output (net/ipv6/ip6_output.c:222)
+ ? __pfx_ip6_finish_output (net/ipv6/ip6_output.c:215)
+ ipvlan_xmit_mode_l3 (drivers/net/ipvlan/ipvlan_core.c:602) ipvlan
+ ipvlan_start_xmit (drivers/net/ipvlan/ipvlan_main.c:226) ipvlan
+ dev_hard_start_xmit (net/core/dev.c:3594)
+ sch_direct_xmit (net/sched/sch_generic.c:343)
+ __qdisc_run (net/sched/sch_generic.c:416)
+ net_tx_action (net/core/dev.c:5286)
+ handle_softirqs (kernel/softirq.c:555)
+ __irq_exit_rcu (kernel/softirq.c:589)
+ sysvec_apic_timer_interrupt (arch/x86/kernel/apic/apic.c:1043)
+
+The warning is triggered by this call path:
+packet_sendmsg
+ packet_snd //skb->sk is packet sk
+ __dev_queue_xmit
+ __dev_xmit_skb //q->enqueue is not NULL
+ __qdisc_run
+ sch_direct_xmit
+ dev_hard_start_xmit
+ ipvlan_start_xmit
+ ipvlan_xmit_mode_l3 //l3 mode
+ ipvlan_process_outbound //vepa flag
+ ipvlan_process_v6_outbound
+ ip6_local_out
+ __ip6_finish_output
+ ip6_finish_output2 //multicast packet
+ sk_mc_loop //sk->sk_family is AF_PACKET
+
+To fix this, call ip{6}_local_out() with a NULL sk in ipvlan, as other
+tunnel drivers do.
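+
+Sketched, the fix boils down to this pattern (the function name is
+illustrative; the real change is in ipvlan_process_v{4,6}_outbound()):
+
+  #include <linux/ipv6.h>
+  #include <linux/netdevice.h>
+  #include <net/ipv6.h>
+
+  static int demo_xmit_v6(struct net_device *dev, struct sk_buff *skb)
+  {
+          memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+          /* Pass a NULL sk so the output path (and sk_mc_loop()) never
+           * inspects the unrelated PF_PACKET socket the skb came from.
+           */
+          return ip6_local_out(dev_net(dev), NULL, skb);
+  }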
+
+Fixes: 2ad7bf363841 ("ipvlan: Initial check-in of the IPVLAN driver.")
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: Yue Haibing <yuehaibing@huawei.com>
+Reviewed-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20240529095633.613103-1-yuehaibing@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ipvlan/ipvlan_core.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
+index 5aa9217240d53..a18b49db38ee0 100644
+--- a/drivers/net/ipvlan/ipvlan_core.c
++++ b/drivers/net/ipvlan/ipvlan_core.c
+@@ -440,7 +440,7 @@ static noinline_for_stack int ipvlan_process_v4_outbound(struct sk_buff *skb)
+
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+
+- err = ip_local_out(net, skb->sk, skb);
++ err = ip_local_out(net, NULL, skb);
+ if (unlikely(net_xmit_eval(err)))
+ DEV_STATS_INC(dev, tx_errors);
+ else
+@@ -495,7 +495,7 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
+
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+
+- err = ip6_local_out(dev_net(dev), skb->sk, skb);
++ err = ip6_local_out(dev_net(dev), NULL, skb);
+ if (unlikely(net_xmit_eval(err)))
+ DEV_STATS_INC(dev, tx_errors);
+ else
+--
+2.43.0
+
--- /dev/null
+From 3f1046cdeca23886661fd59640d64f17dbee09fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 May 2024 18:22:27 +0900
+Subject: kconfig: fix comparison to constant symbols, 'm', 'n'
+
+From: Masahiro Yamada <masahiroy@kernel.org>
+
+[ Upstream commit aabdc960a283ba78086b0bf66ee74326f49e218e ]
+
+Currently, comparisons to 'm' or 'n' result in incorrect output.
+
+[Test Code]
+
+ config MODULES
+ def_bool y
+ modules
+
+ config A
+ def_tristate m
+
+ config B
+ def_bool A > n
+
+CONFIG_B is unset, while CONFIG_B=y is expected.
+
+The issue arises because Kconfig compares the tristate values as
+strings.
+
+Currently, the .type fields in the constant symbol definitions,
+symbol_{yes,mod,no} are unspecified, i.e., S_UNKNOWN.
+
+When expr_calc_value() evaluates 'A > n', it checks the types of 'A' and
+'n' to determine how to compare them.
+
+The left-hand side, 'A', is a tristate symbol with a value of 'm', which
+corresponds to a numeric value of 1. (Internally, 'y', 'm', and 'n' are
+represented as 2, 1, and 0, respectively.)
+
+The right-hand side, 'n', has an unknown type, so it is treated as the
+string "n" during the comparison.
+
+expr_calc_value() compares two values numerically only when both can
+have numeric values. Otherwise, they are compared as strings.
+
+ symbol numeric value ASCII code
+ -------------------------------------
+ y 2 0x79
+ m 1 0x6d
+ n 0 0x6e
+
+'m' is greater than 'n' if compared numerically (since 1 is greater
+than 0), but smaller than 'n' if compared as strings (since the ASCII
+code 0x6d is smaller than 0x6e).
+
+Specifying .type=S_TRISTATE for symbol_{yes,mod,no} fixes the above
+test code.
+
+Doing so, however, would cause a regression to the following test code.
+
+[Test Code 2]
+
+ config MODULES
+ def_bool n
+ modules
+
+ config A
+ def_tristate n
+
+ config B
+ def_bool A = m
+
+You would get CONFIG_B=y, while CONFIG_B should not be set.
+
+The reason is that sym_get_string_value() turns 'm' into 'n' when the
+module feature is disabled. Consequently, expr_calc_value() evaluates
+'A = n' instead of 'A = m'. This oddity has been hidden because the type
+of 'm' was previously S_UNKNOWN instead of S_TRISTATE.
+
+sym_get_string_value() should not tweak the string because the tristate
+value has already been correctly calculated. There is no reason to
+return the string "n" when its tristate value is mod.
+
+Fixes: 31847b67bec0 ("kconfig: allow use of relations other than (in)equality")
+Signed-off-by: Masahiro Yamada <masahiroy@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ scripts/kconfig/symbol.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
+index 7f8013dcef002..f9786621a178e 100644
+--- a/scripts/kconfig/symbol.c
++++ b/scripts/kconfig/symbol.c
+@@ -13,18 +13,21 @@
+
+ struct symbol symbol_yes = {
+ .name = "y",
++ .type = S_TRISTATE,
+ .curr = { "y", yes },
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+
+ struct symbol symbol_mod = {
+ .name = "m",
++ .type = S_TRISTATE,
+ .curr = { "m", mod },
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+
+ struct symbol symbol_no = {
+ .name = "n",
++ .type = S_TRISTATE,
+ .curr = { "n", no },
+ .flags = SYMBOL_CONST|SYMBOL_VALID,
+ };
+@@ -775,8 +778,7 @@ const char *sym_get_string_value(struct symbol *sym)
+ case no:
+ return "n";
+ case mod:
+- sym_calc_value(modules_sym);
+- return (modules_sym->curr.tri == no) ? "n" : "m";
++ return "m";
+ case yes:
+ return "y";
+ }
+--
+2.43.0
+
--- /dev/null
+From d7fd59b0b6a4ea9e5129ec82b0c66f7091ac6746 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jan 2022 20:23:38 +0000
+Subject: net: ena: Add capabilities field with support for ENI stats
+ capability
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit a2d5d6a70fa5211e071747876fa6a7621c7257fd ]
+
+This bitmask field indicates what capabilities are supported by the
+device.
+
+The capabilities field differs from the 'supported_features' field which
+indicates what sub-commands for the set/get feature commands are
+supported. The sub-commands are specified in the 'feature_id' field of
+the 'ena_admin_set_feat_cmd' struct in the following way:
+
+ struct ena_admin_set_feat_cmd cmd;
+
+ cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+ cmd.feat_common.feature_
+
+The 'capabilities' field, on the other hand, specifies different
+capabilities of the device. For example, whether the device supports
+querying of ENI stats.
+
+Also add an enumerator which contains all the capabilities. The
+first added capability macro is for ENI stats feature.
+
+Capabilities are queried along with the other device attributes (in
+ena_com_get_dev_attr_feat()) during device initialization and are stored
+in the ena_com_dev struct. They can be later queried using the
+ena_com_get_cap() helper function.
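+
+A usage sketch (the function name is made up; it mirrors the
+ena_com_get_eni_stats() check added below and assumes ena_com.h):
+
+  static int demo_query_eni_stats(struct ena_com_dev *ena_dev)
+  {
+          /* Bail out early if the device never advertised the capability. */
+          if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS))
+                  return -EOPNOTSUPP;
+
+          /* ... issue the ENA_ADMIN_GET_STATS_TYPE_ENI admin command ... */
+          return 0;
+  }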
+
+Signed-off-by: Shay Agroskin <shayagr@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_admin_defs.h | 10 +++++++++-
+ drivers/net/ethernet/amazon/ena/ena_com.c | 8 ++++++++
+ drivers/net/ethernet/amazon/ena/ena_com.h | 13 +++++++++++++
+ 3 files changed, 30 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+index f5ec35fa4c631..466ad9470d1f4 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
++++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+@@ -48,6 +48,11 @@ enum ena_admin_aq_feature_id {
+ ENA_ADMIN_FEATURES_OPCODE_NUM = 32,
+ };
+
++/* device capabilities */
++enum ena_admin_aq_caps_id {
++ ENA_ADMIN_ENI_STATS = 0,
++};
++
+ enum ena_admin_placement_policy_type {
+ /* descriptors and headers are in host memory */
+ ENA_ADMIN_PLACEMENT_POLICY_HOST = 1,
+@@ -455,7 +460,10 @@ struct ena_admin_device_attr_feature_desc {
+ */
+ u32 supported_features;
+
+- u32 reserved3;
++ /* bitmap of ena_admin_aq_caps_id, which represents device
++ * capabilities.
++ */
++ u32 capabilities;
+
+ /* Indicates how many bits are used physical address access. */
+ u32 phys_addr_width;
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index e37c82eb62326..4db689372980e 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -1974,6 +1974,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ sizeof(get_resp.u.dev_attr));
+
+ ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
++ ena_dev->capabilities = get_resp.u.dev_attr.capabilities;
+
+ if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
+ rc = ena_com_get_feature(ena_dev, &get_resp,
+@@ -2226,6 +2227,13 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ struct ena_com_stats_ctx ctx;
+ int ret;
+
++ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
++ netdev_err(ena_dev->net_device,
++ "Capability %d isn't supported\n",
++ ENA_ADMIN_ENI_STATS);
++ return -EOPNOTSUPP;
++ }
++
+ memset(&ctx, 0x0, sizeof(ctx));
+ ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
+ if (likely(ret == 0))
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
+index 73b03ce594129..3c5081d9d25d6 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_com.h
+@@ -314,6 +314,7 @@ struct ena_com_dev {
+
+ struct ena_rss rss;
+ u32 supported_features;
++ u32 capabilities;
+ u32 dma_addr_bits;
+
+ struct ena_host_attribute host_attr;
+@@ -967,6 +968,18 @@ static inline void ena_com_disable_adaptive_moderation(struct ena_com_dev *ena_d
+ ena_dev->adaptive_coalescing = false;
+ }
+
++/* ena_com_get_cap - query whether device supports a capability.
++ * @ena_dev: ENA communication layer struct
++ * @cap_id: enum value representing the capability
++ *
++ * @return - true if capability is supported or false otherwise
++ */
++static inline bool ena_com_get_cap(struct ena_com_dev *ena_dev,
++ enum ena_admin_aq_caps_id cap_id)
++{
++ return !!(ena_dev->capabilities & BIT(cap_id));
++}
++
+ /* ena_com_update_intr_reg - Prepare interrupt register
+ * @intr_reg: interrupt register to update.
+ * @rx_delay_interval: Rx interval in usecs
+--
+2.43.0
+
--- /dev/null
+From faa6119d6a045b5d42c6056a641505f1ba2cc447 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 12 Jun 2023 12:14:48 +0000
+Subject: net: ena: Add dynamic recycling mechanism for rx buffers
+
+From: David Arinzon <darinzon@amazon.com>
+
+[ Upstream commit f7d625adeb7bc6a9ec83d32d9615889969d64484 ]
+
+The current implementation allocates page-sized rx buffers.
+As traffic may consist of different types and sizes of packets,
+in various cases, buffers are not fully used.
+
+This change (Dynamic RX Buffers - DRB) uses only the part of the
+allocated rx page needed for the incoming packet, and returns the rest
+of the unused page to be used again as an rx buffer for future packets.
+A threshold of 2K of unused space has been set to decide whether the
+remainder of the page can be reused as an rx buffer.
+
+As a page may be reused, dma_sync_single_for_cpu() is added in order
+to sync the memory to the CPU side after it was owned by the HW.
+In addition, when the rx page can no longer be reused, it is
+unmapped using dma_unmap_page(), which implicitly syncs and then
+unmaps the entire page. If the kernel is still handling skbs that
+point to previous buffers from that rx page, it may access garbage
+pointers caused by the implicit sync overwriting them. The implicit
+DMA sync is removed by replacing dma_unmap_page() with
+dma_unmap_page_attrs() and the DMA_ATTR_SKIP_CPU_SYNC flag.
+
+The functionality is disabled for XDP traffic to avoid handling
+several descriptors per packet.
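+
+The reuse decision, sketched with a made-up helper (types and constants
+are the ones this patch adds to the ena headers):
+
+  #include <linux/skbuff.h>
+
+  static bool demo_can_reuse_rx_page(const struct ena_com_buf *ena_buf,
+                                     u16 len, int pkt_offset)
+  {
+          /* Reuse only if, after this packet's data plus headroom and
+           * tailroom, at least ENA_MIN_RX_BUF_SIZE (2K) bytes remain.
+           */
+          return SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <=
+                 ena_buf->len;
+  }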
+
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: Shay Agroskin <shayagr@amazon.com>
+Signed-off-by: David Arinzon <darinzon@amazon.com>
+Link: https://lore.kernel.org/r/20230612121448.28829-1-darinzon@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../device_drivers/ethernet/amazon/ena.rst | 32 +++++
+ .../net/ethernet/amazon/ena/ena_admin_defs.h | 6 +-
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 136 ++++++++++++------
+ drivers/net/ethernet/amazon/ena/ena_netdev.h | 4 +
+ 4 files changed, 136 insertions(+), 42 deletions(-)
+
+diff --git a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+index 01b2a69b0cb03..a8a2aa2ae8975 100644
+--- a/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
++++ b/Documentation/networking/device_drivers/ethernet/amazon/ena.rst
+@@ -205,6 +205,7 @@ Adaptive coalescing can be switched on/off through `ethtool(8)`'s
+ More information about Adaptive Interrupt Moderation (DIM) can be found in
+ Documentation/networking/net_dim.rst
+
++.. _`RX copybreak`:
+ RX copybreak
+ ============
+ The rx_copybreak is initialized by default to ENA_DEFAULT_RX_COPYBREAK
+@@ -315,3 +316,34 @@ Rx
+ - The new SKB is updated with the necessary information (protocol,
+ checksum hw verify result, etc), and then passed to the network
+ stack, using the NAPI interface function :code:`napi_gro_receive()`.
++
++Dynamic RX Buffers (DRB)
++------------------------
++
++Each RX descriptor in the RX ring is a single memory page (which is either 4KB
++or 16KB long depending on system's configurations).
++To reduce the memory allocations required when dealing with a high rate of small
++packets, the driver tries to reuse the remaining RX descriptor's space if more
++than 2KB of this page remain unused.
++
++A simple example of this mechanism is the following sequence of events:
++
++::
++
++ 1. Driver allocates page-sized RX buffer and passes it to hardware
++ +----------------------+
++ |4KB RX Buffer |
++ +----------------------+
++
++ 2. A 300Bytes packet is received on this buffer
++
++ 3. The driver increases the ref count on this page and returns it back to
++ HW as an RX buffer of size 4KB - 300Bytes = 3796 Bytes
++ +----+--------------------+
++ |****|3796 Bytes RX Buffer|
++ +----+--------------------+
++
++This mechanism isn't used when an XDP program is loaded, or when the
++RX packet is less than rx_copybreak bytes (in which case the packet is
++copied out of the RX buffer into the linear part of a new skb allocated
++for it and the RX buffer remains the same size, see `RX copybreak`_).
+diff --git a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+index 466ad9470d1f4..6de0d590be34f 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
++++ b/drivers/net/ethernet/amazon/ena/ena_admin_defs.h
+@@ -869,7 +869,9 @@ struct ena_admin_host_info {
+ * 2 : interrupt_moderation
+ * 3 : rx_buf_mirroring
+ * 4 : rss_configurable_function_key
+- * 31:5 : reserved
++ * 5 : reserved
++ * 6 : rx_page_reuse
++ * 31:7 : reserved
+ */
+ u32 driver_supported_features;
+ };
+@@ -1184,6 +1186,8 @@ struct ena_admin_ena_mmio_req_read_less_resp {
+ #define ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK BIT(3)
+ #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_SHIFT 4
+ #define ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK BIT(4)
++#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_SHIFT 6
++#define ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK BIT(6)
+
+ /* aenq_common_desc */
+ #define ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0)
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index e2b43d0f90784..c407fad52aeb3 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1022,7 +1022,7 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ int tailroom;
+
+ /* restore page offset value in case it has been changed by device */
+- rx_info->page_offset = headroom;
++ rx_info->buf_offset = headroom;
+
+ /* if previous allocated page is not used */
+ if (unlikely(rx_info->page))
+@@ -1039,6 +1039,8 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ rx_info->page = page;
++ rx_info->dma_addr = dma;
++ rx_info->page_offset = 0;
+ ena_buf = &rx_info->ena_buf;
+ ena_buf->paddr = dma + headroom;
+ ena_buf->len = ENA_PAGE_SIZE - headroom - tailroom;
+@@ -1046,14 +1048,12 @@ static int ena_alloc_rx_buffer(struct ena_ring *rx_ring,
+ return 0;
+ }
+
+-static void ena_unmap_rx_buff(struct ena_ring *rx_ring,
+- struct ena_rx_buffer *rx_info)
++static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
++ struct ena_rx_buffer *rx_info,
++ unsigned long attrs)
+ {
+- struct ena_com_buf *ena_buf = &rx_info->ena_buf;
+-
+- dma_unmap_page(rx_ring->dev, ena_buf->paddr - rx_ring->rx_headroom,
+- ENA_PAGE_SIZE,
+- DMA_BIDIRECTIONAL);
++ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
++ DMA_BIDIRECTIONAL, attrs);
+ }
+
+ static void ena_free_rx_page(struct ena_ring *rx_ring,
+@@ -1067,7 +1067,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
+ return;
+ }
+
+- ena_unmap_rx_buff(rx_ring, rx_info);
++ ena_unmap_rx_buff_attrs(rx_ring, rx_info, 0);
+
+ __free_page(page);
+ rx_info->page = NULL;
+@@ -1413,14 +1413,14 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+ return tx_pkts;
+ }
+
+-static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
++static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag, u16 len)
+ {
+ struct sk_buff *skb;
+
+ if (!first_frag)
+- skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
++ skb = napi_alloc_skb(rx_ring->napi, len);
+ else
+- skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
++ skb = napi_build_skb(first_frag, len);
+
+ if (unlikely(!skb)) {
+ ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+@@ -1429,24 +1429,47 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
+ netif_dbg(rx_ring->adapter, rx_err, rx_ring->netdev,
+ "Failed to allocate skb. first_frag %s\n",
+ first_frag ? "provided" : "not provided");
+- return NULL;
+ }
+
+ return skb;
+ }
+
++static bool ena_try_rx_buf_page_reuse(struct ena_rx_buffer *rx_info, u16 buf_len,
++ u16 len, int pkt_offset)
++{
++ struct ena_com_buf *ena_buf = &rx_info->ena_buf;
++
++ /* More than ENA_MIN_RX_BUF_SIZE left in the reused buffer
++ * for data + headroom + tailroom.
++ */
++ if (SKB_DATA_ALIGN(len + pkt_offset) + ENA_MIN_RX_BUF_SIZE <= ena_buf->len) {
++ page_ref_inc(rx_info->page);
++ rx_info->page_offset += buf_len;
++ ena_buf->paddr += buf_len;
++ ena_buf->len -= buf_len;
++ return true;
++ }
++
++ return false;
++}
++
+ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ struct ena_com_rx_buf_info *ena_bufs,
+ u32 descs,
+ u16 *next_to_clean)
+ {
++ int tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
++ bool is_xdp_loaded = ena_xdp_present_ring(rx_ring);
+ struct ena_rx_buffer *rx_info;
+ struct ena_adapter *adapter;
++ int page_offset, pkt_offset;
++ dma_addr_t pre_reuse_paddr;
+ u16 len, req_id, buf = 0;
++ bool reuse_rx_buf_page;
+ struct sk_buff *skb;
+- void *page_addr;
+- u32 page_offset;
+- void *data_addr;
++ void *buf_addr;
++ int buf_offset;
++ u16 buf_len;
+
+ len = ena_bufs[buf].len;
+ req_id = ena_bufs[buf].req_id;
+@@ -1466,34 +1489,30 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ "rx_info %p page %p\n",
+ rx_info, rx_info->page);
+
+- /* save virt address of first buffer */
+- page_addr = page_address(rx_info->page);
++ buf_offset = rx_info->buf_offset;
++ pkt_offset = buf_offset - rx_ring->rx_headroom;
+ page_offset = rx_info->page_offset;
+- data_addr = page_addr + page_offset;
+-
+- prefetch(data_addr);
++ buf_addr = page_address(rx_info->page) + page_offset;
+
+ if (len <= rx_ring->rx_copybreak) {
+- skb = ena_alloc_skb(rx_ring, NULL);
++ skb = ena_alloc_skb(rx_ring, NULL, len);
+ if (unlikely(!skb))
+ return NULL;
+
+- netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+- "RX allocated small packet. len %d. data_len %d\n",
+- skb->len, skb->data_len);
+-
+ /* sync this buffer for CPU use */
+ dma_sync_single_for_cpu(rx_ring->dev,
+- dma_unmap_addr(&rx_info->ena_buf, paddr),
++ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+ len,
+ DMA_FROM_DEVICE);
+- skb_copy_to_linear_data(skb, data_addr, len);
++ skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
+ dma_sync_single_for_device(rx_ring->dev,
+- dma_unmap_addr(&rx_info->ena_buf, paddr),
++ dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+ len,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
++ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
++ "RX allocated small packet. len %d.\n", skb->len);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+ rx_ring->free_ids[*next_to_clean] = req_id;
+ *next_to_clean = ENA_RX_RING_IDX_ADD(*next_to_clean, descs,
+@@ -1501,14 +1520,28 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ return skb;
+ }
+
+- ena_unmap_rx_buff(rx_ring, rx_info);
++ buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
++
++ pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
++
++ /* If XDP isn't loaded try to reuse part of the RX buffer */
++ reuse_rx_buf_page = !is_xdp_loaded &&
++ ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
+
+- skb = ena_alloc_skb(rx_ring, page_addr);
++ dma_sync_single_for_cpu(rx_ring->dev,
++ pre_reuse_paddr + pkt_offset,
++ len,
++ DMA_FROM_DEVICE);
++
++ if (!reuse_rx_buf_page)
++ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
++
++ skb = ena_alloc_skb(rx_ring, buf_addr, buf_len);
+ if (unlikely(!skb))
+ return NULL;
+
+ /* Populate skb's linear part */
+- skb_reserve(skb, page_offset);
++ skb_reserve(skb, buf_offset);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, rx_ring->netdev);
+
+@@ -1517,7 +1550,8 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ "RX skb updated. len %d. data_len %d\n",
+ skb->len, skb->data_len);
+
+- rx_info->page = NULL;
++ if (!reuse_rx_buf_page)
++ rx_info->page = NULL;
+
+ rx_ring->free_ids[*next_to_clean] = req_id;
+ *next_to_clean =
+@@ -1532,10 +1566,28 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+
+ rx_info = &rx_ring->rx_buffer_info[req_id];
+
+- ena_unmap_rx_buff(rx_ring, rx_info);
++ /* rx_info->buf_offset includes rx_ring->rx_headroom */
++ buf_offset = rx_info->buf_offset;
++ pkt_offset = buf_offset - rx_ring->rx_headroom;
++ buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
++ page_offset = rx_info->page_offset;
++
++ pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
++
++ reuse_rx_buf_page = !is_xdp_loaded &&
++ ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
++
++ dma_sync_single_for_cpu(rx_ring->dev,
++ pre_reuse_paddr + pkt_offset,
++ len,
++ DMA_FROM_DEVICE);
++
++ if (!reuse_rx_buf_page)
++ ena_unmap_rx_buff_attrs(rx_ring, rx_info,
++ DMA_ATTR_SKIP_CPU_SYNC);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+- rx_info->page_offset, len, ENA_PAGE_SIZE);
++ page_offset + buf_offset, len, buf_len);
+
+ } while (1);
+
+@@ -1641,14 +1693,14 @@ static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u
+
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+ xdp_prepare_buff(xdp, page_address(rx_info->page),
+- rx_info->page_offset,
++ rx_info->buf_offset,
+ rx_ring->ena_bufs[0].len, false);
+
+ ret = ena_xdp_execute(rx_ring, xdp);
+
+ /* The xdp program might expand the headers */
+ if (ret == ENA_XDP_PASS) {
+- rx_info->page_offset = xdp->data - xdp->data_hard_start;
++ rx_info->buf_offset = xdp->data - xdp->data_hard_start;
+ rx_ring->ena_bufs[0].len = xdp->data_end - xdp->data;
+ }
+
+@@ -1703,7 +1755,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+
+ /* First descriptor might have an offset set by the device */
+ rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
+- rx_info->page_offset += ena_rx_ctx.pkt_offset;
++ rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+
+ netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
+ "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
+@@ -1733,8 +1785,9 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ * from RX side.
+ */
+ if (xdp_verdict & ENA_XDP_FORWARDED) {
+- ena_unmap_rx_buff(rx_ring,
+- &rx_ring->rx_buffer_info[req_id]);
++ ena_unmap_rx_buff_attrs(rx_ring,
++ &rx_ring->rx_buffer_info[req_id],
++ 0);
+ rx_ring->rx_buffer_info[req_id].page = NULL;
+ }
+ }
+@@ -3218,7 +3271,8 @@ static void ena_config_host_info(struct ena_com_dev *ena_dev, struct pci_dev *pd
+ ENA_ADMIN_HOST_INFO_RX_OFFSET_MASK |
+ ENA_ADMIN_HOST_INFO_INTERRUPT_MODERATION_MASK |
+ ENA_ADMIN_HOST_INFO_RX_BUF_MIRRORING_MASK |
+- ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK;
++ ENA_ADMIN_HOST_INFO_RSS_CONFIGURABLE_FUNCTION_KEY_MASK |
++ ENA_ADMIN_HOST_INFO_RX_PAGE_REUSE_MASK;
+
+ rc = ena_com_set_host_attributes(ena_dev);
+ if (rc) {
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index 4ad5a086b47ea..de54815845ab3 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -50,6 +50,8 @@
+ #define ENA_DEFAULT_RING_SIZE (1024)
+ #define ENA_MIN_RING_SIZE (256)
+
++#define ENA_MIN_RX_BUF_SIZE (2048)
++
+ #define ENA_MIN_NUM_IO_QUEUES (1)
+
+ #define ENA_TX_WAKEUP_THRESH (MAX_SKB_FRAGS + 2)
+@@ -186,7 +188,9 @@ struct ena_tx_buffer {
+ struct ena_rx_buffer {
+ struct sk_buff *skb;
+ struct page *page;
++ dma_addr_t dma_addr;
+ u32 page_offset;
++ u32 buf_offset;
+ struct ena_com_buf ena_buf;
+ } ____cacheline_aligned;
+
+--
+2.43.0
+
--- /dev/null
+From 9082b0cad5261964a74a0a18c3e3b3dff4242bb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 29 Jan 2022 08:53:36 +0000
+Subject: net: ena: Do not waste napi skb cache
+
+From: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+
+[ Upstream commit 7354a426e063e108c0a3590f13abc77573172576 ]
+
+Profiling showed that the ena device driver allocates skbs with
+build_skb() and frees them with napi_skb_cache_put(). Because the
+driver does not use the napi skb cache in the allocation path, the
+napi skb cache is periodically filled and then flushed. This wastes
+the napi skb cache.
+
+As ena_alloc_skb() is called only in napi context, use napi_build_skb()
+and napi_alloc_skb() when allocating skbs.
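+
+Sketched, the allocation side now pairs with the napi cache used on the
+free path (the helper name is illustrative):
+
+  static struct sk_buff *demo_alloc_skb(struct napi_struct *napi,
+                                        void *first_frag, unsigned int len)
+  {
+          /* Both variants draw from the per-CPU napi skb cache that
+           * napi_skb_cache_put() refills when skbs are freed.
+           */
+          if (!first_frag)
+                  return napi_alloc_skb(napi, len); /* copybreak/small packet */
+
+          return napi_build_skb(first_frag, len);   /* wrap an existing frag */
+  }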
+
+This patch was tested on aws a1.metal instance.
+
+[ jwiedmann.dev@gmail.com: Use napi_alloc_skb() instead of
+ netdev_alloc_skb_ip_align() to keep things consistent. ]
+
+Signed-off-by: Hyeonggon Yoo <42.hyeyoo@gmail.com>
+Acked-by: Shay Agroskin <shayagr@amazon.com>
+Link: https://lore.kernel.org/r/YfUAkA9BhyOJRT4B@ip-172-31-19-208.ap-northeast-1.compute.internal
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index cf8148a159ee0..e2b43d0f90784 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -1418,10 +1418,9 @@ static struct sk_buff *ena_alloc_skb(struct ena_ring *rx_ring, void *first_frag)
+ struct sk_buff *skb;
+
+ if (!first_frag)
+- skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+- rx_ring->rx_copybreak);
++ skb = napi_alloc_skb(rx_ring->napi, rx_ring->rx_copybreak);
+ else
+- skb = build_skb(first_frag, ENA_PAGE_SIZE);
++ skb = napi_build_skb(first_frag, ENA_PAGE_SIZE);
+
+ if (unlikely(!skb)) {
+ ena_increase_stat(&rx_ring->rx_stats.skb_alloc_fail, 1,
+--
+2.43.0
+
--- /dev/null
+From f2b7b0ce3cb43e019dc08d974678a265cde5d5bd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Jan 2022 20:23:46 +0000
+Subject: net: ena: Extract recurring driver reset code into a function
+
+From: Arthur Kiyanovski <akiyano@amazon.com>
+
+[ Upstream commit 9fe890cc5bb84d6859d9a2422830b7fd6fd20521 ]
+
+Create an inline function for resetting the driver
+to reduce code duplication.
+
+Signed-off-by: Nati Koler <nkoler@amazon.com>
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 45 ++++++--------------
+ drivers/net/ethernet/amazon/ena/ena_netdev.h | 9 ++++
+ 2 files changed, 23 insertions(+), 31 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index 3ea449be7bdc3..cf8148a159ee0 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -103,7 +103,7 @@ static void ena_tx_timeout(struct net_device *dev, unsigned int txqueue)
+ if (test_and_set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+ return;
+
+- adapter->reset_reason = ENA_REGS_RESET_OS_NETDEV_WD;
++ ena_reset_device(adapter, ENA_REGS_RESET_OS_NETDEV_WD);
+ ena_increase_stat(&adapter->dev_stats.tx_timeout, 1, &adapter->syncp);
+
+ netif_err(adapter, tx_err, dev, "Transmit time out\n");
+@@ -166,11 +166,9 @@ static int ena_xmit_common(struct net_device *dev,
+ "Failed to prepare tx bufs\n");
+ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+ &ring->syncp);
+- if (rc != -ENOMEM) {
+- adapter->reset_reason =
+- ENA_REGS_RESET_DRIVER_INVALID_STATE;
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+- }
++ if (rc != -ENOMEM)
++ ena_reset_device(adapter,
++ ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ return rc;
+ }
+
+@@ -1297,10 +1295,8 @@ static int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
+ req_id);
+
+ ena_increase_stat(&ring->tx_stats.bad_req_id, 1, &ring->syncp);
++ ena_reset_device(ring->adapter, ENA_REGS_RESET_INV_TX_REQ_ID);
+
+- /* Trigger device reset */
+- ring->adapter->reset_reason = ENA_REGS_RESET_INV_TX_REQ_ID;
+- set_bit(ENA_FLAG_TRIGGER_RESET, &ring->adapter->flags);
+ return -EFAULT;
+ }
+
+@@ -1463,10 +1459,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ netif_err(adapter, rx_err, rx_ring->netdev,
+ "Page is NULL. qid %u req_id %u\n", rx_ring->qid, req_id);
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1, &rx_ring->syncp);
+- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
+- /* Make sure reset reason is set before triggering the reset */
+- smp_mb__before_atomic();
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
+ return NULL;
+ }
+
+@@ -1806,15 +1799,12 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ if (rc == -ENOSPC) {
+ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+ &rx_ring->syncp);
+- adapter->reset_reason = ENA_REGS_RESET_TOO_MANY_RX_DESCS;
++ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else {
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+ &rx_ring->syncp);
+- adapter->reset_reason = ENA_REGS_RESET_INV_RX_REQ_ID;
++ ena_reset_device(adapter, ENA_REGS_RESET_INV_RX_REQ_ID);
+ }
+-
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+-
+ return 0;
+ }
+
+@@ -3740,9 +3730,8 @@ static int check_for_rx_interrupt_queue(struct ena_adapter *adapter,
+ netif_err(adapter, rx_err, adapter->netdev,
+ "Potential MSIX issue on Rx side Queue = %d. Reset the device\n",
+ rx_ring->qid);
+- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+- smp_mb__before_atomic();
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++
++ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
+ return -EIO;
+ }
+
+@@ -3779,9 +3768,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ netif_err(adapter, tx_err, adapter->netdev,
+ "Potential MSIX issue on Tx side Queue = %d. Reset the device\n",
+ tx_ring->qid);
+- adapter->reset_reason = ENA_REGS_RESET_MISS_INTERRUPT;
+- smp_mb__before_atomic();
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++ ena_reset_device(adapter, ENA_REGS_RESET_MISS_INTERRUPT);
+ return -EIO;
+ }
+
+@@ -3807,9 +3794,7 @@ static int check_missing_comp_in_tx_queue(struct ena_adapter *adapter,
+ "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+ missed_tx,
+ adapter->missing_tx_completion_threshold);
+- adapter->reset_reason =
+- ENA_REGS_RESET_MISS_TX_CMPL;
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++ ena_reset_device(adapter, ENA_REGS_RESET_MISS_TX_CMPL);
+ rc = -EIO;
+ }
+
+@@ -3933,8 +3918,7 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
+ "Keep alive watchdog timeout.\n");
+ ena_increase_stat(&adapter->dev_stats.wd_expired, 1,
+ &adapter->syncp);
+- adapter->reset_reason = ENA_REGS_RESET_KEEP_ALIVE_TO;
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++ ena_reset_device(adapter, ENA_REGS_RESET_KEEP_ALIVE_TO);
+ }
+ }
+
+@@ -3945,8 +3929,7 @@ static void check_for_admin_com_state(struct ena_adapter *adapter)
+ "ENA admin queue is not in running state!\n");
+ ena_increase_stat(&adapter->dev_stats.admin_q_pause, 1,
+ &adapter->syncp);
+- adapter->reset_reason = ENA_REGS_RESET_ADMIN_TO;
+- set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++ ena_reset_device(adapter, ENA_REGS_RESET_ADMIN_TO);
+ }
+ }
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+index bf2a39c91c00d..4ad5a086b47ea 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -410,6 +410,15 @@ int ena_set_rx_copybreak(struct ena_adapter *adapter, u32 rx_copybreak);
+
+ int ena_get_sset_count(struct net_device *netdev, int sset);
+
++static inline void ena_reset_device(struct ena_adapter *adapter,
++ enum ena_regs_reset_reason_types reset_reason)
++{
++ adapter->reset_reason = reset_reason;
++ /* Make sure reset reason is set before triggering the reset */
++ smp_mb__before_atomic();
++ set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
++}
++
+ enum ena_xdp_errors_t {
+ ENA_XDP_ALLOWED = 0,
+ ENA_XDP_CURRENT_MTU_TOO_LARGE,
+--
+2.43.0
+
--- /dev/null
+From 1773979668cb085dd20712503a13d29816de3b5e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 May 2024 20:09:12 +0300
+Subject: net: ena: Fix redundant device NUMA node override
+
+From: Shay Agroskin <shayagr@amazon.com>
+
+[ Upstream commit 2dc8b1e7177d4f49f492ce648440caf2de0c3616 ]
+
+The driver overrides the NUMA node id of the device regardless of
+whether it knows its correct value (often setting it to -1 even though
+the node id is advertised in 'struct device'). This can lead to
+suboptimal configurations.
+
+This patch fixes this behavior and makes the shared memory allocation
+functions use the NUMA node id advertised by the underlying device.
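+
+A sketch of the behaviour after the change (the helper name is made up):
+
+  static void *demo_alloc_queue_mem(struct ena_com_dev *ena_dev, size_t size,
+                                    dma_addr_t *dma_handle)
+  {
+          /* dma_alloc_coherent() already allocates on the NUMA node
+           * reported by dev_to_node(ena_dev->dmadev), so no set_dev_node()
+           * override is needed around the call.
+           */
+          return dma_alloc_coherent(ena_dev->dmadev, size, dma_handle,
+                                    GFP_KERNEL);
+  }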
+
+Fixes: 1738cd3ed342 ("net: ena: Add a driver for Amazon Elastic Network Adapters (ENA)")
+Signed-off-by: Shay Agroskin <shayagr@amazon.com>
+Link: https://lore.kernel.org/r/20240528170912.1204417-1-shayagr@amazon.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 11 -----------
+ 1 file changed, 11 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index e733419dd3f49..276f6a8631fb1 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -312,7 +312,6 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_sq *io_sq)
+ {
+ size_t size;
+- int dev_node = 0;
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+
+@@ -325,12 +324,9 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+ if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
+- dev_node = dev_to_node(ena_dev->dmadev);
+- set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+- set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, size,
+@@ -354,10 +350,7 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ size = (size_t)io_sq->bounce_buf_ctrl.buffer_size *
+ io_sq->bounce_buf_ctrl.buffers_num;
+
+- dev_node = dev_to_node(ena_dev->dmadev);
+- set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+- set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+@@ -397,7 +390,6 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ struct ena_com_io_cq *io_cq)
+ {
+ size_t size;
+- int prev_node = 0;
+
+ memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
+
+@@ -409,11 +401,8 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+- prev_node = dev_to_node(ena_dev->dmadev);
+- set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_cq->cdesc_addr.virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+- set_dev_node(ena_dev->dmadev, prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ io_cq->cdesc_addr.virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+--
+2.43.0
+
--- /dev/null
+From d243225b167875d8e9944a872633ff1f8ca7ecb2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jan 2024 09:53:53 +0000
+Subject: net: ena: Reduce lines with longer column width boundary
+
+From: David Arinzon <darinzon@amazon.com>
+
+[ Upstream commit 50613650c3d6255cef13a129ccaa919ca73a6743 ]
+
+This patch reduces the line count by removing newlines where
+additional variables or print strings can be pulled back onto the
+previous line while still adhering to the styling guidelines.
+
+Signed-off-by: David Arinzon <darinzon@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 2dc8b1e7177d ("net: ena: Fix redundant device NUMA node override")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 315 +++++++-----------
+ drivers/net/ethernet/amazon/ena/ena_eth_com.c | 49 ++-
+ drivers/net/ethernet/amazon/ena/ena_eth_com.h | 15 +-
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 32 +-
+ 4 files changed, 151 insertions(+), 260 deletions(-)
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
+index 4db689372980e..e733419dd3f49 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
+ struct ena_com_admin_sq *sq = &admin_queue->sq;
+ u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+
+- sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+- &sq->dma_addr, GFP_KERNEL);
++ sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
+
+ if (!sq->entries) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
+ struct ena_com_admin_cq *cq = &admin_queue->cq;
+ u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+
+- cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
+- &cq->dma_addr, GFP_KERNEL);
++ cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
+
+ if (!cq->entries) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+
+ ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
+ size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
+- aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
+- &aenq->dma_addr, GFP_KERNEL);
++ aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
+
+ if (!aenq->entries) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
+
+ aenq_caps = 0;
+ aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
+- aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
+- << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+- ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
++ aenq_caps |=
++ (sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
++ ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+ writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
+
+ if (unlikely(!aenq_handlers)) {
+- netdev_err(ena_dev->net_device,
+- "AENQ handlers pointer is NULL\n");
++ netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
+ return -EINVAL;
+ }
+
+@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
+ }
+
+ if (unlikely(!admin_queue->comp_ctx)) {
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Completion context is NULL\n");
++ netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
+ return NULL;
+ }
+
+ if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Completion context is occupied\n");
++ netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
+ return NULL;
+ }
+
+@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
+ /* In case of queue FULL */
+ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
+ if (cnt >= admin_queue->q_depth) {
+- netdev_dbg(admin_queue->ena_dev->net_device,
+- "Admin queue is full.\n");
++ netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
+ admin_queue->stats.out_of_space++;
+ return ERR_PTR(-ENOSPC);
+ }
+@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
+ struct ena_comp_ctx *comp_ctx;
+ u16 i;
+
+- admin_queue->comp_ctx =
+- devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
++ admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+ if (unlikely(!admin_queue->comp_ctx)) {
+ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+ return -ENOMEM;
+@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+ dev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_sq->desc_addr.virt_addr =
+- dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_sq->desc_addr.phys_addr,
++ dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
+ GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->desc_addr.virt_addr) {
+ io_sq->desc_addr.virt_addr =
+ dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_sq->desc_addr.phys_addr,
+- GFP_KERNEL);
++ &io_sq->desc_addr.phys_addr, GFP_KERNEL);
+ }
+
+ if (!io_sq->desc_addr.virt_addr) {
+- netdev_err(ena_dev->net_device,
+- "Memory allocation failed\n");
++ netdev_err(ena_dev->net_device, "Memory allocation failed\n");
+ return -ENOMEM;
+ }
+ }
+@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
+
+ dev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+- io_sq->bounce_buf_ctrl.base_buffer =
+- devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
++ io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, dev_node);
+ if (!io_sq->bounce_buf_ctrl.base_buffer)
+ io_sq->bounce_buf_ctrl.base_buffer =
+ devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+ if (!io_sq->bounce_buf_ctrl.base_buffer) {
+- netdev_err(ena_dev->net_device,
+- "Bounce buffer memory allocation failed\n");
++ netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
+ prev_node = dev_to_node(ena_dev->dmadev);
+ set_dev_node(ena_dev->dmadev, ctx->numa_node);
+ io_cq->cdesc_addr.virt_addr =
+- dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
++ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+ set_dev_node(ena_dev->dmadev, prev_node);
+ if (!io_cq->cdesc_addr.virt_addr) {
+ io_cq->cdesc_addr.virt_addr =
+- dma_alloc_coherent(ena_dev->dmadev, size,
+- &io_cq->cdesc_addr.phys_addr,
++ dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
+ GFP_KERNEL);
+ }
+
+@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
+ u8 comp_status)
+ {
+ if (unlikely(comp_status != 0))
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Admin command failed[%u]\n", comp_status);
++ netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
++ comp_status);
+
+ switch (comp_status) {
+ case ENA_ADMIN_SUCCESS:
+@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+ }
+
+ if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
+- netdev_err(admin_queue->ena_dev->net_device,
+- "Command was aborted\n");
++ netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ admin_queue->stats.aborted_cmd++;
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
+ goto err;
+ }
+
+- WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
+- comp_ctx->status);
++ WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
+
+ ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
+ err:
+@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set LLQ configurations: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
+
+ return ret;
+ }
+@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ llq_default_cfg->llq_header_location;
+ } else {
+ netdev_err(ena_dev->net_device,
+- "Invalid header location control, supported: 0x%x\n",
+- supported_feat);
++ "Invalid header location control, supported: 0x%x\n", supported_feat);
+ return -EINVAL;
+ }
+
+@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+
+ netdev_err(ena_dev->net_device,
+ "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+- llq_default_cfg->llq_stride_ctrl,
+- supported_feat, llq_info->desc_stride_ctrl);
++ llq_default_cfg->llq_stride_ctrl, supported_feat,
++ llq_info->desc_stride_ctrl);
+ }
+ } else {
+ llq_info->desc_stride_ctrl = 0;
+@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+ llq_info->desc_list_entry_size = 256;
+ } else {
+ netdev_err(ena_dev->net_device,
+- "Invalid entry_size_ctrl, supported: 0x%x\n",
+- supported_feat);
++ "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
+ return -EINVAL;
+ }
+
+@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+
+ netdev_err(ena_dev->net_device,
+ "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+- llq_default_cfg->llq_num_decs_before_header,
+- supported_feat, llq_info->descs_num_before_header);
++ llq_default_cfg->llq_num_decs_before_header, supported_feat,
++ llq_info->descs_num_before_header);
+ }
+ /* Check for accelerated queue supported */
+ llq_accel_mode_get = llq_features->accel_mode.u.get;
+@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+
+ rc = ena_com_set_llq(ena_dev);
+ if (rc)
+- netdev_err(ena_dev->net_device,
+- "Cannot set LLQ configuration: %d\n", rc);
++ netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
+
+ return rc;
+ }
+@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+ int ret;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+- usecs_to_jiffies(
+- admin_queue->completion_timeout));
++ usecs_to_jiffies(admin_queue->completion_timeout));
+
+ /* In case the command wasn't completed find out the root cause.
+ * There might be 2 kinds of errors
+@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
+ if (comp_ctx->status == ENA_CMD_COMPLETED) {
+ netdev_err(admin_queue->ena_dev->net_device,
+ "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
+- comp_ctx->cmd_opcode,
+- admin_queue->auto_polling ? "ON" : "OFF");
++ comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
+ /* Check if fallback to polling is enabled */
+ if (admin_queue->auto_polling)
+ admin_queue->polling = true;
+@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
+ if (unlikely(i == timeout)) {
+ netdev_err(ena_dev->net_device,
+ "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
+- mmio_read->seq_num, offset, read_resp->req_id,
+- read_resp->reg_off);
++ mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
+ ret = ENA_MMIO_READ_TIMEOUT;
+ goto err;
+ }
+
+ if (read_resp->reg_off != offset) {
+- netdev_err(ena_dev->net_device,
+- "Read failure: wrong offset provided\n");
++ netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
+ ret = ENA_MMIO_READ_TIMEOUT;
+ } else {
+ ret = read_resp->reg_val;
+@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+- netdev_err(ena_dev->net_device,
+- "Failed to destroy io sq error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
+
+ return ret;
+ }
+@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ if (io_cq->cdesc_addr.virt_addr) {
+ size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
+
+- dma_free_coherent(ena_dev->dmadev, size,
+- io_cq->cdesc_addr.virt_addr,
++ dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
+ io_cq->cdesc_addr.phys_addr);
+
+ io_cq->cdesc_addr.virt_addr = NULL;
+@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
+ if (io_sq->desc_addr.virt_addr) {
+ size = io_sq->desc_entry_size * io_sq->q_depth;
+
+- dma_free_coherent(ena_dev->dmadev, size,
+- io_sq->desc_addr.virt_addr,
++ dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
+ io_sq->desc_addr.phys_addr);
+
+ io_sq->desc_addr.virt_addr = NULL;
+@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
+ val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+
+ if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
+- netdev_err(ena_dev->net_device,
+- "Reg read timeout occurred\n");
++ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
+- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+- feature_id);
++ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
+ return -EOPNOTSUPP;
+ }
+
+@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
+
+ if (unlikely(ret))
+ netdev_err(ena_dev->net_device,
+- "Failed to submit get_feature command %d error: %d\n",
+- feature_id, ret);
++ "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
+
+ return ret;
+ }
+@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
+
+- if (!ena_com_check_supported_feature_id(ena_dev,
+- ENA_ADMIN_RSS_HASH_FUNCTION))
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
+ return -EOPNOTSUPP;
+
+- rss->hash_key =
+- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+- &rss->hash_key_dma_addr, GFP_KERNEL);
++ rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
++ &rss->hash_key_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_key))
+ return -ENOMEM;
+@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_key)
+- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+- rss->hash_key, rss->hash_key_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
++ rss->hash_key_dma_addr);
+ rss->hash_key = NULL;
+ }
+
+@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
+ {
+ struct ena_rss *rss = &ena_dev->rss;
+
+- rss->hash_ctrl =
+- dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+- &rss->hash_ctrl_dma_addr, GFP_KERNEL);
++ rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
++ &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+
+ if (unlikely(!rss->hash_ctrl))
+ return -ENOMEM;
+@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
+ struct ena_rss *rss = &ena_dev->rss;
+
+ if (rss->hash_ctrl)
+- dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+- rss->hash_ctrl, rss->hash_ctrl_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
++ rss->hash_ctrl_dma_addr);
+ rss->hash_ctrl = NULL;
+ }
+
+@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+- rss->rss_ind_tbl =
+- dma_alloc_coherent(ena_dev->dmadev, tbl_size,
+- &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
++ rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
++ GFP_KERNEL);
+ if (unlikely(!rss->rss_ind_tbl))
+ goto mem_err1;
+
+ tbl_size = (1ULL << log_size) * sizeof(u16);
+- rss->host_rss_ind_tbl =
+- devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
++ rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+ if (unlikely(!rss->host_rss_ind_tbl))
+ goto mem_err2;
+
+@@ -1197,8 +1161,7 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
+ tbl_size = (1ULL << log_size) *
+ sizeof(struct ena_admin_rss_ind_table_entry);
+
+- dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
+- rss->rss_ind_tbl_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
+ rss->rss_ind_tbl = NULL;
+ mem_err1:
+ rss->tbl_log_size = 0;
+@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ &create_cmd.sq_ba,
+ io_sq->desc_addr.phys_addr);
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Memory address set failed\n");
++ netdev_err(ena_dev->net_device, "Memory address set failed\n");
+ return ret;
+ }
+ }
+@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to create IO SQ. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
+ return ret;
+ }
+
+@@ -1292,8 +1253,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
+ cmd_completion.llq_descriptors_offset);
+ }
+
+- netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
+- io_sq->idx, io_sq->q_depth);
++ netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
+
+ return ret;
+ }
+@@ -1420,8 +1380,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ (struct ena_admin_acq_entry *)&cmd_completion,
+ sizeof(cmd_completion));
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to create IO CQ. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
+ return ret;
+ }
+
+@@ -1440,8 +1399,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
+ (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
+ cmd_completion.numa_node_register_offset);
+
+- netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
+- io_cq->idx, io_cq->q_depth);
++ netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
+
+ return ret;
+ }
+@@ -1451,8 +1409,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
+ struct ena_com_io_cq **io_cq)
+ {
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+- netdev_err(ena_dev->net_device,
+- "Invalid queue number %d but the max is %d\n", qid,
++ netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
+ ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+@@ -1492,8 +1449,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+- ena_delay_exponential_backoff_us(exp++,
+- ena_dev->ena_min_poll_delay_us);
++ ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ }
+ spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+@@ -1519,8 +1475,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
+ sizeof(destroy_resp));
+
+ if (unlikely(ret && (ret != -ENODEV)))
+- netdev_err(ena_dev->net_device,
+- "Failed to destroy IO CQ. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -1588,8 +1543,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to config AENQ ret: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
+
+ return ret;
+ }
+@@ -1610,8 +1564,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
+ netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
+
+ if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
+- netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
+- width);
++ netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
+ return -EINVAL;
+ }
+
+@@ -1633,19 +1586,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
+ ctrl_ver = ena_com_reg_bar_read32(ena_dev,
+ ENA_REGS_CONTROLLER_VERSION_OFF);
+
+- if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
+- (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
++ if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+ netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
+ return -ETIME;
+ }
+
+ dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
+- (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
+- ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
++ (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+ ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
+
+- dev_info(ena_dev->dmadev,
+- "ENA controller version: %d.%d.%d implementation version %d\n",
++ dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
+ ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
+ (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
+@@ -1694,20 +1644,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
+
+ size = ADMIN_SQ_SIZE(admin_queue->q_depth);
+ if (sq->entries)
+- dma_free_coherent(ena_dev->dmadev, size, sq->entries,
+- sq->dma_addr);
++ dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
+ sq->entries = NULL;
+
+ size = ADMIN_CQ_SIZE(admin_queue->q_depth);
+ if (cq->entries)
+- dma_free_coherent(ena_dev->dmadev, size, cq->entries,
+- cq->dma_addr);
++ dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
+ cq->entries = NULL;
+
+ size = ADMIN_AENQ_SIZE(aenq->q_depth);
+ if (ena_dev->aenq.entries)
+- dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
+- aenq->dma_addr);
++ dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
+ aenq->entries = NULL;
+ }
+
+@@ -1733,10 +1680,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
+ struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
+
+ spin_lock_init(&mmio_read->lock);
+- mmio_read->read_resp =
+- dma_alloc_coherent(ena_dev->dmadev,
+- sizeof(*mmio_read->read_resp),
+- &mmio_read->read_resp_dma_addr, GFP_KERNEL);
++ mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
++ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (unlikely(!mmio_read->read_resp))
+ goto err;
+
+@@ -1767,8 +1712,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
+ writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
+
+- dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+- mmio_read->read_resp, mmio_read->read_resp_dma_addr);
++ dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
++ mmio_read->read_resp_dma_addr);
+
+ mmio_read->read_resp = NULL;
+ }
+@@ -1800,8 +1745,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
+ }
+
+ if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
+- netdev_err(ena_dev->net_device,
+- "Device isn't ready, abort com init\n");
++ netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
+ return -ENODEV;
+ }
+
+@@ -1878,8 +1822,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+ int ret;
+
+ if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
+- netdev_err(ena_dev->net_device,
+- "Qid (%d) is bigger than max num of queues (%d)\n",
++ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+ ctx->qid, ENA_TOTAL_NUM_QUEUES);
+ return -EINVAL;
+ }
+@@ -1905,8 +1848,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
+
+ if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
+ /* header length is limited to 8 bits */
+- io_sq->tx_max_header_size =
+- min_t(u32, ena_dev->tx_max_header_size, SZ_256);
++ io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+
+ ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
+ if (ret)
+@@ -1938,8 +1880,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
+ struct ena_com_io_cq *io_cq;
+
+ if (qid >= ENA_TOTAL_NUM_QUEUES) {
+- netdev_err(ena_dev->net_device,
+- "Qid (%d) is bigger than max num of queues (%d)\n",
++ netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
+ qid, ENA_TOTAL_NUM_QUEUES);
+ return;
+ }
+@@ -1983,8 +1924,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ if (rc)
+ return rc;
+
+- if (get_resp.u.max_queue_ext.version !=
+- ENA_FEATURE_MAX_QUEUE_EXT_VER)
++ if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
+ return -EINVAL;
+
+ memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
+@@ -2025,18 +1965,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
+
+ if (!rc)
+- memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
+- sizeof(get_resp.u.hw_hints));
++ memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
+ else if (rc == -EOPNOTSUPP)
+- memset(&get_feat_ctx->hw_hints, 0x0,
+- sizeof(get_feat_ctx->hw_hints));
++ memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
+ else
+ return rc;
+
+ rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
+ if (!rc)
+- memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+- sizeof(get_resp.u.llq));
++ memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
+ else if (rc == -EOPNOTSUPP)
+ memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+ else
+@@ -2084,8 +2021,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ aenq_common = &aenq_e->aenq_common_desc;
+
+ /* Go over all the events */
+- while ((READ_ONCE(aenq_common->flags) &
+- ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
++ while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Make sure the phase bit (ownership) is as expected before
+ * reading the rest of the descriptor.
+ */
+@@ -2094,8 +2030,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+ timestamp = (u64)aenq_common->timestamp_low |
+ ((u64)aenq_common->timestamp_high << 32);
+
+- netdev_dbg(ena_dev->net_device,
+- "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
++ netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+ aenq_common->group, aenq_common->syndrome, timestamp);
+
+ /* Handle specific event*/
+@@ -2124,8 +2059,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
+
+ /* write the aenq doorbell after all AENQ descriptors were read */
+ mb();
+- writel_relaxed((u32)aenq->head,
+- ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
++ writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+ }
+
+ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+@@ -2137,15 +2071,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
+ cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
+
+- if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
+- (cap == ENA_MMIO_READ_TIMEOUT))) {
++ if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
+ netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
+ return -ETIME;
+ }
+
+ if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
+- netdev_err(ena_dev->net_device,
+- "Device isn't ready, can't reset device\n");
++ netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
+ return -EINVAL;
+ }
+
+@@ -2168,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ rc = wait_for_reset_state(ena_dev, timeout,
+ ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
+ if (rc != 0) {
+- netdev_err(ena_dev->net_device,
+- "Reset indication didn't turn on\n");
++ netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
+ return rc;
+ }
+
+@@ -2177,8 +2108,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
+ writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
+ rc = wait_for_reset_state(ena_dev, timeout, 0);
+ if (rc != 0) {
+- netdev_err(ena_dev->net_device,
+- "Reset indication didn't turn off\n");
++ netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
+ return rc;
+ }
+
+@@ -2215,8 +2145,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
+ sizeof(*get_resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to get stats. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -2228,8 +2157,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
+ int ret;
+
+ if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
+- netdev_err(ena_dev->net_device,
+- "Capability %d isn't supported\n",
++ netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
+ ENA_ADMIN_ENI_STATS);
+ return -EOPNOTSUPP;
+ }
+@@ -2266,8 +2194,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+ int ret;
+
+ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
+- netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+- ENA_ADMIN_MTU);
++ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
+ return -EOPNOTSUPP;
+ }
+
+@@ -2286,8 +2213,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set mtu %d. error: %d\n", mtu, ret);
++ netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
+
+ return ret;
+ }
+@@ -2301,8 +2227,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
+ ret = ena_com_get_feature(ena_dev, &resp,
+ ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to get offload capabilities %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
+ return ret;
+ }
+
+@@ -2320,8 +2245,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ struct ena_admin_get_feat_resp get_resp;
+ int ret;
+
+- if (!ena_com_check_supported_feature_id(ena_dev,
+- ENA_ADMIN_RSS_HASH_FUNCTION)) {
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_FUNCTION);
+ return -EOPNOTSUPP;
+@@ -2334,8 +2258,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ return ret;
+
+ if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
+- netdev_err(ena_dev->net_device,
+- "Func hash %d isn't supported by device, abort\n",
++ netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
+ rss->hash_func);
+ return -EOPNOTSUPP;
+ }
+@@ -2365,8 +2288,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret)) {
+- netdev_err(ena_dev->net_device,
+- "Failed to set hash function %d. error: %d\n",
++ netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
+ rss->hash_func, ret);
+ return -EINVAL;
+ }
+@@ -2398,16 +2320,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
+ return rc;
+
+ if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
+- netdev_err(ena_dev->net_device,
+- "Flow hash function %d isn't supported\n", func);
++ netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
+ return -EOPNOTSUPP;
+ }
+
+ if ((func == ENA_ADMIN_TOEPLITZ) && key) {
+ if (key_len != sizeof(hash_key->key)) {
+ netdev_err(ena_dev->net_device,
+- "key len (%u) doesn't equal the supported size (%zu)\n",
+- key_len, sizeof(hash_key->key));
++ "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
++ sizeof(hash_key->key));
+ return -EINVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+@@ -2495,8 +2416,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+- if (!ena_com_check_supported_feature_id(ena_dev,
+- ENA_ADMIN_RSS_HASH_INPUT)) {
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_HASH_INPUT);
+ return -EOPNOTSUPP;
+@@ -2527,8 +2447,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
+ (struct ena_admin_acq_entry *)&resp,
+ sizeof(resp));
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set hash input. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -2605,8 +2524,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
+ int rc;
+
+ if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
+- netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
+- proto);
++ netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
+ return -EINVAL;
+ }
+
+@@ -2658,8 +2576,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+ struct ena_admin_set_feat_resp resp;
+ int ret;
+
+- if (!ena_com_check_supported_feature_id(
+- ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
++ if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
+ return -EOPNOTSUPP;
+@@ -2699,8 +2616,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set indirect table. error: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
+
+ return ret;
+ }
+@@ -2779,9 +2695,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
+ {
+ struct ena_host_attribute *host_attr = &ena_dev->host_attr;
+
+- host_attr->host_info =
+- dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+- &host_attr->host_info_dma_addr, GFP_KERNEL);
++ host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
++ &host_attr->host_info_dma_addr, GFP_KERNEL);
+ if (unlikely(!host_attr->host_info))
+ return -ENOMEM;
+
+@@ -2827,8 +2742,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
+
+ if (host_attr->debug_area_virt_addr) {
+ dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
+- host_attr->debug_area_virt_addr,
+- host_attr->debug_area_dma_addr);
++ host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
+ host_attr->debug_area_virt_addr = NULL;
+ }
+ }
+@@ -2877,8 +2791,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
+ sizeof(resp));
+
+ if (unlikely(ret))
+- netdev_err(ena_dev->net_device,
+- "Failed to set host attributes: %d\n", ret);
++ netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
+
+ return ret;
+ }
+@@ -2896,8 +2809,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
+ u32 *intr_moder_interval)
+ {
+ if (!intr_delay_resolution) {
+- netdev_err(ena_dev->net_device,
+- "Illegal interrupt delay granularity value\n");
++ netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
+ return -EFAULT;
+ }
+
+@@ -2935,14 +2847,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+- netdev_dbg(ena_dev->net_device,
+- "Feature %d isn't supported\n",
++ netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
+ ENA_ADMIN_INTERRUPT_MODERATION);
+ rc = 0;
+ } else {
+ netdev_err(ena_dev->net_device,
+- "Failed to get interrupt moderation admin cmd. rc: %d\n",
+- rc);
++ "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
+ }
+
+ /* no moderation supported, disable adaptive support */
+@@ -2990,8 +2900,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+ (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
+
+ if (unlikely(ena_dev->tx_max_header_size == 0)) {
+- netdev_err(ena_dev->net_device,
+- "The size of the LLQ entry is smaller than needed\n");
++ netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+index f9f886289b970..933e619b3a313 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+@@ -18,8 +18,7 @@ static struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
+ cdesc = (struct ena_eth_io_rx_cdesc_base *)(io_cq->cdesc_addr.virt_addr
+ + (head_masked * io_cq->cdesc_entry_size_in_bytes));
+
+- desc_phase = (READ_ONCE(cdesc->status) &
+- ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
++ desc_phase = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_PHASE_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_PHASE_SHIFT;
+
+ if (desc_phase != expected_phase)
+@@ -65,8 +64,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+
+ io_sq->entries_in_tx_burst_left--;
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Decreasing entries_in_tx_burst_left of queue %d to %d\n",
+- io_sq->qid, io_sq->entries_in_tx_burst_left);
++ "Decreasing entries_in_tx_burst_left of queue %d to %d\n", io_sq->qid,
++ io_sq->entries_in_tx_burst_left);
+ }
+
+ /* Make sure everything was written into the bounce buffer before
+@@ -75,8 +74,8 @@ static int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+ wmb();
+
+ /* The line is completed. Copy it to dev */
+- __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+- bounce_buffer, (llq_info->desc_list_entry_size) / 8);
++ __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset, bounce_buffer,
++ (llq_info->desc_list_entry_size) / 8);
+
+ io_sq->tail++;
+
+@@ -102,16 +101,14 @@ static int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+ header_offset =
+ llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+- if (unlikely((header_offset + header_len) >
+- llq_info->desc_list_entry_size)) {
++ if (unlikely((header_offset + header_len) > llq_info->desc_list_entry_size)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Trying to write header larger than llq entry can accommodate\n");
+ return -EFAULT;
+ }
+
+ if (unlikely(!bounce_buffer)) {
+- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Bounce buffer is NULL\n");
++ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+ return -EFAULT;
+ }
+
+@@ -129,8 +126,7 @@ static void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+ bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+ if (unlikely(!bounce_buffer)) {
+- netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Bounce buffer is NULL\n");
++ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device, "Bounce buffer is NULL\n");
+ return NULL;
+ }
+
+@@ -247,8 +243,7 @@ static u16 ena_com_cdesc_rx_pkt_get(struct ena_com_io_cq *io_cq,
+
+ ena_com_cq_inc_head(io_cq);
+ count++;
+- last = (READ_ONCE(cdesc->status) &
+- ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
++ last = (READ_ONCE(cdesc->status) & ENA_ETH_IO_RX_CDESC_BASE_LAST_MASK) >>
+ ENA_ETH_IO_RX_CDESC_BASE_LAST_SHIFT;
+ } while (!last);
+
+@@ -369,9 +364,8 @@ static void ena_com_rx_set_flags(struct ena_com_io_cq *io_cq,
+
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+ "l3_proto %d l4_proto %d l3_csum_err %d l4_csum_err %d hash %d frag %d cdesc_status %x\n",
+- ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto,
+- ena_rx_ctx->l3_csum_err, ena_rx_ctx->l4_csum_err,
+- ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
++ ena_rx_ctx->l3_proto, ena_rx_ctx->l4_proto, ena_rx_ctx->l3_csum_err,
++ ena_rx_ctx->l4_csum_err, ena_rx_ctx->hash, ena_rx_ctx->frag, cdesc->status);
+ }
+
+ /*****************************************************************************/
+@@ -403,13 +397,12 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
+
+ if (unlikely(header_len > io_sq->tx_max_header_size)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Header size is too large %d max header: %d\n",
+- header_len, io_sq->tx_max_header_size);
++ "Header size is too large %d max header: %d\n", header_len,
++ io_sq->tx_max_header_size);
+ return -EINVAL;
+ }
+
+- if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+- !buffer_to_push)) {
++ if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV && !buffer_to_push)) {
+ netdev_err(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+ "Push header wasn't provided in LLQ mode\n");
+ return -EINVAL;
+@@ -556,13 +549,11 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ }
+
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid,
+- nb_hw_desc);
++ "Fetch rx packet: queue %d completed desc: %d\n", io_cq->qid, nb_hw_desc);
+
+ if (unlikely(nb_hw_desc > ena_rx_ctx->max_bufs)) {
+ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc,
+- ena_rx_ctx->max_bufs);
++ "Too many RX cdescs (%d) > MAX(%d)\n", nb_hw_desc, ena_rx_ctx->max_bufs);
+ return -ENOSPC;
+ }
+
+@@ -586,8 +577,8 @@ int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
+ io_sq->next_to_comp += nb_hw_desc;
+
+ netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "[%s][QID#%d] Updating SQ head to: %d\n", __func__,
+- io_sq->qid, io_sq->next_to_comp);
++ "[%s][QID#%d] Updating SQ head to: %d\n", __func__, io_sq->qid,
++ io_sq->next_to_comp);
+
+ /* Get rx flags from the last pkt */
+ ena_com_rx_set_flags(io_cq, ena_rx_ctx, cdesc);
+@@ -624,8 +615,8 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
+ desc->req_id = req_id;
+
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "[%s] Adding single RX desc, Queue: %u, req_id: %u\n",
+- __func__, io_sq->qid, req_id);
++ "[%s] Adding single RX desc, Queue: %u, req_id: %u\n", __func__, io_sq->qid,
++ req_id);
+
+ desc->buff_addr_lo = (u32)ena_buf->paddr;
+ desc->buff_addr_hi =
+diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+index 689313ee25a80..07029eee78caf 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
++++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+@@ -141,8 +141,8 @@ static inline bool ena_com_is_doorbell_needed(struct ena_com_io_sq *io_sq,
+ }
+
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Queue: %d num_descs: %d num_entries_needed: %d\n",
+- io_sq->qid, num_descs, num_entries_needed);
++ "Queue: %d num_descs: %d num_entries_needed: %d\n", io_sq->qid, num_descs,
++ num_entries_needed);
+
+ return num_entries_needed > io_sq->entries_in_tx_burst_left;
+ }
+@@ -153,15 +153,14 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+ u16 tail = io_sq->tail;
+
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Write submission queue doorbell for queue: %d tail: %d\n",
+- io_sq->qid, tail);
++ "Write submission queue doorbell for queue: %d tail: %d\n", io_sq->qid, tail);
+
+ writel(tail, io_sq->db_addr);
+
+ if (is_llq_max_tx_burst_exists(io_sq)) {
+ netdev_dbg(ena_com_io_sq_to_ena_dev(io_sq)->net_device,
+- "Reset available entries in tx burst for queue %d to %d\n",
+- io_sq->qid, max_entries_in_tx_burst);
++ "Reset available entries in tx burst for queue %d to %d\n", io_sq->qid,
++ max_entries_in_tx_burst);
+ io_sq->entries_in_tx_burst_left = max_entries_in_tx_burst;
+ }
+
+@@ -244,8 +243,8 @@ static inline int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq,
+
+ *req_id = READ_ONCE(cdesc->req_id);
+ if (unlikely(*req_id >= io_cq->q_depth)) {
+- netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
+- "Invalid req id %d\n", cdesc->req_id);
++ netdev_err(ena_com_io_cq_to_ena_dev(io_cq)->net_device, "Invalid req id %d\n",
++ cdesc->req_id);
+ return -EINVAL;
+ }
+
+diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+index c407fad52aeb3..8eb3881f4f6fd 100644
+--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -164,11 +164,9 @@ static int ena_xmit_common(struct net_device *dev,
+ if (unlikely(rc)) {
+ netif_err(adapter, tx_queued, dev,
+ "Failed to prepare tx bufs\n");
+- ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1,
+- &ring->syncp);
++ ena_increase_stat(&ring->tx_stats.prepare_ctx_err, 1, &ring->syncp);
+ if (rc != -ENOMEM)
+- ena_reset_device(adapter,
+- ENA_REGS_RESET_DRIVER_INVALID_STATE);
++ ena_reset_device(adapter, ENA_REGS_RESET_DRIVER_INVALID_STATE);
+ return rc;
+ }
+
+@@ -992,8 +990,7 @@ static struct page *ena_alloc_map_page(struct ena_ring *rx_ring,
+ */
+ page = dev_alloc_page();
+ if (!page) {
+- ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1,
+- &rx_ring->syncp);
++ ena_increase_stat(&rx_ring->rx_stats.page_alloc_fail, 1, &rx_ring->syncp);
+ return ERR_PTR(-ENOSPC);
+ }
+
+@@ -1052,8 +1049,8 @@ static void ena_unmap_rx_buff_attrs(struct ena_ring *rx_ring,
+ struct ena_rx_buffer *rx_info,
+ unsigned long attrs)
+ {
+- dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE,
+- DMA_BIDIRECTIONAL, attrs);
++ dma_unmap_page_attrs(rx_ring->dev, rx_info->dma_addr, ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
++ attrs);
+ }
+
+ static void ena_free_rx_page(struct ena_ring *rx_ring,
+@@ -1344,8 +1341,7 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
+ &req_id);
+ if (rc) {
+ if (unlikely(rc == -EINVAL))
+- handle_invalid_req_id(tx_ring, req_id, NULL,
+- false);
++ handle_invalid_req_id(tx_ring, req_id, NULL, false);
+ break;
+ }
+
+@@ -1583,8 +1579,7 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
+ DMA_FROM_DEVICE);
+
+ if (!reuse_rx_buf_page)
+- ena_unmap_rx_buff_attrs(rx_ring, rx_info,
+- DMA_ATTR_SKIP_CPU_SYNC);
++ ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
+ page_offset + buf_offset, len, buf_len);
+@@ -1849,8 +1844,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
+ adapter = netdev_priv(rx_ring->netdev);
+
+ if (rc == -ENOSPC) {
+- ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1,
+- &rx_ring->syncp);
++ ena_increase_stat(&rx_ring->rx_stats.bad_desc_num, 1, &rx_ring->syncp);
+ ena_reset_device(adapter, ENA_REGS_RESET_TOO_MANY_RX_DESCS);
+ } else {
+ ena_increase_stat(&rx_ring->rx_stats.bad_req_id, 1,
+@@ -2397,8 +2391,7 @@ static int ena_rss_configure(struct ena_adapter *adapter)
+ if (!ena_dev->rss.tbl_log_size) {
+ rc = ena_rss_init_default(adapter);
+ if (rc && (rc != -EOPNOTSUPP)) {
+- netif_err(adapter, ifup, adapter->netdev,
+- "Failed to init RSS rc: %d\n", rc);
++ netif_err(adapter, ifup, adapter->netdev, "Failed to init RSS rc: %d\n", rc);
+ return rc;
+ }
+ }
+@@ -3315,8 +3308,7 @@ static void ena_config_debug_area(struct ena_adapter *adapter)
+ rc = ena_com_set_host_attributes(adapter->ena_dev);
+ if (rc) {
+ if (rc == -EOPNOTSUPP)
+- netif_warn(adapter, drv, adapter->netdev,
+- "Cannot set host attributes\n");
++ netif_warn(adapter, drv, adapter->netdev, "Cannot set host attributes\n");
+ else
+ netif_err(adapter, drv, adapter->netdev,
+ "Cannot set host attributes\n");
+@@ -4188,8 +4180,8 @@ static int ena_rss_init_default(struct ena_adapter *adapter)
+ }
+ }
+
+- rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL,
+- ENA_HASH_KEY_SIZE, 0xFFFFFFFF);
++ rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ, NULL, ENA_HASH_KEY_SIZE,
++ 0xFFFFFFFF);
+ if (unlikely(rc && (rc != -EOPNOTSUPP))) {
+ dev_err(dev, "Cannot fill hash function\n");
+ goto err_fill_indir;
+--
+2.43.0
+
--- /dev/null
+From 2bbb315556d1cfe3c600b5d666725b86387e7600 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 May 2024 13:05:28 +0800
+Subject: net:fec: Add fec_enet_deinit()
+
+From: Xiaolei Wang <xiaolei.wang@windriver.com>
+
+[ Upstream commit bf0497f53c8535f99b72041529d3f7708a6e2c0d ]
+
+When fec_probe() fails or fec_drv_remove() is called, the driver needs
+to release the fec queues and remove the NAPI context. Add
+fec_enet_deinit(), which does the opposite of fec_enet_init(), and call
+it in those paths to release the memory and remove the NAPI context.
+
+Fixes: 59d0f7465644 ("net: fec: init multi queue date structure")
+Signed-off-by: Xiaolei Wang <xiaolei.wang@windriver.com>
+Reviewed-by: Wei Fang <wei.fang@nxp.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20240524050528.4115581-1-xiaolei.wang@windriver.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/freescale/fec_main.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
+index 972808777f308..f02376555ed45 100644
+--- a/drivers/net/ethernet/freescale/fec_main.c
++++ b/drivers/net/ethernet/freescale/fec_main.c
+@@ -3627,6 +3627,14 @@ static int fec_enet_init(struct net_device *ndev)
+ return ret;
+ }
+
++static void fec_enet_deinit(struct net_device *ndev)
++{
++ struct fec_enet_private *fep = netdev_priv(ndev);
++
++ netif_napi_del(&fep->napi);
++ fec_enet_free_queue(ndev);
++}
++
+ #ifdef CONFIG_OF
+ static int fec_reset_phy(struct platform_device *pdev)
+ {
+@@ -4023,6 +4031,7 @@ fec_probe(struct platform_device *pdev)
+ fec_enet_mii_remove(fep);
+ failed_mii_init:
+ failed_irq:
++ fec_enet_deinit(ndev);
+ failed_init:
+ fec_ptp_stop(pdev);
+ failed_reset:
+@@ -4085,6 +4094,7 @@ fec_drv_remove(struct platform_device *pdev)
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
++ fec_enet_deinit(ndev);
+ free_netdev(ndev);
+ return 0;
+ }
+--
+2.43.0
+
--- /dev/null
+From 97e45c56d7faac7d82cc3441cb4f277fc7c218d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 22:26:56 +0300
+Subject: net/mlx5e: Fix IPsec tunnel mode offload feature check
+
+From: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+
+[ Upstream commit 9a52f6d44f4521773b4699b4ed34b8e21d5a175c ]
+
+Remove the faulty check that disabled checksum offload and GSO for
+offloaded simple IPsec tunnel L4 traffic. The comment previously
+describing the deleted code incorrectly claimed the check prevented a
+double tunnel (i.e. three layers of IP headers).
+
+Fixes: f1267798c980 ("net/mlx5: Fix checksum issue of VXLAN and IPsec crypto offload")
+Signed-off-by: Rahul Rameshbabu <rrameshbabu@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 17 +++++------------
+ 1 file changed, 5 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+index 428881e0adcbe..6621f6cd43151 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h
+@@ -105,18 +105,11 @@ mlx5e_ipsec_feature_check(struct sk_buff *skb, netdev_features_t features)
+ if (!x || !x->xso.offload_handle)
+ goto out_disable;
+
+- if (xo->inner_ipproto) {
+- /* Cannot support tunnel packet over IPsec tunnel mode
+- * because we cannot offload three IP header csum
+- */
+- if (x->props.mode == XFRM_MODE_TUNNEL)
+- goto out_disable;
+-
+- /* Only support UDP or TCP L4 checksum */
+- if (xo->inner_ipproto != IPPROTO_UDP &&
+- xo->inner_ipproto != IPPROTO_TCP)
+- goto out_disable;
+- }
++ /* Only support UDP or TCP L4 checksum */
++ if (xo->inner_ipproto &&
++ xo->inner_ipproto != IPPROTO_UDP &&
++ xo->inner_ipproto != IPPROTO_TCP)
++ goto out_disable;
+
+ return features;
+
+--
+2.43.0
+
--- /dev/null
+From 69cecdfa10d1c8c7d5fcaae714f47e1a1c7059d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 22:26:58 +0300
+Subject: net/mlx5e: Use rx_missed_errors instead of rx_dropped for reporting
+ buffer exhaustion
+
+From: Carolina Jubran <cjubran@nvidia.com>
+
+[ Upstream commit 5c74195d5dd977e97556e6fa76909b831c241230 ]
+
+Previously, the driver incorrectly used rx_dropped to report device
+buffer exhaustion.
+
+According to the documentation, rx_dropped should not be used to count
+packets dropped due to buffer exhaustion, which is the purpose of
+rx_missed_errors.
+
+Use rx_missed_errors as intended for counting packets dropped due to
+buffer exhaustion.
+
+Fixes: 269e6b3af3bf ("net/mlx5e: Report additional error statistics in get stats ndo")
+Signed-off-by: Carolina Jubran <cjubran@nvidia.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+index 923be5fb7d216..79d687c663d54 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+@@ -3185,7 +3185,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+ mlx5e_fold_sw_stats64(priv, stats);
+ }
+
+- stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
++ stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer;
+
+ stats->rx_length_errors =
+ PPORT_802_3_GET(pstats, a_in_range_length_errors) +
+--
+2.43.0
+
--- /dev/null
+From a4d0ccbb807527f65b41b2a9dbb0dcf6d9c11a15 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 08:54:06 +0200
+Subject: net: phy: micrel: set soft_reset callback to genphy_soft_reset for
+ KSZ8061
+
+From: Mathieu Othacehe <othacehe@gnu.org>
+
+[ Upstream commit 128d54fbcb14b8717ecf596d3dbded327b9980b3 ]
+
+Following a similar reinstatement for the KSZ8081 and KSZ9031.
+
+Older kernels would use genphy_soft_reset if the PHY did not implement
+a .soft_reset callback.
+
+The KSZ8061 errata described here:
+https://ww1.microchip.com/downloads/en/DeviceDoc/KSZ8061-Errata-DS80000688B.pdf
+and worked around with 232ba3a51c ("net: phy: Micrel KSZ8061: link failure after cable connect")
+is back again without this soft reset.
+
+Fixes: 6e2d85ec0559 ("net: phy: Stop with excessive soft reset")
+Tested-by: Karim Ben Houcine <karim.benhoucine@landisgyr.com>
+Signed-off-by: Mathieu Othacehe <othacehe@gnu.org>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Reviewed-by: Florian Fainelli <florian.fainelli@broadcom.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/micrel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
+index dc209ad8a0fed..59d05a1672ece 100644
+--- a/drivers/net/phy/micrel.c
++++ b/drivers/net/phy/micrel.c
+@@ -1669,6 +1669,7 @@ static struct phy_driver ksphy_driver[] = {
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ /* PHY_BASIC_FEATURES */
+ .config_init = ksz8061_config_init,
++ .soft_reset = genphy_soft_reset,
+ .config_intr = kszphy_config_intr,
+ .handle_interrupt = kszphy_handle_interrupt,
+ .suspend = genphy_suspend,
+--
+2.43.0
+
--- /dev/null
+From b25ffa995257493b8c826494a2028290307b0fa3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 May 2024 14:23:14 +0530
+Subject: net: usb: smsc95xx: fix changing LED_SEL bit value updated from
+ EEPROM
+
+From: Parthiban Veerasooran <Parthiban.Veerasooran@microchip.com>
+
+[ Upstream commit 52a2f0608366a629d43dacd3191039c95fef74ba ]
+
+LED Select (LED_SEL) bit in the LED General Purpose IO Configuration
+register is used to determine the functionality of external LED pins
+(Speed Indicator, Link and Activity Indicator, Full Duplex Link
+Indicator). The default value for this bit is 0 when no EEPROM is
+present. If an EEPROM is present, the default value is the value of the
+LED Select bit in the Configuration Flags of the EEPROM. A USB Reset or
+Lite Reset (LRST) will cause this bit to be restored to the image value
+last loaded from EEPROM, or to be set to 0 if no EEPROM is present.
+
+While configuring the dual-purpose GPIO/LED pins as LED outputs in the
+LED General Purpose IO Configuration register, the LED_SEL bit is changed
+to 0 and, as a result, the value configured from the EEPROM is cleared.
+Fix the issue by using a read-modify-write approach.
+
+Fixes: f293501c61c5 ("smsc95xx: configure LED outputs")
+Signed-off-by: Parthiban Veerasooran <Parthiban.Veerasooran@microchip.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Woojung Huh <woojung.huh@microchip.com>
+Link: https://lore.kernel.org/r/20240523085314.167650-1-Parthiban.Veerasooran@microchip.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/usb/smsc95xx.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
+index 10fae3989fbdb..8a38939dd57e4 100644
+--- a/drivers/net/usb/smsc95xx.c
++++ b/drivers/net/usb/smsc95xx.c
+@@ -842,7 +842,7 @@ static int smsc95xx_start_rx_path(struct usbnet *dev, int in_pm)
+ static int smsc95xx_reset(struct usbnet *dev)
+ {
+ struct smsc95xx_priv *pdata = dev->driver_priv;
+- u32 read_buf, write_buf, burst_cap;
++ u32 read_buf, burst_cap;
+ int ret = 0, timeout;
+
+ netif_dbg(dev, ifup, dev->net, "entering smsc95xx_reset\n");
+@@ -984,10 +984,13 @@ static int smsc95xx_reset(struct usbnet *dev)
+ return ret;
+ netif_dbg(dev, ifup, dev->net, "ID_REV = 0x%08x\n", read_buf);
+
++ ret = smsc95xx_read_reg(dev, LED_GPIO_CFG, &read_buf);
++ if (ret < 0)
++ return ret;
+ /* Configure GPIO pins as LED outputs */
+- write_buf = LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
+- LED_GPIO_CFG_FDX_LED;
+- ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, write_buf);
++ read_buf |= LED_GPIO_CFG_SPD_LED | LED_GPIO_CFG_LNK_LED |
++ LED_GPIO_CFG_FDX_LED;
++ ret = smsc95xx_write_reg(dev, LED_GPIO_CFG, read_buf);
+ if (ret < 0)
+ return ret;
+
+--
+2.43.0
+
--- /dev/null
+From 65faf6f6b2c12e65ae54d9af9c83216485a84cd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 May 2024 13:23:39 +0000
+Subject: netfilter: nfnetlink_queue: acquire rcu_read_lock() in
+ instance_destroy_rcu()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit dc21c6cc3d6986d938efbf95de62473982c98dec ]
+
+syzbot reported that nf_reinject() could be called without rcu_read_lock():
+
+WARNING: suspicious RCU usage
+6.9.0-rc7-syzkaller-02060-g5c1672705a1a #0 Not tainted
+
+net/netfilter/nfnetlink_queue.c:263 suspicious rcu_dereference_check() usage!
+
+other info that might help us debug this:
+
+rcu_scheduler_active = 2, debug_locks = 1
+2 locks held by syz-executor.4/13427:
+ #0: ffffffff8e334f60 (rcu_callback){....}-{0:0}, at: rcu_lock_acquire include/linux/rcupdate.h:329 [inline]
+ #0: ffffffff8e334f60 (rcu_callback){....}-{0:0}, at: rcu_do_batch kernel/rcu/tree.c:2190 [inline]
+ #0: ffffffff8e334f60 (rcu_callback){....}-{0:0}, at: rcu_core+0xa86/0x1830 kernel/rcu/tree.c:2471
+ #1: ffff88801ca92958 (&inst->lock){+.-.}-{2:2}, at: spin_lock_bh include/linux/spinlock.h:356 [inline]
+ #1: ffff88801ca92958 (&inst->lock){+.-.}-{2:2}, at: nfqnl_flush net/netfilter/nfnetlink_queue.c:405 [inline]
+ #1: ffff88801ca92958 (&inst->lock){+.-.}-{2:2}, at: instance_destroy_rcu+0x30/0x220 net/netfilter/nfnetlink_queue.c:172
+
+stack backtrace:
+CPU: 0 PID: 13427 Comm: syz-executor.4 Not tainted 6.9.0-rc7-syzkaller-02060-g5c1672705a1a #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 04/02/2024
+Call Trace:
+ <IRQ>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0x241/0x360 lib/dump_stack.c:114
+ lockdep_rcu_suspicious+0x221/0x340 kernel/locking/lockdep.c:6712
+ nf_reinject net/netfilter/nfnetlink_queue.c:323 [inline]
+ nfqnl_reinject+0x6ec/0x1120 net/netfilter/nfnetlink_queue.c:397
+ nfqnl_flush net/netfilter/nfnetlink_queue.c:410 [inline]
+ instance_destroy_rcu+0x1ae/0x220 net/netfilter/nfnetlink_queue.c:172
+ rcu_do_batch kernel/rcu/tree.c:2196 [inline]
+ rcu_core+0xafd/0x1830 kernel/rcu/tree.c:2471
+ handle_softirqs+0x2d6/0x990 kernel/softirq.c:554
+ __do_softirq kernel/softirq.c:588 [inline]
+ invoke_softirq kernel/softirq.c:428 [inline]
+ __irq_exit_rcu+0xf4/0x1c0 kernel/softirq.c:637
+ irq_exit_rcu+0x9/0x30 kernel/softirq.c:649
+ instr_sysvec_apic_timer_interrupt arch/x86/kernel/apic/apic.c:1043 [inline]
+ sysvec_apic_timer_interrupt+0xa6/0xc0 arch/x86/kernel/apic/apic.c:1043
+ </IRQ>
+ <TASK>
+
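+A minimal sketch of the underlying rule (hypothetical my_obj/helper
+names, not code from this patch): an RCU callback does not itself run
+inside an RCU read-side critical section, so callees that rely on
+rcu_dereference() need an explicit one:
+
+	static void my_free_rcu(struct rcu_head *head)
+	{
+		struct my_obj *obj = container_of(head, struct my_obj, rcu);
+
+		rcu_read_lock();
+		my_helper_using_rcu_dereference(obj);
+		rcu_read_unlock();
+
+		kfree(obj);
+	}
+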
+Fixes: 9872bec773c2 ("[NETFILTER]: nfnetlink: use RCU for queue instances hash")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nfnetlink_queue.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
+index 8c96e01f6a023..89b16d36da9cf 100644
+--- a/net/netfilter/nfnetlink_queue.c
++++ b/net/netfilter/nfnetlink_queue.c
+@@ -167,7 +167,9 @@ instance_destroy_rcu(struct rcu_head *head)
+ struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
+ rcu);
+
++ rcu_read_lock();
+ nfqnl_flush(inst, NULL, 0);
++ rcu_read_unlock();
+ kfree(inst);
+ module_put(THIS_MODULE);
+ }
+--
+2.43.0
+
--- /dev/null
+From 391ff424846dfbf34bf16dbb9c9eb34ca9fcc413 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 23:55:06 +0200
+Subject: netfilter: nft_payload: move struct nft_payload_set definition where
+ it belongs
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit ac1f8c049319847b1b4c6b387fdb2e3f7fb84ffc ]
+
+There is no need to expose this structure definition in nf_tables_core.h,
+move it to where it is used, i.e. nft_payload.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 33c563ebf8d3 ("netfilter: nft_payload: skbuff vlan metadata mangle support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_tables_core.h | 10 ----------
+ net/netfilter/nft_payload.c | 10 ++++++++++
+ 2 files changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
+index 9dfa11d4224d2..315869fc3fcb8 100644
+--- a/include/net/netfilter/nf_tables_core.h
++++ b/include/net/netfilter/nf_tables_core.h
+@@ -74,16 +74,6 @@ struct nft_payload {
+ u8 dreg;
+ };
+
+-struct nft_payload_set {
+- enum nft_payload_bases base:8;
+- u8 offset;
+- u8 len;
+- u8 sreg;
+- u8 csum_type;
+- u8 csum_offset;
+- u8 csum_flags;
+-};
+-
+ extern const struct nft_expr_ops nft_payload_fast_ops;
+
+ extern const struct nft_expr_ops nft_bitwise_fast_ops;
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 4192f0e366554..e5f0d33a27e61 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -629,6 +629,16 @@ static int nft_payload_csum_inet(struct sk_buff *skb, const u32 *src,
+ return 0;
+ }
+
++struct nft_payload_set {
++ enum nft_payload_bases base:8;
++ u8 offset;
++ u8 len;
++ u8 sreg;
++ u8 csum_type;
++ u8 csum_offset;
++ u8 csum_flags;
++};
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+--
+2.43.0
+
--- /dev/null
+From effc6a397c615d3a9e1ddc135daacbc643cf13ff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Sep 2023 10:42:10 +0200
+Subject: netfilter: nft_payload: rebuild vlan header on h_proto access
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit af84f9e447a65b4b9f79e7e5d69e19039b431c56 ]
+
+nft can perform merging of adjacent payload requests.
+This means that:
+
+ether saddr 00:11 ... ether type 8021ad ...
+
+is a single payload expression, for 8 bytes, starting at the
+ethernet source offset.
+
+Check that offset+length is fully within the source/destination mac
+addresses.
+
+This bug prevents 'ether type' from matching the correct h_proto in case
+the vlan tag got stripped.
+
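+A worked example of the boundary condition (offsets from struct ethhdr;
+illustration only):
+
+	/* "ether saddr ... ether type ..." merges into one payload load:
+	 *   offset = offsetof(struct ethhdr, h_source)         =  6
+	 *   len    = ETH_ALEN + sizeof(__be16)                  =  8
+	 * The offset alone is below offsetof(struct ethhdr, h_proto) = 12,
+	 * but offset + len = 14 crosses h_proto, so the vlan-aware copy
+	 * must still be taken.
+	 */
+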
+Fixes: de6843be3082 ("netfilter: nft_payload: rebuild vlan header when needed")
+Reported-by: David Ward <david.ward@ll.mit.edu>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Stable-dep-of: 33c563ebf8d3 ("netfilter: nft_payload: skbuff vlan metadata mangle support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index b1745304dbd22..697566e4ae759 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -110,6 +110,17 @@ static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+ return pkt->inneroff;
+ }
+
++static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++{
++ unsigned int len = priv->offset + priv->len;
++
++ /* data past ether src/dst requested, copy needed */
++ if (len > offsetof(struct ethhdr, h_proto))
++ return true;
++
++ return false;
++}
++
+ void nft_payload_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+@@ -128,7 +139,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ goto err;
+
+ if (skb_vlan_tag_present(skb) &&
+- priv->offset >= offsetof(struct ethhdr, h_proto)) {
++ nft_payload_need_vlan_copy(priv)) {
+ if (!nft_payload_copy_vlan(dest, skb,
+ priv->offset, priv->len))
+ goto err;
+--
+2.43.0
+
--- /dev/null
+From 28983bb95aecb8c0f0a2498ad54cd548b637f5b6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Jun 2023 09:38:42 +0200
+Subject: netfilter: nft_payload: rebuild vlan header when needed
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit de6843be3082d416eaf2a00b72dad95c784ca980 ]
+
+Skip rebuilding the vlan header when accessing destination and source
+mac addresses.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Stable-dep-of: 33c563ebf8d3 ("netfilter: nft_payload: skbuff vlan metadata mangle support")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index e5f0d33a27e61..b1745304dbd22 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -127,7 +127,8 @@ void nft_payload_eval(const struct nft_expr *expr,
+ if (!skb_mac_header_was_set(skb) || skb_mac_header_len(skb) == 0)
+ goto err;
+
+- if (skb_vlan_tag_present(skb)) {
++ if (skb_vlan_tag_present(skb) &&
++ priv->offset >= offsetof(struct ethhdr, h_proto)) {
+ if (!nft_payload_copy_vlan(dest, skb,
+ priv->offset, priv->len))
+ goto err;
+--
+2.43.0
+
--- /dev/null
+From 644fc9b6b77ab1a77656bd1a2246b7bfb61f6aae Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 May 2024 23:02:24 +0200
+Subject: netfilter: nft_payload: restore vlan q-in-q match support
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit aff5c01fa1284d606f8e7cbdaafeef2511bb46c1 ]
+
+Revert f6ae9f120dad ("netfilter: nft_payload: add C-VLAN support").
+
+f41f72d09ee1 ("netfilter: nft_payload: simplify vlan header handling")
+already allows matching on inner vlan tags by subtracting from the payload
+offset the size of the vlan header which has been popped and stored in
+skbuff metadata fields.
+
+Fixes: f6ae9f120dad ("netfilter: nft_payload: add C-VLAN support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 23 +++++++----------------
+ 1 file changed, 7 insertions(+), 16 deletions(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 02327ffebc495..4192f0e366554 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -45,36 +45,27 @@ nft_payload_copy_vlan(u32 *d, const struct sk_buff *skb, u8 offset, u8 len)
+ int mac_off = skb_mac_header(skb) - skb->data;
+ u8 *vlanh, *dst_u8 = (u8 *) d;
+ struct vlan_ethhdr veth;
+- u8 vlan_hlen = 0;
+-
+- if ((skb->protocol == htons(ETH_P_8021AD) ||
+- skb->protocol == htons(ETH_P_8021Q)) &&
+- offset >= VLAN_ETH_HLEN && offset < VLAN_ETH_HLEN + VLAN_HLEN)
+- vlan_hlen += VLAN_HLEN;
+
+ vlanh = (u8 *) &veth;
+- if (offset < VLAN_ETH_HLEN + vlan_hlen) {
++ if (offset < VLAN_ETH_HLEN) {
+ u8 ethlen = len;
+
+- if (vlan_hlen &&
+- skb_copy_bits(skb, mac_off, &veth, VLAN_ETH_HLEN) < 0)
+- return false;
+- else if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
++ if (!nft_payload_rebuild_vlan_hdr(skb, mac_off, &veth))
+ return false;
+
+- if (offset + len > VLAN_ETH_HLEN + vlan_hlen)
+- ethlen -= offset + len - VLAN_ETH_HLEN - vlan_hlen;
++ if (offset + len > VLAN_ETH_HLEN)
++ ethlen -= offset + len - VLAN_ETH_HLEN;
+
+- memcpy(dst_u8, vlanh + offset - vlan_hlen, ethlen);
++ memcpy(dst_u8, vlanh + offset, ethlen);
+
+ len -= ethlen;
+ if (len == 0)
+ return true;
+
+ dst_u8 += ethlen;
+- offset = ETH_HLEN + vlan_hlen;
++ offset = ETH_HLEN;
+ } else {
+- offset -= VLAN_HLEN + vlan_hlen;
++ offset -= VLAN_HLEN;
+ }
+
+ return skb_copy_bits(skb, offset + mac_off, dst_u8, len) == 0;
+--
+2.43.0
+
--- /dev/null
+From d4bb136685ed98827eeb34f38506c7cf05d938a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 May 2024 22:50:34 +0200
+Subject: netfilter: nft_payload: skbuff vlan metadata mangle support
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 33c563ebf8d3deed7d8addd20d77398ac737ef9a ]
+
+Userspace assumes the vlan header is present at a given offset, but vlan
+offload allows it to be stored in metadata fields of the skbuff. Hence,
+mangling the vlan results in a garbled packet. Handle this transparently
+by adding a parser to the kernel.
+
+If vlan metadata is present and the payload offset is past the 12-byte
+source and destination mac address fields, then subtract the vlan header
+held in the vlan metadata; otherwise mangle the vlan metadata based on
+offset and length, extracting the data from the source register.
+
+This is similar to:
+
+ 8cfd23e67401 ("netfilter: nft_payload: work around vlan header stripping")
+
+to deal with vlan payload mangling.
+
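+For reference, the struct vlan_ethhdr offsets the new logic keys off
+(illustration only):
+
+	/* offsetof(struct vlan_ethhdr, h_vlan_proto)              = 12
+	 * offsetof(struct vlan_ethhdr, h_vlan_TCI)                = 14
+	 * offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto) = 16
+	 *
+	 * Writes at or past h_vlan_encapsulated_proto land after the tag
+	 * kept in skb metadata, so only the mac header offset is adjusted
+	 * by VLAN_HLEN; writes to h_vlan_proto or h_vlan_TCI mangle the
+	 * skb vlan metadata directly from the source register.
+	 */
+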
+Fixes: 7ec3f7b47b8d ("netfilter: nft_payload: add packet mangling support")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_payload.c | 72 +++++++++++++++++++++++++++++++++----
+ 1 file changed, 65 insertions(+), 7 deletions(-)
+
+diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
+index 697566e4ae759..55237d8a3d882 100644
+--- a/net/netfilter/nft_payload.c
++++ b/net/netfilter/nft_payload.c
+@@ -110,12 +110,12 @@ static int nft_payload_inner_offset(const struct nft_pktinfo *pkt)
+ return pkt->inneroff;
+ }
+
+-static bool nft_payload_need_vlan_copy(const struct nft_payload *priv)
++static bool nft_payload_need_vlan_adjust(u32 offset, u32 len)
+ {
+- unsigned int len = priv->offset + priv->len;
++ unsigned int boundary = offset + len;
+
+ /* data past ether src/dst requested, copy needed */
+- if (len > offsetof(struct ethhdr, h_proto))
++ if (boundary > offsetof(struct ethhdr, h_proto))
+ return true;
+
+ return false;
+@@ -139,7 +139,7 @@ void nft_payload_eval(const struct nft_expr *expr,
+ goto err;
+
+ if (skb_vlan_tag_present(skb) &&
+- nft_payload_need_vlan_copy(priv)) {
++ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
+ if (!nft_payload_copy_vlan(dest, skb,
+ priv->offset, priv->len))
+ goto err;
+@@ -651,21 +651,79 @@ struct nft_payload_set {
+ u8 csum_flags;
+ };
+
++/* This is not struct vlan_hdr. */
++struct nft_payload_vlan_hdr {
++ __be16 h_vlan_proto;
++ __be16 h_vlan_TCI;
++};
++
++static bool
++nft_payload_set_vlan(const u32 *src, struct sk_buff *skb, u8 offset, u8 len,
++ int *vlan_hlen)
++{
++ struct nft_payload_vlan_hdr *vlanh;
++ __be16 vlan_proto;
++ u16 vlan_tci;
++
++ if (offset >= offsetof(struct vlan_ethhdr, h_vlan_encapsulated_proto)) {
++ *vlan_hlen = VLAN_HLEN;
++ return true;
++ }
++
++ switch (offset) {
++ case offsetof(struct vlan_ethhdr, h_vlan_proto):
++ if (len == 2) {
++ vlan_proto = nft_reg_load_be16(src);
++ skb->vlan_proto = vlan_proto;
++ } else if (len == 4) {
++ vlanh = (struct nft_payload_vlan_hdr *)src;
++ __vlan_hwaccel_put_tag(skb, vlanh->h_vlan_proto,
++ ntohs(vlanh->h_vlan_TCI));
++ } else {
++ return false;
++ }
++ break;
++ case offsetof(struct vlan_ethhdr, h_vlan_TCI):
++ if (len != 2)
++ return false;
++
++ vlan_tci = ntohs(nft_reg_load_be16(src));
++ skb->vlan_tci = vlan_tci;
++ break;
++ default:
++ return false;
++ }
++
++ return true;
++}
++
+ static void nft_payload_set_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+ {
+ const struct nft_payload_set *priv = nft_expr_priv(expr);
+- struct sk_buff *skb = pkt->skb;
+ const u32 *src = &regs->data[priv->sreg];
+- int offset, csum_offset;
++ int offset, csum_offset, vlan_hlen = 0;
++ struct sk_buff *skb = pkt->skb;
+ __wsum fsum, tsum;
+
+ switch (priv->base) {
+ case NFT_PAYLOAD_LL_HEADER:
+ if (!skb_mac_header_was_set(skb))
+ goto err;
+- offset = skb_mac_header(skb) - skb->data;
++
++ if (skb_vlan_tag_present(skb) &&
++ nft_payload_need_vlan_adjust(priv->offset, priv->len)) {
++ if (!nft_payload_set_vlan(src, skb,
++ priv->offset, priv->len,
++ &vlan_hlen))
++ goto err;
++
++ if (!vlan_hlen)
++ return;
++ }
++
++ offset = skb_mac_header(skb) - skb->data - vlan_hlen;
+ break;
+ case NFT_PAYLOAD_NETWORK_HEADER:
+ offset = skb_network_offset(skb);
+--
+2.43.0
+
--- /dev/null
+From 1f20d6108749ab7f6cdd77040ad4ea99437d7040 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 May 2024 12:27:15 +0200
+Subject: netfilter: tproxy: bail out if IP has been disabled on the device
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 21a673bddc8fd4873c370caf9ae70ffc6d47e8d3 ]
+
+syzbot reports:
+general protection fault, probably for non-canonical address 0xdffffc0000000003: 0000 [#1] PREEMPT SMP KASAN PTI
+KASAN: null-ptr-deref in range [0x0000000000000018-0x000000000000001f]
+[..]
+RIP: 0010:nf_tproxy_laddr4+0xb7/0x340 net/ipv4/netfilter/nf_tproxy_ipv4.c:62
+Call Trace:
+ nft_tproxy_eval_v4 net/netfilter/nft_tproxy.c:56 [inline]
+ nft_tproxy_eval+0xa9a/0x1a00 net/netfilter/nft_tproxy.c:168
+
+__in_dev_get_rcu() can return NULL, so check for this.
+
+Reported-and-tested-by: syzbot+b94a6818504ea90d7661@syzkaller.appspotmail.com
+Fixes: cc6eb4338569 ("tproxy: use the interface primary IP address as a default value for --on-ip")
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/netfilter/nf_tproxy_ipv4.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+index 61cb2341f50fe..7c1a0cd9f4359 100644
+--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
++++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
+@@ -58,6 +58,8 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
+
+ laddr = 0;
+ indev = __in_dev_get_rcu(skb->dev);
++ if (!indev)
++ return daddr;
+
+ in_dev_for_each_ifa_rcu(ifa, indev) {
+ if (ifa->ifa_flags & IFA_F_SECONDARY)
+--
+2.43.0
+
--- /dev/null
+From 0f59a6f91f3db32abd7e55767458eb2777a54305 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 23:20:28 +0300
+Subject: nvmet: fix ns enable/disable possible hang
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit f97914e35fd98b2b18fb8a092e0a0799f73afdfe ]
+
+When disabling an nvmet namespace, there is a period where the
+subsys->lock is released, as the ns disable waits for backend IO to
+complete, and the ns percpu ref to be properly killed. The original
+intent was to avoid taking the subsystem lock for a prolonged period as
+other processes may need to acquire it (for example new incoming
+connections).
+
+However, it opens up a window where another process may come in and
+enable the ns, (re)initializing the ns percpu_ref, causing the disable
+sequence to hang.
+
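+The race being closed, schematically (not code from the driver):
+
+	/* T1: configfs "enable=0"           T2: configfs "enable=1"
+	 * ---------------------------       ---------------------------
+	 * nvmet_ns_disable()
+	 *   takes subsys->lock
+	 *   drops subsys->lock to wait
+	 *   for IO / percpu_ref kill
+	 *                                    nvmet_ns_enable()
+	 *                                      re-inits the ns percpu_ref
+	 *   wait never completes: the ref
+	 *   it is killing was re-initialized
+	 */
+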
+Solve this by taking the global nvmet_config_sem over the entire configfs
+enable/disable sequence.
+
+Fixes: a07b4970f464 ("nvmet: add a generic NVMe target")
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/configfs.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
+index 5bdc3ba51f7ef..a3d3a1bfd292d 100644
+--- a/drivers/nvme/target/configfs.c
++++ b/drivers/nvme/target/configfs.c
+@@ -530,10 +530,18 @@ static ssize_t nvmet_ns_enable_store(struct config_item *item,
+ if (strtobool(page, &enable))
+ return -EINVAL;
+
++ /*
++ * take a global nvmet_config_sem because the disable routine has a
++ * window where it releases the subsys-lock, giving a chance to
++ * a parallel enable to concurrently execute causing the disable to
++ * have a misaccounting of the ns percpu_ref.
++ */
++ down_write(&nvmet_config_sem);
+ if (enable)
+ ret = nvmet_ns_enable(ns);
+ else
+ nvmet_ns_disable(ns);
++ up_write(&nvmet_config_sem);
+
+ return ret ? ret : count;
+ }
+--
+2.43.0
+
--- /dev/null
+From 77c8c18109f7d833e8b3c948d29eea1b258898c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 22:30:29 +1000
+Subject: powerpc/uaccess: Use YZ asm constraint for ld
+
+From: Michael Ellerman <mpe@ellerman.id.au>
+
+[ Upstream commit 50934945d54238d2d6d8db4b7c1d4c90d2696c57 ]
+
+The 'ld' instruction requires a 4-byte aligned displacement because it
+is a DS-form instruction. But the "m" asm constraint doesn't enforce
+that.
+
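+For illustration (an assumed example, not taken from this patch): with a
+plain "m" operand the compiler is free to form a displacement that is
+not a multiple of four, e.g.
+
+	ld	r9, 6(r3)
+
+which the assembler rejects because ld is DS-form and its displacement
+must be 4-byte aligned. The "YZ" pair makes the compiler either keep the
+displacement aligned or fall back to the X-form ldx with an index
+register.
+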
+Add a special case of __get_user_asm2_goto() so that the "YZ" constraint
+can be used for "ld".
+
+The "Z" constraint is documented in the GCC manual PowerPC machine
+constraints, and specifies a "memory operand accessed with indexed or
+indirect addressing". "Y" is not documented in the manual but specifies
+a "memory operand for a DS-form instruction". Using both allows the
+compiler to generate a DS-form "ld" or X-form "ldx" as appropriate.
+
+The change has to be conditional on CONFIG_PPC_KERNEL_PREFIXED because
+the "Y" constraint does not guarantee 4-byte alignment when prefixed
+instructions are enabled.
+
+No build errors have been reported due to this, but the possibility is
+there depending on compiler code generation decisions.
+
+Fixes: c20beffeec3c ("powerpc/uaccess: Use flexible addressing with __put_user()/__get_user()")
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://msgid.link/20240529123029.146953-2-mpe@ellerman.id.au
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/uaccess.h | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
+index b2680070d65d6..a1fddbd843f00 100644
+--- a/arch/powerpc/include/asm/uaccess.h
++++ b/arch/powerpc/include/asm/uaccess.h
+@@ -151,8 +151,19 @@ do { \
+ : label)
+
+ #ifdef __powerpc64__
++#ifdef CONFIG_PPC_KERNEL_PREFIXED
+ #define __get_user_asm2_goto(x, addr, label) \
+ __get_user_asm_goto(x, addr, label, "ld")
++#else
++#define __get_user_asm2_goto(x, addr, label) \
++ asm_goto_output( \
++ "1: ld%U1%X1 %0, %1 # get_user\n" \
++ EX_TABLE(1b, %l2) \
++ : "=r" (x) \
++ : DS_FORM_CONSTRAINT (*addr) \
++ : \
++ : label)
++#endif // CONFIG_PPC_KERNEL_PREFIXED
+ #else /* __powerpc64__ */
+ #define __get_user_asm2_goto(x, addr, label) \
+ asm_volatile_goto( \
+--
+2.43.0
+
nfc-nci-fix-kcov-check-in-nci_rx_work.patch
nfc-nci-fix-handling-of-zero-length-payload-packets-.patch
ice-interpret-.set_channels-input-differently.patch
+netfilter-nfnetlink_queue-acquire-rcu_read_lock-in-i.patch
+netfilter-nft_payload-restore-vlan-q-in-q-match-supp.patch
+spi-don-t-mark-message-dma-mapped-when-no-transfer-i.patch
+dma-mapping-benchmark-fix-node-id-validation.patch
+dma-mapping-benchmark-handle-numa_no_node-correctly.patch
+nvmet-fix-ns-enable-disable-possible-hang.patch
+net-phy-micrel-set-soft_reset-callback-to-genphy_sof.patch
+net-mlx5e-fix-ipsec-tunnel-mode-offload-feature-chec.patch
+net-mlx5e-use-rx_missed_errors-instead-of-rx_dropped.patch
+dma-buf-sw-sync-don-t-enable-irq-from-sync_print_obj.patch
+bpf-fix-potential-integer-overflow-in-resolve_btfids.patch
+enic-validate-length-of-nl-attributes-in-enic_set_vf.patch
+net-usb-smsc95xx-fix-changing-led_sel-bit-value-upda.patch
+bpf-allow-delete-from-sockmap-sockhash-only-if-updat.patch
+net-fec-add-fec_enet_deinit.patch
+netfilter-nft_payload-move-struct-nft_payload_set-de.patch
+netfilter-nft_payload-rebuild-vlan-header-when-neede.patch
+netfilter-nft_payload-rebuild-vlan-header-on-h_proto.patch
+netfilter-nft_payload-skbuff-vlan-metadata-mangle-su.patch
+netfilter-tproxy-bail-out-if-ip-has-been-disabled-on.patch
+kconfig-fix-comparison-to-constant-symbols-m-n.patch
+spi-stm32-don-t-warn-about-spurious-interrupts.patch
+net-ena-add-capabilities-field-with-support-for-eni-.patch
+net-ena-extract-recurring-driver-reset-code-into-a-f.patch
+net-ena-do-not-waste-napi-skb-cache.patch
+net-ena-add-dynamic-recycling-mechanism-for-rx-buffe.patch
+net-ena-reduce-lines-with-longer-column-width-bounda.patch
+net-ena-fix-redundant-device-numa-node-override.patch
+ipvlan-dont-use-skb-sk-in-ipvlan_process_v-4-6-_outb.patch
+powerpc-uaccess-use-yz-asm-constraint-for-ld.patch
+hwmon-shtc1-fix-property-misspelling.patch
--- /dev/null
+From f817f6d1c83d440b6055dcc3d73ea8a47b7c4d3e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 May 2024 20:09:49 +0300
+Subject: spi: Don't mark message DMA mapped when no transfer in it is
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 9f788ba457b45b0ce422943fcec9fa35c4587764 ]
+
+There is no need to set the DMA mapped flag of the message if it has
+no mapped transfers. Moreover, it may give the code a chance to take
+the wrong paths, i.e. to exercise DMA related APIs on unmapped data.
+Make __spi_map_msg() bail out earlier in the above-mentioned cases.
+
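+The shape of the change, as a sketch (map_one_transfer() is a
+hypothetical stand-in for the existing mapping calls):
+
+	ret = -ENOMSG;
+	list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+		if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+			continue;
+		ret = map_one_transfer(xfer);
+		if (ret)
+			return ret;		/* genuine mapping error */
+	}
+	/* ret still -ENOMSG: nothing was mapped, report success but do
+	 * not mark the message as DMA-mapped.
+	 */
+	if (ret)
+		return 0;
+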
+Fixes: 99adef310f68 ("spi: Provide core support for DMA mapping transfers")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Link: https://msgid.link/r/20240522171018.3362521-2-andriy.shevchenko@linux.intel.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index d4b186a35bb22..128f1cda39920 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -1047,6 +1047,7 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ else
+ rx_dev = ctlr->dev.parent;
+
++ ret = -ENOMSG;
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!ctlr->can_dma(ctlr, msg->spi, xfer))
+ continue;
+@@ -1070,6 +1071,9 @@ static int __spi_map_msg(struct spi_controller *ctlr, struct spi_message *msg)
+ }
+ }
+ }
++ /* No transfer has been mapped, bail out with success */
++ if (ret)
++ return 0;
+
+ ctlr->cur_msg_mapped = true;
+
+--
+2.43.0
+
--- /dev/null
+From f394603d300fefff296e9b9fd27e49bc40dee91a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 May 2024 12:52:42 +0200
+Subject: spi: stm32: Don't warn about spurious interrupts
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+
+[ Upstream commit 95d7c452a26564ef0c427f2806761b857106d8c4 ]
+
+The dev_warn to notify about a spurious interrupt was introduced with
+the reasoning that these are unexpected. However, spurious interrupts
+tend to trigger continuously, and the error message on the serial console
+prevents the core's spurious-interrupt detection (which disables the irq)
+from kicking in and just floods the console.
+
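+A minimal sketch of the pattern (hypothetical my_dev/MY_SR names): when
+none of the expected status bits are set, stay quiet and return IRQ_NONE
+so that the genirq core's spurious-interrupt accounting can eventually
+disable the line:
+
+	static irqreturn_t my_irq_handler(int irq, void *dev_id)
+	{
+		struct my_dev *mydev = dev_id;
+		u32 sr = readl(mydev->base + MY_SR);
+
+		if (!(sr & MY_EVENT_MASK))
+			return IRQ_NONE;	/* unexpected interrupt */
+
+		/* ... handle and acknowledge the event ... */
+		return IRQ_HANDLED;
+	}
+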
+Fixes: c64e7efe46b7 ("spi: stm32: make spurious and overrun interrupts visible")
+Signed-off-by: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
+Link: https://msgid.link/r/20240521105241.62400-2-u.kleine-koenig@pengutronix.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/spi-stm32.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
+index 191baa6e45c08..e8d21c93ed7ef 100644
+--- a/drivers/spi/spi-stm32.c
++++ b/drivers/spi/spi-stm32.c
+@@ -884,7 +884,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
+ mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
+
+ if (!(sr & mask)) {
+- dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
++ dev_vdbg(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
+ sr, ier);
+ spin_unlock_irqrestore(&spi->lock, flags);
+ return IRQ_NONE;
+--
+2.43.0
+