--- /dev/null
+From 16ea417ed978c61f1f88f72f8dd5deb1696ff4dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jan 2025 14:21:55 +0100
+Subject: bpf: Fix bpf_sk_select_reuseport() memory leak
+
+From: Michal Luczaj <mhal@rbox.co>
+
+[ Upstream commit b3af60928ab9129befa65e6df0310d27300942bf ]
+
+As pointed out in the original comment, a lookup in sockmap can return a TCP
+ESTABLISHED socket. Such a TCP socket may have had SO_ATTACH_REUSEPORT_EBPF
+set before it was ESTABLISHED. In other words, a non-NULL sk_reuseport_cb
+does not imply a non-refcounted socket.
+
+Drop sk's reference in both error paths.
+
+unreferenced object 0xffff888101911800 (size 2048):
+ comm "test_progs", pid 44109, jiffies 4297131437
+ hex dump (first 32 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ 80 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace (crc 9336483b):
+ __kmalloc_noprof+0x3bf/0x560
+ __reuseport_alloc+0x1d/0x40
+ reuseport_alloc+0xca/0x150
+ reuseport_attach_prog+0x87/0x140
+ sk_reuseport_attach_bpf+0xc8/0x100
+ sk_setsockopt+0x1181/0x1990
+ do_sock_setsockopt+0x12b/0x160
+ __sys_setsockopt+0x7b/0xc0
+ __x64_sys_setsockopt+0x1b/0x30
+ do_syscall_64+0x93/0x180
+ entry_SYSCALL_64_after_hwframe+0x76/0x7e
+
+Fixes: 64d85290d79c ("bpf: Allow bpf_map_lookup_elem for SOCKMAP and SOCKHASH")
+Signed-off-by: Michal Luczaj <mhal@rbox.co>
+Reviewed-by: Martin KaFai Lau <martin.lau@kernel.org>
+Link: https://patch.msgid.link/20250110-reuseport-memleak-v1-1-fa1ddab0adfe@rbox.co
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index d6042d285aa21..84ec1b14b23f3 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -10509,6 +10509,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+ bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY;
+ struct sock_reuseport *reuse;
+ struct sock *selected_sk;
++ int err;
+
+ selected_sk = map->ops->map_lookup_elem(map, key);
+ if (!selected_sk)
+@@ -10516,10 +10517,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+
+ reuse = rcu_dereference(selected_sk->sk_reuseport_cb);
+ if (!reuse) {
+- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
+- if (sk_is_refcounted(selected_sk))
+- sock_put(selected_sk);
+-
+ /* reuseport_array has only sk with non NULL sk_reuseport_cb.
+ * The only (!reuse) case here is - the sk has already been
+ * unhashed (e.g. by close()), so treat it as -ENOENT.
+@@ -10527,24 +10524,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern,
+ * Other maps (e.g. sock_map) do not provide this guarantee and
+ * the sk may never be in the reuseport group to begin with.
+ */
+- return is_sockarray ? -ENOENT : -EINVAL;
++ err = is_sockarray ? -ENOENT : -EINVAL;
++ goto error;
+ }
+
+ if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) {
+ struct sock *sk = reuse_kern->sk;
+
+- if (sk->sk_protocol != selected_sk->sk_protocol)
+- return -EPROTOTYPE;
+- else if (sk->sk_family != selected_sk->sk_family)
+- return -EAFNOSUPPORT;
+-
+- /* Catch all. Likely bound to a different sockaddr. */
+- return -EBADFD;
++ if (sk->sk_protocol != selected_sk->sk_protocol) {
++ err = -EPROTOTYPE;
++ } else if (sk->sk_family != selected_sk->sk_family) {
++ err = -EAFNOSUPPORT;
++ } else {
++ /* Catch all. Likely bound to a different sockaddr. */
++ err = -EBADFD;
++ }
++ goto error;
+ }
+
+ reuse_kern->selected_sk = selected_sk;
+
+ return 0;
++error:
++ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */
++ if (sk_is_refcounted(selected_sk))
++ sock_put(selected_sk);
++
++ return err;
+ }
+
+ static const struct bpf_func_proto sk_select_reuseport_proto = {
+--
+2.39.5
+
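To make the shape of the fix easier to see outside the kernel context, here is
a minimal userspace C sketch of the same error-path pattern: every failure
branch funnels through a single label that conditionally drops the reference
taken by the lookup. The struct, names and refcount model are illustrative,
not the kernel API.

#include <stdio.h>

/* Hypothetical stand-in for a socket that may or may not be refcounted. */
struct obj { int refcnt; int refcounted; };

static void obj_put(struct obj *o)
{
	if (o->refcounted)
		o->refcnt--;
}

/* Mirrors the patch: every failure path funnels through one label that
 * drops the reference taken by the lookup; success keeps it. */
static int select_obj(struct obj *o, int want_proto, int proto)
{
	int err;

	if (!o)
		return -1;		/* nothing looked up, nothing to drop */

	if (proto != want_proto) {
		err = -2;		/* think -EPROTOTYPE */
		goto error;
	}
	return 0;			/* caller now owns the reference */

error:
	obj_put(o);			/* single place where the ref is dropped */
	return err;
}

int main(void)
{
	struct obj o = { .refcnt = 1, .refcounted = 1 };

	select_obj(&o, 6, 17);		/* mismatched "protocol": error path */
	printf("refcnt after error path: %d\n", o.refcnt);	/* 0, no leak */
	return 0;
}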
--- /dev/null
+From 3791c851f6ba4b120603c60c5d58a9555ab864f3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 12:47:40 -0300
+Subject: drm/v3d: Ensure job pointer is set to NULL after job completion
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Maíra Canal <mcanal@igalia.com>
+
+[ Upstream commit e4b5ccd392b92300a2b341705cc4805681094e49 ]
+
+After a job completes, the corresponding pointer in the device must
+be set to NULL. Failing to do so triggers a warning when unloading
+the driver, as it appears the job is still active. To prevent this,
+assign the job pointer to NULL after completing the job, indicating
+the job has finished.
+
+Fixes: 14d1d1908696 ("drm/v3d: Remove the bad signaled() implementation.")
+Signed-off-by: Maíra Canal <mcanal@igalia.com>
+Reviewed-by: Jose Maria Casanova Crespo <jmcasanova@igalia.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20250113154741.67520-1-mcanal@igalia.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/v3d/v3d_irq.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c
+index e714d5318f309..76806039691a2 100644
+--- a/drivers/gpu/drm/v3d/v3d_irq.c
++++ b/drivers/gpu/drm/v3d/v3d_irq.c
+@@ -103,6 +103,7 @@ v3d_irq(int irq, void *arg)
+
+ trace_v3d_bcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
++ v3d->bin_job = NULL;
+ status = IRQ_HANDLED;
+ }
+
+@@ -112,6 +113,7 @@ v3d_irq(int irq, void *arg)
+
+ trace_v3d_rcl_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
++ v3d->render_job = NULL;
+ status = IRQ_HANDLED;
+ }
+
+@@ -121,6 +123,7 @@ v3d_irq(int irq, void *arg)
+
+ trace_v3d_csd_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
++ v3d->csd_job = NULL;
+ status = IRQ_HANDLED;
+ }
+
+@@ -157,6 +160,7 @@ v3d_hub_irq(int irq, void *arg)
+
+ trace_v3d_tfu_irq(&v3d->drm, fence->seqno);
+ dma_fence_signal(&fence->base);
++ v3d->tfu_job = NULL;
+ status = IRQ_HANDLED;
+ }
+
+--
+2.39.5
+
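A rough userspace model of why the added assignments matter: if the completion
path signals the fence but leaves the per-queue job pointer set, a later
teardown check still sees an "active" job. The structures below are
illustrative only, not the v3d driver's.

#include <stdio.h>
#include <stddef.h>

struct job { int id; };
struct dev_state { struct job *bin_job; };	/* one active-job slot */

/* Completion path: signal, then clear the slot so teardown no longer
 * sees a job that already finished. */
static void irq_complete(struct dev_state *d)
{
	if (!d->bin_job)
		return;
	printf("signal fence for job %d\n", d->bin_job->id);
	d->bin_job = NULL;		/* conceptually, the line the patch adds */
}

/* Unload path: a stale non-NULL pointer here looks like a hung job. */
static void unload(struct dev_state *d)
{
	if (d->bin_job)
		fprintf(stderr, "WARNING: job %d still active at unload\n",
			d->bin_job->id);
}

int main(void)
{
	struct job j = { .id = 42 };
	struct dev_state d = { .bin_job = &j };

	irq_complete(&d);
	unload(&d);			/* quiet now that the slot is cleared */
	return 0;
}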
--- /dev/null
+From 4f9e769ec1e0bf53dbfb375c7bbb66493d673661 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jan 2025 10:47:53 +0900
+Subject: gtp: Destroy device along with udp socket's netns dismantle.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit eb28fd76c0a08a47b470677c6cef9dd1c60e92d1 ]
+
+gtp_newlink() links the device to a list in dev_net(dev) instead of
+src_net, where a udp tunnel socket is created.
+
+Even when src_net is removed, the device stays alive on dev_net(dev).
+Then, removing src_net triggers the splat below. [0]
+
+In this example, gtp0 is created in ns2, and the udp socket is created
+in ns1.
+
+ ip netns add ns1
+ ip netns add ns2
+ ip -n ns1 link add netns ns2 name gtp0 type gtp role sgsn
+ ip netns del ns1
+
+Let's link the device to the socket's netns instead.
+
+Now, gtp_net_exit_batch_rtnl() needs another netdev iteration to remove
+all gtp devices in the netns.
+
+[0]:
+ref_tracker: net notrefcnt@000000003d6e7d05 has 1/2 users at
+ sk_alloc (./include/net/net_namespace.h:345 net/core/sock.c:2236)
+ inet_create (net/ipv4/af_inet.c:326 net/ipv4/af_inet.c:252)
+ __sock_create (net/socket.c:1558)
+ udp_sock_create4 (net/ipv4/udp_tunnel_core.c:18)
+ gtp_create_sock (./include/net/udp_tunnel.h:59 drivers/net/gtp.c:1423)
+ gtp_create_sockets (drivers/net/gtp.c:1447)
+ gtp_newlink (drivers/net/gtp.c:1507)
+ rtnl_newlink (net/core/rtnetlink.c:3786 net/core/rtnetlink.c:3897 net/core/rtnetlink.c:4012)
+ rtnetlink_rcv_msg (net/core/rtnetlink.c:6922)
+ netlink_rcv_skb (net/netlink/af_netlink.c:2542)
+ netlink_unicast (net/netlink/af_netlink.c:1321 net/netlink/af_netlink.c:1347)
+ netlink_sendmsg (net/netlink/af_netlink.c:1891)
+ ____sys_sendmsg (net/socket.c:711 net/socket.c:726 net/socket.c:2583)
+ ___sys_sendmsg (net/socket.c:2639)
+ __sys_sendmsg (net/socket.c:2669)
+ do_syscall_64 (arch/x86/entry/common.c:52 arch/x86/entry/common.c:83)
+
+WARNING: CPU: 1 PID: 60 at lib/ref_tracker.c:179 ref_tracker_dir_exit (lib/ref_tracker.c:179)
+Modules linked in:
+CPU: 1 UID: 0 PID: 60 Comm: kworker/u16:2 Not tainted 6.13.0-rc5-00147-g4c1224501e9d #5
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014
+Workqueue: netns cleanup_net
+RIP: 0010:ref_tracker_dir_exit (lib/ref_tracker.c:179)
+Code: 00 00 00 fc ff df 4d 8b 26 49 bd 00 01 00 00 00 00 ad de 4c 39 f5 0f 85 df 00 00 00 48 8b 74 24 08 48 89 df e8 a5 cc 12 02 90 <0f> 0b 90 48 8d 6b 44 be 04 00 00 00 48 89 ef e8 80 de 67 ff 48 89
+RSP: 0018:ff11000009a07b60 EFLAGS: 00010286
+RAX: 0000000000002bd3 RBX: ff1100000f4e1aa0 RCX: 1ffffffff0e40ac6
+RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff8423ee3c
+RBP: ff1100000f4e1af0 R08: 0000000000000001 R09: fffffbfff0e395ae
+R10: 0000000000000001 R11: 0000000000036001 R12: ff1100000f4e1af0
+R13: dead000000000100 R14: ff1100000f4e1af0 R15: dffffc0000000000
+FS: 0000000000000000(0000) GS:ff1100006ce80000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007f9b2464bd98 CR3: 0000000005286005 CR4: 0000000000771ef0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400
+PKRU: 55555554
+Call Trace:
+ <TASK>
+ ? __warn (kernel/panic.c:748)
+ ? ref_tracker_dir_exit (lib/ref_tracker.c:179)
+ ? report_bug (lib/bug.c:201 lib/bug.c:219)
+ ? handle_bug (arch/x86/kernel/traps.c:285)
+ ? exc_invalid_op (arch/x86/kernel/traps.c:309 (discriminator 1))
+ ? asm_exc_invalid_op (./arch/x86/include/asm/idtentry.h:621)
+ ? _raw_spin_unlock_irqrestore (./arch/x86/include/asm/irqflags.h:42 ./arch/x86/include/asm/irqflags.h:97 ./arch/x86/include/asm/irqflags.h:155 ./include/linux/spinlock_api_smp.h:151 kernel/locking/spinlock.c:194)
+ ? ref_tracker_dir_exit (lib/ref_tracker.c:179)
+ ? __pfx_ref_tracker_dir_exit (lib/ref_tracker.c:158)
+ ? kfree (mm/slub.c:4613 mm/slub.c:4761)
+ net_free (net/core/net_namespace.c:476 net/core/net_namespace.c:467)
+ cleanup_net (net/core/net_namespace.c:664 (discriminator 3))
+ process_one_work (kernel/workqueue.c:3229)
+ worker_thread (kernel/workqueue.c:3304 kernel/workqueue.c:3391)
+ kthread (kernel/kthread.c:389)
+ ret_from_fork (arch/x86/kernel/process.c:147)
+ ret_from_fork_asm (arch/x86/entry/entry_64.S:257)
+ </TASK>
+
+Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)")
+Reported-by: Xiao Liang <shaw.leon@gmail.com>
+Closes: https://lore.kernel.org/netdev/20250104125732.17335-1-shaw.leon@gmail.com/
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/gtp.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index e4d29c22a0a79..2d306971d4fde 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -697,7 +697,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ goto out_encap;
+ }
+
+- gn = net_generic(dev_net(dev), gtp_net_id);
++ gn = net_generic(src_net, gtp_net_id);
+ list_add(&gtp->list, &gn->gtp_dev_list);
+ dev->priv_destructor = gtp_destructor;
+
+@@ -1414,6 +1414,11 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+ list_for_each_entry(net, net_list, exit_list) {
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+ struct gtp_dev *gtp, *gtp_next;
++ struct net_device *dev;
++
++ for_each_netdev(net, dev)
++ if (dev->rtnl_link_ops == &gtp_link_ops)
++ gtp_dellink(dev, dev_to_kill);
+
+ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
+--
+2.39.5
+
--- /dev/null
+From cc6cfbbd99a2c0ed574a42f34566b0d6df6bcbd8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 14:43:03 +0000
+Subject: gtp: use exit_batch_rtnl() method
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 6eedda01b2bfdcf427b37759e053dc27232f3af1 ]
+
+exit_batch_rtnl() is called while RTNL is held,
+and devices to be unregistered can be queued in the dev_kill_list.
+
+This saves one rtnl_lock()/rtnl_unlock() pair per netns
+and one unregister_netdevice_many() call per netns.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Antoine Tenart <atenart@kernel.org>
+Link: https://lore.kernel.org/r/20240206144313.2050392-8-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/gtp.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 2509d7bccb2b3..67dcdd471a659 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -1403,23 +1403,23 @@ static int __net_init gtp_net_init(struct net *net)
+ return 0;
+ }
+
+-static void __net_exit gtp_net_exit(struct net *net)
++static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
++ struct list_head *dev_to_kill)
+ {
+- struct gtp_net *gn = net_generic(net, gtp_net_id);
+- struct gtp_dev *gtp;
+- LIST_HEAD(list);
++ struct net *net;
+
+- rtnl_lock();
+- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+- gtp_dellink(gtp->dev, &list);
++ list_for_each_entry(net, net_list, exit_list) {
++ struct gtp_net *gn = net_generic(net, gtp_net_id);
++ struct gtp_dev *gtp;
+
+- unregister_netdevice_many(&list);
+- rtnl_unlock();
++ list_for_each_entry(gtp, &gn->gtp_dev_list, list)
++ gtp_dellink(gtp->dev, dev_to_kill);
++ }
+ }
+
+ static struct pernet_operations gtp_net_ops = {
+ .init = gtp_net_init,
+- .exit = gtp_net_exit,
++ .exit_batch_rtnl = gtp_net_exit_batch_rtnl,
+ .id = &gtp_net_id,
+ .size = sizeof(struct gtp_net),
+ };
+--
+2.39.5
+
--- /dev/null
+From aff8af4170170c74138f229f851727ec8785e71a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jan 2025 10:47:52 +0900
+Subject: gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 46841c7053e6d25fb33e0534ef023833bf03e382 ]
+
+gtp_newlink() links the gtp device to a list in dev_net(dev).
+
+However, even after the gtp device is moved to another netns,
+it stays on the list but should be invisible.
+
+Let's use for_each_netdev_rcu() for netdev traversal in
+gtp_genl_dump_pdp().
+
+Note that gtp_dev_list is no longer used under RCU, so list
+helpers are converted to the non-RCU variant.
+
+Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)")
+Reported-by: Xiao Liang <shaw.leon@gmail.com>
+Closes: https://lore.kernel.org/netdev/CABAhCOQdBL6h9M2C+kd+bGivRJ9Q72JUxW+-gur0nub_=PmFPA@mail.gmail.com/
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/gtp.c | 19 +++++++++++--------
+ 1 file changed, 11 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
+index 67dcdd471a659..e4d29c22a0a79 100644
+--- a/drivers/net/gtp.c
++++ b/drivers/net/gtp.c
+@@ -698,7 +698,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
+ }
+
+ gn = net_generic(dev_net(dev), gtp_net_id);
+- list_add_rcu(&gtp->list, &gn->gtp_dev_list);
++ list_add(&gtp->list, &gn->gtp_dev_list);
+ dev->priv_destructor = gtp_destructor;
+
+ netdev_dbg(dev, "registered new GTP interface\n");
+@@ -724,7 +724,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head)
+ hlist_for_each_entry_safe(pctx, next, &gtp->tid_hash[i], hlist_tid)
+ pdp_context_delete(pctx);
+
+- list_del_rcu(&gtp->list);
++ list_del(&gtp->list);
+ unregister_netdevice_queue(dev, head);
+ }
+
+@@ -1305,16 +1305,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb,
+ struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp;
+ int i, j, bucket = cb->args[0], skip = cb->args[1];
+ struct net *net = sock_net(skb->sk);
++ struct net_device *dev;
+ struct pdp_ctx *pctx;
+- struct gtp_net *gn;
+-
+- gn = net_generic(net, gtp_net_id);
+
+ if (cb->args[4])
+ return 0;
+
+ rcu_read_lock();
+- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) {
++ for_each_netdev_rcu(net, dev) {
++ if (dev->rtnl_link_ops != &gtp_link_ops)
++ continue;
++
++ gtp = netdev_priv(dev);
++
+ if (last_gtp && last_gtp != gtp)
+ continue;
+ else
+@@ -1410,9 +1413,9 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+
+ list_for_each_entry(net, net_list, exit_list) {
+ struct gtp_net *gn = net_generic(net, gtp_net_id);
+- struct gtp_dev *gtp;
++ struct gtp_dev *gtp, *gtp_next;
+
+- list_for_each_entry(gtp, &gn->gtp_dev_list, list)
++ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+ gtp_dellink(gtp->dev, dev_to_kill);
+ }
+ }
+--
+2.39.5
+
--- /dev/null
+From fee3cea0dbea8bf57cb6cfbfac1a83fa994adf80 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 6 Feb 2024 14:42:57 +0000
+Subject: net: add exit_batch_rtnl() method
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit fd4f101edbd9f99567ab2adb1f2169579ede7c13 ]
+
+Many (struct pernet_operations)->exit_batch() methods have
+to acquire rtnl.
+
+In presence of rtnl mutex pressure, this makes cleanup_net()
+very slow.
+
+This patch adds a new exit_batch_rtnl() method to reduce
+number of rtnl acquisitions from cleanup_net().
+
+exit_batch_rtnl() handlers are called while rtnl is locked,
+and devices to be killed can be queued in a list provided
+as their second argument.
+
+A single unregister_netdevice_many() is called right
+before rtnl is released.
+
+exit_batch_rtnl() handlers are called before ->exit() and
+->exit_batch() handlers.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Reviewed-by: Antoine Tenart <atenart@kernel.org>
+Link: https://lore.kernel.org/r/20240206144313.2050392-2-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/net_namespace.h | 3 +++
+ net/core/net_namespace.c | 31 ++++++++++++++++++++++++++++++-
+ 2 files changed, 33 insertions(+), 1 deletion(-)
+
+diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
+index c47baa623ba58..d184b832166b6 100644
+--- a/include/net/net_namespace.h
++++ b/include/net/net_namespace.h
+@@ -389,6 +389,9 @@ struct pernet_operations {
+ void (*pre_exit)(struct net *net);
+ void (*exit)(struct net *net);
+ void (*exit_batch)(struct list_head *net_exit_list);
++ /* Following method is called with RTNL held. */
++ void (*exit_batch_rtnl)(struct list_head *net_exit_list,
++ struct list_head *dev_kill_list);
+ unsigned int *id;
+ size_t size;
+ };
+diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
+index 3addbce20f8ed..1e9e76c4ff5be 100644
+--- a/net/core/net_namespace.c
++++ b/net/core/net_namespace.c
+@@ -313,8 +313,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+ {
+ /* Must be called with pernet_ops_rwsem held */
+ const struct pernet_operations *ops, *saved_ops;
+- int error = 0;
+ LIST_HEAD(net_exit_list);
++ LIST_HEAD(dev_kill_list);
++ int error = 0;
+
+ refcount_set(&net->ns.count, 1);
+ refcount_set(&net->passive, 1);
+@@ -350,6 +351,15 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
+
+ synchronize_rcu();
+
++ ops = saved_ops;
++ rtnl_lock();
++ list_for_each_entry_continue_reverse(ops, &pernet_list, list) {
++ if (ops->exit_batch_rtnl)
++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
++ }
++ unregister_netdevice_many(&dev_kill_list);
++ rtnl_unlock();
++
+ ops = saved_ops;
+ list_for_each_entry_continue_reverse(ops, &pernet_list, list)
+ ops_exit_list(ops, &net_exit_list);
+@@ -554,6 +564,7 @@ static void cleanup_net(struct work_struct *work)
+ struct net *net, *tmp, *last;
+ struct llist_node *net_kill_list;
+ LIST_HEAD(net_exit_list);
++ LIST_HEAD(dev_kill_list);
+
+ /* Atomically snapshot the list of namespaces to cleanup */
+ net_kill_list = llist_del_all(&cleanup_list);
+@@ -594,6 +605,14 @@ static void cleanup_net(struct work_struct *work)
+ */
+ synchronize_rcu();
+
++ rtnl_lock();
++ list_for_each_entry_reverse(ops, &pernet_list, list) {
++ if (ops->exit_batch_rtnl)
++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list);
++ }
++ unregister_netdevice_many(&dev_kill_list);
++ rtnl_unlock();
++
+ /* Run all of the network namespace exit methods */
+ list_for_each_entry_reverse(ops, &pernet_list, list)
+ ops_exit_list(ops, &net_exit_list);
+@@ -1134,7 +1153,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_
+ {
+ ops_pre_exit_list(ops, net_exit_list);
+ synchronize_rcu();
++
++ if (ops->exit_batch_rtnl) {
++ LIST_HEAD(dev_kill_list);
++
++ rtnl_lock();
++ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list);
++ unregister_netdevice_many(&dev_kill_list);
++ rtnl_unlock();
++ }
+ ops_exit_list(ops, net_exit_list);
++
+ ops_free_list(ops, net_exit_list);
+ }
+
+--
+2.39.5
+
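The point of the new hook is to amortize RTNL: per-netns handlers only queue
devices on dev_kill_list, while the core takes the lock once and calls
unregister_netdevice_many() once per batch. Below is a small userspace sketch
of that batching idea, with fake lock counters and hypothetical names, just to
show where the saved lock/unlock pairs come from.

#include <stdio.h>

static int lock_ops;				/* counts lock + unlock calls */
static void fake_lock(void)   { lock_ops++; }
static void fake_unlock(void) { lock_ops++; }

/* Old style: each per-netns exit handler takes the lock itself. */
static void exit_per_ns(int nr_ns)
{
	for (int i = 0; i < nr_ns; i++) {
		fake_lock();
		/* ... unregister this netns' devices ... */
		fake_unlock();
	}
}

/* exit_batch_rtnl style: lock once, handlers only queue devices, and a
 * single unregister pass runs before unlock. */
static void exit_batched(int nr_ns)
{
	fake_lock();
	for (int i = 0; i < nr_ns; i++)
		;	/* ... queue this netns' devices on dev_kill_list ... */
	/* a single unregister_netdevice_many()-like pass would go here */
	fake_unlock();
}

int main(void)
{
	lock_ops = 0; exit_per_ns(100);
	printf("per-netns locking: %d lock/unlock calls\n", lock_ops);
	lock_ops = 0; exit_batched(100);
	printf("batched locking:   %d lock/unlock calls\n", lock_ops);
	return 0;
}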
--- /dev/null
+From 950cbf1e76c8c2ee981efce9b5f9307edf1e54d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 8 Jan 2025 22:54:33 +0530
+Subject: net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field()
+
+From: Sudheer Kumar Doredla <s-doredla@ti.com>
+
+[ Upstream commit 03d120f27d050336f7e7d21879891542c4741f81 ]
+
+CPSW ALE has 75-bit ALE entries stored across three 32-bit words.
+The cpsw_ale_get_field() and cpsw_ale_set_field() functions support
+ALE field entries spanning at most two words.
+
+They work as expected when an ALE field spans word1 and word2, but fail
+when a field spans word2 and word3.
+
+For example, while reading an ALE field spanning word2 and word3
+(i.e. bits 62 to 64), the word3 data is shifted to an incorrect position
+because the index becomes zero while flipping.
+The same issue occurs when setting an ALE entry.
+
+This issue has not been seen in practice, but it will become one if the
+driver ever accesses ALE fields spanning word2 and word3.
+
+Fix both helpers to handle getting/setting fields spanning up to two words.
+
+Fixes: b685f1a58956 ("net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field()/cpsw_ale_set_field()")
+Signed-off-by: Sudheer Kumar Doredla <s-doredla@ti.com>
+Reviewed-by: Simon Horman <horms@kernel.org>
+Reviewed-by: Roger Quadros <rogerq@kernel.org>
+Reviewed-by: Siddharth Vadapalli <s-vadapalli@ti.com>
+Link: https://patch.msgid.link/20250108172433.311694-1-s-doredla@ti.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/ti/cpsw_ale.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
+index 8c59e34d8bcaf..348a05454fcaa 100644
+--- a/drivers/net/ethernet/ti/cpsw_ale.c
++++ b/drivers/net/ethernet/ti/cpsw_ale.c
+@@ -104,15 +104,15 @@ struct cpsw_ale_dev_id {
+
+ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+ {
+- int idx, idx2;
++ int idx, idx2, index;
+ u32 hi_val = 0;
+
+ idx = start / 32;
+ idx2 = (start + bits - 1) / 32;
+ /* Check if bits to be fetched exceed a word */
+ if (idx != idx2) {
+- idx2 = 2 - idx2; /* flip */
+- hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
++ index = 2 - idx2; /* flip */
++ hi_val = ale_entry[index] << ((idx2 * 32) - start);
+ }
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+@@ -122,16 +122,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+ static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ u32 value)
+ {
+- int idx, idx2;
++ int idx, idx2, index;
+
+ value &= BITMASK(bits);
+ idx = start / 32;
+ idx2 = (start + bits - 1) / 32;
+ /* Check if bits to be set exceed a word */
+ if (idx != idx2) {
+- idx2 = 2 - idx2; /* flip */
+- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
+- ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
++ index = 2 - idx2; /* flip */
++ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32)));
++ ale_entry[index] |= (value >> ((idx2 * 32) - start));
+ }
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+--
+2.39.5
+
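The arithmetic can be checked in plain userspace C. The sketch below copies
the patched cpsw_ale_get_field() logic, with a simplified BITMASK() that
assumes bits < 32, and reads bits 62..64, i.e. the word2/word3-spanning case
where the old code ended up with a negative shift because idx2 had already
been flipped to zero.

#include <stdio.h>
#include <stdint.h>

#define BITMASK(bits)	((1u << (bits)) - 1u)	/* ok for bits < 32 */

/* Userspace copy of the fixed helper: the flipped word index goes into a
 * separate 'index' variable so the shift amount still uses the real idx2. */
static int ale_get_field(const uint32_t *ale_entry, uint32_t start, uint32_t bits)
{
	int idx, idx2, index;
	uint32_t hi_val = 0;

	idx = start / 32;
	idx2 = (start + bits - 1) / 32;
	if (idx != idx2) {			/* field spans two words */
		index = 2 - idx2;		/* flip */
		hi_val = ale_entry[index] << ((idx2 * 32) - start);
	}
	start -= idx * 32;
	idx = 2 - idx;				/* flip */
	return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
}

int main(void)
{
	/* 75-bit entry stored as three words in reversed order:
	 * entry[2] holds bits 0..31, entry[1] bits 32..63, entry[0] bits 64..74. */
	uint32_t entry[3] = { 0x1, 0x80000000u, 0x0 };

	/* Bits 62..64: bit63 and bit64 are set above, so this prints 0x6. */
	printf("field(62,3) = 0x%x\n", ale_get_field(entry, 62, 3));
	return 0;
}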
--- /dev/null
+From 2ecb8ed54b64718f46c84f817170b440e3242029 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Oct 2021 15:24:28 +0300
+Subject: net/mlx5: Add priorities for counters in RDMA namespaces
+
+From: Aharon Landau <aharonl@nvidia.com>
+
+[ Upstream commit b8dfed636fc6239396c3a2ae5f812505906cf215 ]
+
+Add additional flow steering priorities in the RDMA namespace.
+This allows adding flow counters to count filtered RDMA traffic and then
+continue processing in the regular RDMA steering flow.
+
+Signed-off-by: Aharon Landau <aharonl@nvidia.com>
+Reviewed-by: Maor Gottlieb <maorg@nvidia.com>
+Signed-off-by: Mark Zhang <markzhang@nvidia.com>
+Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
+Stable-dep-of: c08d3e62b2e7 ("net/mlx5: Fix RDMA TX steering prio")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/fs_core.c | 54 ++++++++++++++++---
+ include/linux/mlx5/device.h | 2 +
+ include/linux/mlx5/fs.h | 2 +
+ 3 files changed, 50 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 665619ce46746..54409a9307c2d 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -99,6 +99,9 @@
+ #define LEFTOVERS_NUM_LEVELS 1
+ #define LEFTOVERS_NUM_PRIOS 1
+
++#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1
++#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1
++
+ #define BY_PASS_PRIO_NUM_LEVELS 1
+ #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\
+ LEFTOVERS_NUM_PRIOS)
+@@ -206,34 +209,63 @@ static struct init_tree_node egress_root_fs = {
+ }
+ };
+
+-#define RDMA_RX_BYPASS_PRIO 0
+-#define RDMA_RX_KERNEL_PRIO 1
++enum {
++ RDMA_RX_COUNTERS_PRIO,
++ RDMA_RX_BYPASS_PRIO,
++ RDMA_RX_KERNEL_PRIO,
++};
++
++#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS
++#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1)
++#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2)
++
+ static struct init_tree_node rdma_rx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+- .ar_size = 2,
++ .ar_size = 3,
+ .children = (struct init_tree_node[]) {
++ [RDMA_RX_COUNTERS_PRIO] =
++ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0,
++ FS_CHAINING_CAPS,
++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
++ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS,
++ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_BYPASS_PRIO] =
+- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0,
++ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+ ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ [RDMA_RX_KERNEL_PRIO] =
+- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0,
++ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN,
+ ADD_MULTIPLE_PRIO(1, 1))),
+ }
+ };
+
++enum {
++ RDMA_TX_COUNTERS_PRIO,
++ RDMA_TX_BYPASS_PRIO,
++};
++
++#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS
++#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1)
++
+ static struct init_tree_node rdma_tx_root_fs = {
+ .type = FS_TYPE_NAMESPACE,
+- .ar_size = 1,
++ .ar_size = 2,
+ .children = (struct init_tree_node[]) {
+- ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0,
++ [RDMA_TX_COUNTERS_PRIO] =
++ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0,
++ FS_CHAINING_CAPS,
++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
++ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS,
++ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))),
++ [RDMA_TX_BYPASS_PRIO] =
++ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0,
+ FS_CHAINING_CAPS_RDMA_TX,
+ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF,
+- ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS,
++ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL,
+ BY_PASS_PRIO_NUM_LEVELS))),
+ }
+ };
+@@ -2319,6 +2351,12 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ prio = RDMA_RX_KERNEL_PRIO;
+ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
+ root_ns = steering->rdma_tx_root_ns;
++ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
++ root_ns = steering->rdma_rx_root_ns;
++ prio = RDMA_RX_COUNTERS_PRIO;
++ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
++ root_ns = steering->rdma_tx_root_ns;
++ prio = RDMA_TX_COUNTERS_PRIO;
+ } else { /* Must be NIC RX */
+ root_ns = steering->root_ns;
+ prio = type;
+diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
+index 1bb4945885cec..476d8fd5a7e5b 100644
+--- a/include/linux/mlx5/device.h
++++ b/include/linux/mlx5/device.h
+@@ -1462,6 +1462,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
+ }
+
++#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
++#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
+ #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
+ #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
+ #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
+diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
+index 0e43f0fb6d73a..29db7677827d7 100644
+--- a/include/linux/mlx5/fs.h
++++ b/include/linux/mlx5/fs.h
+@@ -83,6 +83,8 @@ enum mlx5_flow_namespace_type {
+ MLX5_FLOW_NAMESPACE_RDMA_RX,
+ MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL,
+ MLX5_FLOW_NAMESPACE_RDMA_TX,
++ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS,
++ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS,
+ };
+
+ enum {
+--
+2.39.5
+
--- /dev/null
+From afbda8aa66a8dcf7b6a4a7248b42383fce3f3acc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jan 2025 13:39:04 +0200
+Subject: net/mlx5: Fix RDMA TX steering prio
+
+From: Patrisious Haddad <phaddad@nvidia.com>
+
+[ Upstream commit c08d3e62b2e73e14da318a1d20b52d0486a28ee0 ]
+
+User-added steering rules at RDMA_TX were being added to the first prio,
+which is the counters prio.
+Fix that so they are correctly added to the BYPASS_PRIO instead.
+
+Fixes: 24670b1a3166 ("net/mlx5: Add support for RDMA TX steering")
+Signed-off-by: Patrisious Haddad <phaddad@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Reviewed-by: Jacob Keller <jacob.e.keller@intel.com>
+Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 9632abb83f04c..8ff2b81960de7 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2366,6 +2366,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ root_ns = steering->rdma_tx_root_ns;
++ prio = RDMA_TX_BYPASS_PRIO;
+ break;
+ case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
+ root_ns = steering->rdma_rx_root_ns;
+--
+2.39.5
+
--- /dev/null
+From 0c97357c7c3a3798bd59918ff2fbc26cc2b4d967 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Dec 2021 11:36:19 -0800
+Subject: net/mlx5: Refactor mlx5_get_flow_namespace
+
+From: Maor Gottlieb <maorg@nvidia.com>
+
+[ Upstream commit 4588fed7beae6d54ef4c67c77fc39364f8fc42af ]
+
+Handle all the namespace type checks in the same switch statement.
+
+Signed-off-by: Maor Gottlieb <maorg@nvidia.com>
+Reviewed-by: Mark Bloch <mbloch@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
+Stable-dep-of: c08d3e62b2e7 ("net/mlx5: Fix RDMA TX steering prio")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/mellanox/mlx5/core/fs_core.c | 44 ++++++++++++++-----
+ 1 file changed, 32 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+index 54409a9307c2d..9632abb83f04c 100644
+--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+@@ -2310,6 +2310,22 @@ struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev,
+ }
+ EXPORT_SYMBOL(mlx5_get_fdb_sub_ns);
+
++static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type)
++{
++ switch (type) {
++ case MLX5_FLOW_NAMESPACE_BYPASS:
++ case MLX5_FLOW_NAMESPACE_LAG:
++ case MLX5_FLOW_NAMESPACE_OFFLOADS:
++ case MLX5_FLOW_NAMESPACE_ETHTOOL:
++ case MLX5_FLOW_NAMESPACE_KERNEL:
++ case MLX5_FLOW_NAMESPACE_LEFTOVERS:
++ case MLX5_FLOW_NAMESPACE_ANCHOR:
++ return true;
++ default:
++ return false;
++ }
++}
++
+ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ enum mlx5_flow_namespace_type type)
+ {
+@@ -2335,31 +2351,35 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
+ if (steering->sniffer_tx_root_ns)
+ return &steering->sniffer_tx_root_ns->ns;
+ return NULL;
+- default:
+- break;
+- }
+-
+- if (type == MLX5_FLOW_NAMESPACE_EGRESS ||
+- type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) {
++ case MLX5_FLOW_NAMESPACE_EGRESS:
++ case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL:
+ root_ns = steering->egress_root_ns;
+ prio = type - MLX5_FLOW_NAMESPACE_EGRESS;
+- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) {
++ break;
++ case MLX5_FLOW_NAMESPACE_RDMA_RX:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_BYPASS_PRIO;
+- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) {
++ break;
++ case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_KERNEL_PRIO;
+- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) {
++ break;
++ case MLX5_FLOW_NAMESPACE_RDMA_TX:
+ root_ns = steering->rdma_tx_root_ns;
+- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) {
++ break;
++ case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS:
+ root_ns = steering->rdma_rx_root_ns;
+ prio = RDMA_RX_COUNTERS_PRIO;
+- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) {
++ break;
++ case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS:
+ root_ns = steering->rdma_tx_root_ns;
+ prio = RDMA_TX_COUNTERS_PRIO;
+- } else { /* Must be NIC RX */
++ break;
++ default: /* Must be NIC RX */
++ WARN_ON(!is_nic_rx_ns(type));
+ root_ns = steering->root_ns;
+ prio = type;
++ break;
+ }
+
+ if (!root_ns)
+--
+2.39.5
+
--- /dev/null
+From 2137dfed09118a801a20809d363353bd241c1ead Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 11:30:00 -0500
+Subject: net: xilinx: axienet: Fix IRQ coalescing packet count overflow
+
+From: Sean Anderson <sean.anderson@linux.dev>
+
+[ Upstream commit c17ff476f53afb30f90bb3c2af77de069c81a622 ]
+
+If coalesce_count is greater than 255, it will not fit in the register and
+will overflow. This can be reproduced by running
+
+ # ethtool -C ethX rx-frames 256
+
+which will result in a timeout of 0us instead. Fix this by checking for
+invalid values and reporting an error.
+
+Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver")
+Signed-off-by: Sean Anderson <sean.anderson@linux.dev>
+Reviewed-by: Shannon Nelson <shannon.nelson@amd.com>
+Reviewed-by: Radhey Shyam Pandey <radhey.shyam.pandey@amd.com>
+Link: https://patch.msgid.link/20250113163001.2335235-1-sean.anderson@linux.dev
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+index 56a970357f450..f63d2224ba91a 100644
+--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+@@ -1584,6 +1584,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev,
+ return -EFAULT;
+ }
+
++ if (ecoalesce->rx_max_coalesced_frames > 255 ||
++ ecoalesce->tx_max_coalesced_frames > 255) {
++ NL_SET_ERR_MSG(extack, "frames must be less than 256");
++ return -EINVAL;
++ }
++
+ if (ecoalesce->rx_max_coalesced_frames)
+ lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames;
+ if (ecoalesce->rx_coalesce_usecs)
+--
+2.39.5
+
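A tiny userspace model of the failure mode and the added check; the 8-bit
field below stands in for the hardware register and all names are
illustrative.

#include <stdio.h>
#include <stdint.h>

/* What the hardware keeps: only the low 8 bits of the frame count. */
static uint32_t pack_coalesce_count(uint32_t frames)
{
	return frames & 0xff;
}

static int set_coalesce(uint32_t frames)
{
	if (frames > 255) {			/* the check the patch adds */
		fprintf(stderr, "frames must be less than 256\n");
		return -1;			/* -EINVAL in the driver */
	}
	printf("programmed count: %u\n", pack_coalesce_count(frames));
	return 0;
}

int main(void)
{
	/* Without the check, 256 silently truncates to 0, which is not
	 * what the user asked for. */
	printf("256 truncates to %u\n", pack_coalesce_count(256));
	set_coalesce(256);			/* now rejected instead */
	return 0;
}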
--- /dev/null
+From 1449e2f69ed8693fd4653cc175f40baed6662ed6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 13 Jan 2025 09:18:39 +0300
+Subject: nfp: bpf: prevent integer overflow in nfp_bpf_event_output()
+
+From: Dan Carpenter <dan.carpenter@linaro.org>
+
+[ Upstream commit 16ebb6f5b6295c9688749862a39a4889c56227f8 ]
+
+The "sizeof(struct cmsg_bpf_event) + pkt_size + data_size" math could
+potentially have an integer wrapping bug on 32bit systems. Check for
+this and return an error.
+
+Fixes: 9816dd35ecec ("nfp: bpf: perf event output helpers support")
+Signed-off-by: Dan Carpenter <dan.carpenter@linaro.org>
+Link: https://patch.msgid.link/6074805b-e78d-4b8a-bf05-e929b5377c28@stanley.mountain
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+index 9d97cd281f18e..c03558adda91e 100644
+--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c
++++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c
+@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data,
+ map_id_full = be64_to_cpu(cbe->map_ptr);
+ map_id = map_id_full;
+
+- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
++ if (size_add(pkt_size, data_size) > INT_MAX ||
++ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size)
+ return -EINVAL;
+ if (cbe->hdr.ver != NFP_CCM_ABI_VERSION)
+ return -EINVAL;
+--
+2.39.5
+
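The guard relies on the kernel's size_add(), which saturates to SIZE_MAX on
overflow so that one comparison catches wrapped sums. A userspace
approximation of the same idea, with hypothetical names:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

/* Saturating add, in the spirit of the kernel helper: never wraps, so a
 * single "> limit" comparison also catches overflow. */
static size_t checked_add(size_t a, size_t b)
{
	size_t sum = a + b;

	return sum < a ? SIZE_MAX : sum;
}

static int validate_lengths(size_t len, size_t hdr, size_t pkt, size_t data)
{
	/* Reject oversized or wrapping components before trusting the
	 * "len < hdr + pkt + data" style comparison. */
	if (checked_add(pkt, data) > INT_MAX ||
	    len < hdr + pkt + data)
		return -1;			/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	/* Plain "pkt + data" would wrap to a small value here and the
	 * length check could wrongly pass; the guard rejects it. */
	printf("%d\n", validate_lengths(64, 16, SIZE_MAX - 8, 64));
	return 0;
}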
--- /dev/null
+From 1a7c4c3cf1e173ba4f5678a9d4431358c6443ef9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Jan 2025 11:30:39 +0300
+Subject: pktgen: Avoid out-of-bounds access in get_imix_entries
+
+From: Artem Chernyshev <artem.chernyshev@red-soft.ru>
+
+[ Upstream commit 76201b5979768500bca362871db66d77cb4c225e ]
+
+Passing a sufficiently large number of imix entries leads to an invalid
+access to the pkt_dev->imix_entries array because of an incorrect boundary
+check.
+
+UBSAN: array-index-out-of-bounds in net/core/pktgen.c:874:24
+index 20 is out of range for type 'imix_pkt [20]'
+CPU: 2 PID: 1210 Comm: bash Not tainted 6.10.0-rc1 #121
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996)
+Call Trace:
+<TASK>
+dump_stack_lvl lib/dump_stack.c:117
+__ubsan_handle_out_of_bounds lib/ubsan.c:429
+get_imix_entries net/core/pktgen.c:874
+pktgen_if_write net/core/pktgen.c:1063
+pde_write fs/proc/inode.c:334
+proc_reg_write fs/proc/inode.c:346
+vfs_write fs/read_write.c:593
+ksys_write fs/read_write.c:644
+do_syscall_64 arch/x86/entry/common.c:83
+entry_SYSCALL_64_after_hwframe arch/x86/entry/entry_64.S:130
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Fixes: 52a62f8603f9 ("pktgen: Parse internet mix (imix) input")
+Signed-off-by: Artem Chernyshev <artem.chernyshev@red-soft.ru>
+[ fp: allow to fill the array completely; minor changelog cleanup ]
+Signed-off-by: Fedor Pchelkin <pchelkin@ispras.ru>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/pktgen.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/net/core/pktgen.c b/net/core/pktgen.c
+index a539f26fe4bea..5d5f03471eb0c 100644
+--- a/net/core/pktgen.c
++++ b/net/core/pktgen.c
+@@ -849,6 +849,9 @@ static ssize_t get_imix_entries(const char __user *buffer,
+ unsigned long weight;
+ unsigned long size;
+
++ if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES)
++ return -E2BIG;
++
+ len = num_arg(&buffer[i], max_digits, &size);
+ if (len < 0)
+ return len;
+@@ -878,9 +881,6 @@ static ssize_t get_imix_entries(const char __user *buffer,
+
+ i++;
+ pkt_dev->n_imix_entries++;
+-
+- if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES)
+- return -E2BIG;
+ } while (c == ' ');
+
+ return i;
+--
+2.39.5
+
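The essence of the fix is "check before write" rather than "write, then
check": the bound is tested before the array slot is touched, so the 21st
entry is rejected without any out-of-bounds write while all 20 slots remain
usable. A compact userspace sketch with illustrative names:

#include <stdio.h>

#define MAX_ENTRIES 20

struct entry { unsigned long size, weight; };

static int add_entry(struct entry *arr, int *count,
		     unsigned long size, unsigned long weight)
{
	if (*count >= MAX_ENTRIES)	/* the relocated bound check */
		return -1;		/* -E2BIG in pktgen */

	arr[*count].size = size;
	arr[*count].weight = weight;
	(*count)++;
	return 0;
}

int main(void)
{
	struct entry entries[MAX_ENTRIES];
	int count = 0;

	/* The 21st call fails cleanly; previously the slot at index 20 was
	 * written before the limit was tested. */
	for (int i = 0; i < 21; i++)
		if (add_entry(entries, &count, 64, 1))
			printf("entry %d rejected, count stays %d\n",
			       i + 1, count);
	return 0;
}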
mptcp-fix-tcp-options-overflow.patch
phy-usb-use-slow-clock-for-wake-enabled-suspend.patch
phy-usb-fix-clock-imbalance-for-suspend-resume.patch
+net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch
+bpf-fix-bpf_sk_select_reuseport-memory-leak.patch
+pktgen-avoid-out-of-bounds-access-in-get_imix_entrie.patch
+net-add-exit_batch_rtnl-method.patch
+gtp-use-exit_batch_rtnl-method.patch
+gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch
+gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch
+nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch
+net-xilinx-axienet-fix-irq-coalescing-packet-count-o.patch
+net-mlx5-add-priorities-for-counters-in-rdma-namespa.patch
+net-mlx5-refactor-mlx5_get_flow_namespace.patch
+net-mlx5-fix-rdma-tx-steering-prio.patch
+drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch