From: Sasha Levin Date: Sat, 18 Jan 2025 17:23:29 +0000 (-0500) Subject: Fixes for 5.10 X-Git-Tag: v6.1.126~11 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=42e54d4aa34bd0e15c9fdbbeeb44003bc11b1357;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 5.10 Signed-off-by: Sasha Levin --- diff --git a/queue-5.10/bpf-fix-bpf_sk_select_reuseport-memory-leak.patch b/queue-5.10/bpf-fix-bpf_sk_select_reuseport-memory-leak.patch new file mode 100644 index 0000000000..c3003a2a7d --- /dev/null +++ b/queue-5.10/bpf-fix-bpf_sk_select_reuseport-memory-leak.patch @@ -0,0 +1,112 @@ +From cddfc6c7f4ff562079bf01fd8fd5335b79b7ed42 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Jan 2025 14:21:55 +0100 +Subject: bpf: Fix bpf_sk_select_reuseport() memory leak + +From: Michal Luczaj + +[ Upstream commit b3af60928ab9129befa65e6df0310d27300942bf ] + +As pointed out in the original comment, lookup in sockmap can return a TCP +ESTABLISHED socket. Such TCP socket may have had SO_ATTACH_REUSEPORT_EBPF +set before it was ESTABLISHED. In other words, a non-NULL sk_reuseport_cb +does not imply a non-refcounted socket. + +Drop sk's reference in both error paths. + +unreferenced object 0xffff888101911800 (size 2048): + comm "test_progs", pid 44109, jiffies 4297131437 + hex dump (first 32 bytes): + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + 80 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + backtrace (crc 9336483b): + __kmalloc_noprof+0x3bf/0x560 + __reuseport_alloc+0x1d/0x40 + reuseport_alloc+0xca/0x150 + reuseport_attach_prog+0x87/0x140 + sk_reuseport_attach_bpf+0xc8/0x100 + sk_setsockopt+0x1181/0x1990 + do_sock_setsockopt+0x12b/0x160 + __sys_setsockopt+0x7b/0xc0 + __x64_sys_setsockopt+0x1b/0x30 + do_syscall_64+0x93/0x180 + entry_SYSCALL_64_after_hwframe+0x76/0x7e + +Fixes: 64d85290d79c ("bpf: Allow bpf_map_lookup_elem for SOCKMAP and SOCKHASH") +Signed-off-by: Michal Luczaj +Reviewed-by: Martin KaFai Lau +Link: https://patch.msgid.link/20250110-reuseport-memleak-v1-1-fa1ddab0adfe@rbox.co +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/core/filter.c | 30 ++++++++++++++++++------------ + 1 file changed, 18 insertions(+), 12 deletions(-) + +diff --git a/net/core/filter.c b/net/core/filter.c +index b80203274d3fc..d9f4d98acc45b 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -10016,6 +10016,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; + struct sock_reuseport *reuse; + struct sock *selected_sk; ++ int err; + + selected_sk = map->ops->map_lookup_elem(map, key); + if (!selected_sk) +@@ -10023,10 +10024,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + + reuse = rcu_dereference(selected_sk->sk_reuseport_cb); + if (!reuse) { +- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ +- if (sk_is_refcounted(selected_sk)) +- sock_put(selected_sk); +- + /* reuseport_array has only sk with non NULL sk_reuseport_cb. + * The only (!reuse) case here is - the sk has already been + * unhashed (e.g. by close()), so treat it as -ENOENT. +@@ -10034,24 +10031,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + * Other maps (e.g. sock_map) do not provide this guarantee and + * the sk may never be in the reuseport group to begin with. + */ +- return is_sockarray ? -ENOENT : -EINVAL; ++ err = is_sockarray ? 
-ENOENT : -EINVAL; ++ goto error; + } + + if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { + struct sock *sk = reuse_kern->sk; + +- if (sk->sk_protocol != selected_sk->sk_protocol) +- return -EPROTOTYPE; +- else if (sk->sk_family != selected_sk->sk_family) +- return -EAFNOSUPPORT; +- +- /* Catch all. Likely bound to a different sockaddr. */ +- return -EBADFD; ++ if (sk->sk_protocol != selected_sk->sk_protocol) { ++ err = -EPROTOTYPE; ++ } else if (sk->sk_family != selected_sk->sk_family) { ++ err = -EAFNOSUPPORT; ++ } else { ++ /* Catch all. Likely bound to a different sockaddr. */ ++ err = -EBADFD; ++ } ++ goto error; + } + + reuse_kern->selected_sk = selected_sk; + + return 0; ++error: ++ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ ++ if (sk_is_refcounted(selected_sk)) ++ sock_put(selected_sk); ++ ++ return err; + } + + static const struct bpf_func_proto sk_select_reuseport_proto = { +-- +2.39.5 + diff --git a/queue-5.10/drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch b/queue-5.10/drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch new file mode 100644 index 0000000000..9d776d0b97 --- /dev/null +++ b/queue-5.10/drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch @@ -0,0 +1,66 @@ +From 88c519d1ae4f3bdfbc19f106b201d3ecd4a98b40 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 13 Jan 2025 12:47:40 -0300 +Subject: drm/v3d: Ensure job pointer is set to NULL after job completion +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Maíra Canal + +[ Upstream commit e4b5ccd392b92300a2b341705cc4805681094e49 ] + +After a job completes, the corresponding pointer in the device must +be set to NULL. Failing to do so triggers a warning when unloading +the driver, as it appears the job is still active. To prevent this, +assign the job pointer to NULL after completing the job, indicating +the job has finished. 
+ +Fixes: 14d1d1908696 ("drm/v3d: Remove the bad signaled() implementation.") +Signed-off-by: Maíra Canal +Reviewed-by: Jose Maria Casanova Crespo +Link: https://patchwork.freedesktop.org/patch/msgid/20250113154741.67520-1-mcanal@igalia.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/v3d/v3d_irq.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c +index c88686489b888..22aa02d75c5cc 100644 +--- a/drivers/gpu/drm/v3d/v3d_irq.c ++++ b/drivers/gpu/drm/v3d/v3d_irq.c +@@ -103,6 +103,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_bcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->bin_job = NULL; + status = IRQ_HANDLED; + } + +@@ -112,6 +113,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_rcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->render_job = NULL; + status = IRQ_HANDLED; + } + +@@ -121,6 +123,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_csd_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->csd_job = NULL; + status = IRQ_HANDLED; + } + +@@ -157,6 +160,7 @@ v3d_hub_irq(int irq, void *arg) + + trace_v3d_tfu_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->tfu_job = NULL; + status = IRQ_HANDLED; + } + +-- +2.39.5 + diff --git a/queue-5.10/gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch b/queue-5.10/gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch new file mode 100644 index 0000000000..4afb3a2790 --- /dev/null +++ b/queue-5.10/gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch @@ -0,0 +1,125 @@ +From fcc0c038720418a7134e2e5052943f7ebc9eaec3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Jan 2025 10:47:53 +0900 +Subject: gtp: Destroy device along with udp socket's netns dismantle. + +From: Kuniyuki Iwashima + +[ Upstream commit eb28fd76c0a08a47b470677c6cef9dd1c60e92d1 ] + +gtp_newlink() links the device to a list in dev_net(dev) instead of +src_net, where a udp tunnel socket is created. + +Even when src_net is removed, the device stays alive on dev_net(dev). +Then, removing src_net triggers the splat below. [0] + +In this example, gtp0 is created in ns2, and the udp socket is created +in ns1. + + ip netns add ns1 + ip netns add ns2 + ip -n ns1 link add netns ns2 name gtp0 type gtp role sgsn + ip netns del ns1 + +Let's link the device to the socket's netns instead. + +Now, gtp_net_exit_batch_rtnl() needs another netdev iteration to remove +all gtp devices in the netns. 
+ +[0]: +ref_tracker: net notrefcnt@000000003d6e7d05 has 1/2 users at + sk_alloc (./include/net/net_namespace.h:345 net/core/sock.c:2236) + inet_create (net/ipv4/af_inet.c:326 net/ipv4/af_inet.c:252) + __sock_create (net/socket.c:1558) + udp_sock_create4 (net/ipv4/udp_tunnel_core.c:18) + gtp_create_sock (./include/net/udp_tunnel.h:59 drivers/net/gtp.c:1423) + gtp_create_sockets (drivers/net/gtp.c:1447) + gtp_newlink (drivers/net/gtp.c:1507) + rtnl_newlink (net/core/rtnetlink.c:3786 net/core/rtnetlink.c:3897 net/core/rtnetlink.c:4012) + rtnetlink_rcv_msg (net/core/rtnetlink.c:6922) + netlink_rcv_skb (net/netlink/af_netlink.c:2542) + netlink_unicast (net/netlink/af_netlink.c:1321 net/netlink/af_netlink.c:1347) + netlink_sendmsg (net/netlink/af_netlink.c:1891) + ____sys_sendmsg (net/socket.c:711 net/socket.c:726 net/socket.c:2583) + ___sys_sendmsg (net/socket.c:2639) + __sys_sendmsg (net/socket.c:2669) + do_syscall_64 (arch/x86/entry/common.c:52 arch/x86/entry/common.c:83) + +WARNING: CPU: 1 PID: 60 at lib/ref_tracker.c:179 ref_tracker_dir_exit (lib/ref_tracker.c:179) +Modules linked in: +CPU: 1 UID: 0 PID: 60 Comm: kworker/u16:2 Not tainted 6.13.0-rc5-00147-g4c1224501e9d #5 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 +Workqueue: netns cleanup_net +RIP: 0010:ref_tracker_dir_exit (lib/ref_tracker.c:179) +Code: 00 00 00 fc ff df 4d 8b 26 49 bd 00 01 00 00 00 00 ad de 4c 39 f5 0f 85 df 00 00 00 48 8b 74 24 08 48 89 df e8 a5 cc 12 02 90 <0f> 0b 90 48 8d 6b 44 be 04 00 00 00 48 89 ef e8 80 de 67 ff 48 89 +RSP: 0018:ff11000009a07b60 EFLAGS: 00010286 +RAX: 0000000000002bd3 RBX: ff1100000f4e1aa0 RCX: 1ffffffff0e40ac6 +RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff8423ee3c +RBP: ff1100000f4e1af0 R08: 0000000000000001 R09: fffffbfff0e395ae +R10: 0000000000000001 R11: 0000000000036001 R12: ff1100000f4e1af0 +R13: dead000000000100 R14: ff1100000f4e1af0 R15: dffffc0000000000 +FS: 0000000000000000(0000) GS:ff1100006ce80000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 00007f9b2464bd98 CR3: 0000000005286005 CR4: 0000000000771ef0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 +PKRU: 55555554 +Call Trace: + + ? __warn (kernel/panic.c:748) + ? ref_tracker_dir_exit (lib/ref_tracker.c:179) + ? report_bug (lib/bug.c:201 lib/bug.c:219) + ? handle_bug (arch/x86/kernel/traps.c:285) + ? exc_invalid_op (arch/x86/kernel/traps.c:309 (discriminator 1)) + ? asm_exc_invalid_op (./arch/x86/include/asm/idtentry.h:621) + ? _raw_spin_unlock_irqrestore (./arch/x86/include/asm/irqflags.h:42 ./arch/x86/include/asm/irqflags.h:97 ./arch/x86/include/asm/irqflags.h:155 ./include/linux/spinlock_api_smp.h:151 kernel/locking/spinlock.c:194) + ? ref_tracker_dir_exit (lib/ref_tracker.c:179) + ? __pfx_ref_tracker_dir_exit (lib/ref_tracker.c:158) + ? 
kfree (mm/slub.c:4613 mm/slub.c:4761) + net_free (net/core/net_namespace.c:476 net/core/net_namespace.c:467) + cleanup_net (net/core/net_namespace.c:664 (discriminator 3)) + process_one_work (kernel/workqueue.c:3229) + worker_thread (kernel/workqueue.c:3304 kernel/workqueue.c:3391) + kthread (kernel/kthread.c:389) + ret_from_fork (arch/x86/kernel/process.c:147) + ret_from_fork_asm (arch/x86/entry/entry_64.S:257) + + +Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)") +Reported-by: Xiao Liang +Closes: https://lore.kernel.org/netdev/20250104125732.17335-1-shaw.leon@gmail.com/ +Signed-off-by: Kuniyuki Iwashima +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/gtp.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 803ebdea4bd1f..dda9b4503e9ce 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -684,7 +684,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, + goto out_encap; + } + +- gn = net_generic(dev_net(dev), gtp_net_id); ++ gn = net_generic(src_net, gtp_net_id); + list_add(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; + +@@ -1398,6 +1398,11 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, + list_for_each_entry(net, net_list, exit_list) { + struct gtp_net *gn = net_generic(net, gtp_net_id); + struct gtp_dev *gtp, *gtp_next; ++ struct net_device *dev; ++ ++ for_each_netdev(net, dev) ++ if (dev->rtnl_link_ops == >p_link_ops) ++ gtp_dellink(dev, dev_to_kill); + + list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) + gtp_dellink(gtp->dev, dev_to_kill); +-- +2.39.5 + diff --git a/queue-5.10/gtp-use-exit_batch_rtnl-method.patch b/queue-5.10/gtp-use-exit_batch_rtnl-method.patch new file mode 100644 index 0000000000..5eabf27d63 --- /dev/null +++ b/queue-5.10/gtp-use-exit_batch_rtnl-method.patch @@ -0,0 +1,66 @@ +From a1056c83661f3d5d82b6a0cb2056cba20bf3df9c Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 14:43:03 +0000 +Subject: gtp: use exit_batch_rtnl() method + +From: Eric Dumazet + +[ Upstream commit 6eedda01b2bfdcf427b37759e053dc27232f3af1 ] + +exit_batch_rtnl() is called while RTNL is held, +and devices to be unregistered can be queued in the dev_kill_list. + +This saves one rtnl_lock()/rtnl_unlock() pair per netns +and one unregister_netdevice_many() call per netns. 
+ +Signed-off-by: Eric Dumazet +Reviewed-by: Antoine Tenart +Link: https://lore.kernel.org/r/20240206144313.2050392-8-edumazet@google.com +Signed-off-by: Jakub Kicinski +Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().") +Signed-off-by: Sasha Levin +--- + drivers/net/gtp.c | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 42839cb853f83..e44291e85f9fc 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1387,23 +1387,23 @@ static int __net_init gtp_net_init(struct net *net) + return 0; + } + +-static void __net_exit gtp_net_exit(struct net *net) ++static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, ++ struct list_head *dev_to_kill) + { +- struct gtp_net *gn = net_generic(net, gtp_net_id); +- struct gtp_dev *gtp; +- LIST_HEAD(list); ++ struct net *net; + +- rtnl_lock(); +- list_for_each_entry(gtp, &gn->gtp_dev_list, list) +- gtp_dellink(gtp->dev, &list); ++ list_for_each_entry(net, net_list, exit_list) { ++ struct gtp_net *gn = net_generic(net, gtp_net_id); ++ struct gtp_dev *gtp; + +- unregister_netdevice_many(&list); +- rtnl_unlock(); ++ list_for_each_entry(gtp, &gn->gtp_dev_list, list) ++ gtp_dellink(gtp->dev, dev_to_kill); ++ } + } + + static struct pernet_operations gtp_net_ops = { + .init = gtp_net_init, +- .exit = gtp_net_exit, ++ .exit_batch_rtnl = gtp_net_exit_batch_rtnl, + .id = >p_net_id, + .size = sizeof(struct gtp_net), + }; +-- +2.39.5 + diff --git a/queue-5.10/gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch b/queue-5.10/gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch new file mode 100644 index 0000000000..a19cc9e7d8 --- /dev/null +++ b/queue-5.10/gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch @@ -0,0 +1,91 @@ +From 15a51f3268e6953d7b0a1e0f984b095b1bfef146 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Jan 2025 10:47:52 +0900 +Subject: gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp(). + +From: Kuniyuki Iwashima + +[ Upstream commit 46841c7053e6d25fb33e0534ef023833bf03e382 ] + +gtp_newlink() links the gtp device to a list in dev_net(dev). + +However, even after the gtp device is moved to another netns, +it stays on the list but should be invisible. + +Let's use for_each_netdev_rcu() for netdev traversal in +gtp_genl_dump_pdp(). + +Note that gtp_dev_list is no longer used under RCU, so list +helpers are converted to the non-RCU variant. 
+ +Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)") +Reported-by: Xiao Liang +Closes: https://lore.kernel.org/netdev/CABAhCOQdBL6h9M2C+kd+bGivRJ9Q72JUxW+-gur0nub_=PmFPA@mail.gmail.com/ +Signed-off-by: Kuniyuki Iwashima +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/gtp.c | 19 +++++++++++-------- + 1 file changed, 11 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index e44291e85f9fc..803ebdea4bd1f 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -685,7 +685,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, + } + + gn = net_generic(dev_net(dev), gtp_net_id); +- list_add_rcu(>p->list, &gn->gtp_dev_list); ++ list_add(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; + + netdev_dbg(dev, "registered new GTP interface\n"); +@@ -711,7 +711,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head) + hlist_for_each_entry_safe(pctx, next, >p->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); + +- list_del_rcu(>p->list); ++ list_del(>p->list); + unregister_netdevice_queue(dev, head); + } + +@@ -1289,16 +1289,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, + struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; + struct net *net = sock_net(skb->sk); ++ struct net_device *dev; + struct pdp_ctx *pctx; +- struct gtp_net *gn; +- +- gn = net_generic(net, gtp_net_id); + + if (cb->args[4]) + return 0; + + rcu_read_lock(); +- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { ++ for_each_netdev_rcu(net, dev) { ++ if (dev->rtnl_link_ops != >p_link_ops) ++ continue; ++ ++ gtp = netdev_priv(dev); ++ + if (last_gtp && last_gtp != gtp) + continue; + else +@@ -1394,9 +1397,9 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, + + list_for_each_entry(net, net_list, exit_list) { + struct gtp_net *gn = net_generic(net, gtp_net_id); +- struct gtp_dev *gtp; ++ struct gtp_dev *gtp, *gtp_next; + +- list_for_each_entry(gtp, &gn->gtp_dev_list, list) ++ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) + gtp_dellink(gtp->dev, dev_to_kill); + } + } +-- +2.39.5 + diff --git a/queue-5.10/net-add-exit_batch_rtnl-method.patch b/queue-5.10/net-add-exit_batch_rtnl-method.patch new file mode 100644 index 0000000000..9b610b2d67 --- /dev/null +++ b/queue-5.10/net-add-exit_batch_rtnl-method.patch @@ -0,0 +1,128 @@ +From 13a6f47fee86032e6e17f1a0c3c40829bc8e1ae4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 14:42:57 +0000 +Subject: net: add exit_batch_rtnl() method + +From: Eric Dumazet + +[ Upstream commit fd4f101edbd9f99567ab2adb1f2169579ede7c13 ] + +Many (struct pernet_operations)->exit_batch() methods have +to acquire rtnl. + +In presence of rtnl mutex pressure, this makes cleanup_net() +very slow. + +This patch adds a new exit_batch_rtnl() method to reduce +number of rtnl acquisitions from cleanup_net(). + +exit_batch_rtnl() handlers are called while rtnl is locked, +and devices to be killed can be queued in a list provided +as their second argument. + +A single unregister_netdevice_many() is called right +before rtnl is released. + +exit_batch_rtnl() handlers are called before ->exit() and +->exit_batch() handlers. 
+ +Signed-off-by: Eric Dumazet +Reviewed-by: Antoine Tenart +Link: https://lore.kernel.org/r/20240206144313.2050392-2-edumazet@google.com +Signed-off-by: Jakub Kicinski +Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().") +Signed-off-by: Sasha Levin +--- + include/net/net_namespace.h | 3 +++ + net/core/net_namespace.c | 31 ++++++++++++++++++++++++++++++- + 2 files changed, 33 insertions(+), 1 deletion(-) + +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index eb0e7731f3b1c..c41e922fdd97e 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -393,6 +393,9 @@ struct pernet_operations { + void (*pre_exit)(struct net *net); + void (*exit)(struct net *net); + void (*exit_batch)(struct list_head *net_exit_list); ++ /* Following method is called with RTNL held. */ ++ void (*exit_batch_rtnl)(struct list_head *net_exit_list, ++ struct list_head *dev_kill_list); + unsigned int *id; + size_t size; + }; +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c +index ef19a0eaa55aa..bcf3533cb8ff1 100644 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@ -331,8 +331,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) + { + /* Must be called with pernet_ops_rwsem held */ + const struct pernet_operations *ops, *saved_ops; +- int error = 0; + LIST_HEAD(net_exit_list); ++ LIST_HEAD(dev_kill_list); ++ int error = 0; + + refcount_set(&net->count, 1); + refcount_set(&net->passive, 1); +@@ -365,6 +366,15 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) + + synchronize_rcu(); + ++ ops = saved_ops; ++ rtnl_lock(); ++ list_for_each_entry_continue_reverse(ops, &pernet_list, list) { ++ if (ops->exit_batch_rtnl) ++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); ++ } ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ + ops = saved_ops; + list_for_each_entry_continue_reverse(ops, &pernet_list, list) + ops_exit_list(ops, &net_exit_list); +@@ -569,6 +579,7 @@ static void cleanup_net(struct work_struct *work) + struct net *net, *tmp, *last; + struct llist_node *net_kill_list; + LIST_HEAD(net_exit_list); ++ LIST_HEAD(dev_kill_list); + + /* Atomically snapshot the list of namespaces to cleanup */ + net_kill_list = llist_del_all(&cleanup_list); +@@ -609,6 +620,14 @@ static void cleanup_net(struct work_struct *work) + */ + synchronize_rcu(); + ++ rtnl_lock(); ++ list_for_each_entry_reverse(ops, &pernet_list, list) { ++ if (ops->exit_batch_rtnl) ++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); ++ } ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ + /* Run all of the network namespace exit methods */ + list_for_each_entry_reverse(ops, &pernet_list, list) + ops_exit_list(ops, &net_exit_list); +@@ -1160,7 +1179,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_ + { + ops_pre_exit_list(ops, net_exit_list); + synchronize_rcu(); ++ ++ if (ops->exit_batch_rtnl) { ++ LIST_HEAD(dev_kill_list); ++ ++ rtnl_lock(); ++ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list); ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ } + ops_exit_list(ops, net_exit_list); ++ + ops_free_list(ops, net_exit_list); + } + +-- +2.39.5 + diff --git a/queue-5.10/net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch b/queue-5.10/net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch new file mode 100644 index 0000000000..a6d1350990 --- /dev/null +++ 
b/queue-5.10/net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch @@ -0,0 +1,86 @@ +From 8bf8cd9920fef2618ec3b7e7233479cee3ab035d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 8 Jan 2025 22:54:33 +0530 +Subject: net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field() + +From: Sudheer Kumar Doredla + +[ Upstream commit 03d120f27d050336f7e7d21879891542c4741f81 ] + +CPSW ALE has 75-bit ALE entries stored across three 32-bit words. +The cpsw_ale_get_field() and cpsw_ale_set_field() functions support +ALE field entries spanning up to two words at the most. + +The cpsw_ale_get_field() and cpsw_ale_set_field() functions work as +expected when ALE field spanned across word1 and word2, but fails when +ALE field spanned across word2 and word3. + +For example, while reading the ALE field spanned across word2 and word3 +(i.e. bits 62 to 64), the word3 data shifted to an incorrect position +due to the index becoming zero while flipping. +The same issue occurred when setting an ALE entry. + +This issue has not been seen in practice but will be an issue in the future +if the driver supports accessing ALE fields spanning word2 and word3 + +Fix the methods to handle getting/setting fields spanning up to two words. + +Fixes: b685f1a58956 ("net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field()/cpsw_ale_set_field()") +Signed-off-by: Sudheer Kumar Doredla +Reviewed-by: Simon Horman +Reviewed-by: Roger Quadros +Reviewed-by: Siddharth Vadapalli +Link: https://patch.msgid.link/20250108172433.311694-1-s-doredla@ti.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/ti/cpsw_ale.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c +index 73efc8b453643..bec6a68a973c4 100644 +--- a/drivers/net/ethernet/ti/cpsw_ale.c ++++ b/drivers/net/ethernet/ti/cpsw_ale.c +@@ -104,15 +104,15 @@ struct cpsw_ale_dev_id { + + static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) + { +- int idx, idx2; ++ int idx, idx2, index; + u32 hi_val = 0; + + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be fetched exceed a word */ + if (idx != idx2) { +- idx2 = 2 - idx2; /* flip */ +- hi_val = ale_entry[idx2] << ((idx2 * 32) - start); ++ index = 2 - idx2; /* flip */ ++ hi_val = ale_entry[index] << ((idx2 * 32) - start); + } + start -= idx * 32; + idx = 2 - idx; /* flip */ +@@ -122,16 +122,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) + static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, + u32 value) + { +- int idx, idx2; ++ int idx, idx2, index; + + value &= BITMASK(bits); + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be set exceed a word */ + if (idx != idx2) { +- idx2 = 2 - idx2; /* flip */ +- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32))); +- ale_entry[idx2] |= (value >> ((idx2 * 32) - start)); ++ index = 2 - idx2; /* flip */ ++ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32))); ++ ale_entry[index] |= (value >> ((idx2 * 32) - start)); + } + start -= idx * 32; + idx = 2 - idx; /* flip */ +-- +2.39.5 + diff --git a/queue-5.10/net-mlx5-add-priorities-for-counters-in-rdma-namespa.patch b/queue-5.10/net-mlx5-add-priorities-for-counters-in-rdma-namespa.patch new file mode 100644 index 0000000000..8ec019ffcf --- /dev/null +++ b/queue-5.10/net-mlx5-add-priorities-for-counters-in-rdma-namespa.patch @@ -0,0 +1,153 @@ +From 
e0220e5f4dbce86478621497cdced270c110e583 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 8 Oct 2021 15:24:28 +0300 +Subject: net/mlx5: Add priorities for counters in RDMA namespaces + +From: Aharon Landau + +[ Upstream commit b8dfed636fc6239396c3a2ae5f812505906cf215 ] + +Add additional flow steering priorities in the RDMA namespace. +This allows adding flow counters to count filtered RDMA traffic and then +continue processing in the regular RDMA steering flow. + +Signed-off-by: Aharon Landau +Reviewed-by: Maor Gottlieb +Signed-off-by: Mark Zhang +Signed-off-by: Leon Romanovsky +Stable-dep-of: c08d3e62b2e7 ("net/mlx5: Fix RDMA TX steering prio") +Signed-off-by: Sasha Levin +--- + .../net/ethernet/mellanox/mlx5/core/fs_core.c | 54 ++++++++++++++++--- + include/linux/mlx5/device.h | 2 + + include/linux/mlx5/fs.h | 2 + + 3 files changed, 50 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index c1a0d4e616b4b..3f49eff271cf2 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -98,6 +98,9 @@ + #define LEFTOVERS_NUM_LEVELS 1 + #define LEFTOVERS_NUM_PRIOS 1 + ++#define RDMA_RX_COUNTERS_PRIO_NUM_LEVELS 1 ++#define RDMA_TX_COUNTERS_PRIO_NUM_LEVELS 1 ++ + #define BY_PASS_PRIO_NUM_LEVELS 1 + #define BY_PASS_MIN_LEVEL (ETHTOOL_MIN_LEVEL + MLX5_BY_PASS_NUM_PRIOS +\ + LEFTOVERS_NUM_PRIOS) +@@ -205,34 +208,63 @@ static struct init_tree_node egress_root_fs = { + } + }; + +-#define RDMA_RX_BYPASS_PRIO 0 +-#define RDMA_RX_KERNEL_PRIO 1 ++enum { ++ RDMA_RX_COUNTERS_PRIO, ++ RDMA_RX_BYPASS_PRIO, ++ RDMA_RX_KERNEL_PRIO, ++}; ++ ++#define RDMA_RX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_REGULAR_PRIOS ++#define RDMA_RX_KERNEL_MIN_LEVEL (RDMA_RX_BYPASS_MIN_LEVEL + 1) ++#define RDMA_RX_COUNTERS_MIN_LEVEL (RDMA_RX_KERNEL_MIN_LEVEL + 2) ++ + static struct init_tree_node rdma_rx_root_fs = { + .type = FS_TYPE_NAMESPACE, +- .ar_size = 2, ++ .ar_size = 3, + .children = (struct init_tree_node[]) { ++ [RDMA_RX_COUNTERS_PRIO] = ++ ADD_PRIO(0, RDMA_RX_COUNTERS_MIN_LEVEL, 0, ++ FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(MLX5_RDMA_RX_NUM_COUNTERS_PRIOS, ++ RDMA_RX_COUNTERS_PRIO_NUM_LEVELS))), + [RDMA_RX_BYPASS_PRIO] = +- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS, 0, ++ ADD_PRIO(0, RDMA_RX_BYPASS_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, + ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_REGULAR_PRIOS, + BY_PASS_PRIO_NUM_LEVELS))), + [RDMA_RX_KERNEL_PRIO] = +- ADD_PRIO(0, MLX5_BY_PASS_NUM_REGULAR_PRIOS + 1, 0, ++ ADD_PRIO(0, RDMA_RX_KERNEL_MIN_LEVEL, 0, + FS_CHAINING_CAPS, + ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_SWITCH_DOMAIN, + ADD_MULTIPLE_PRIO(1, 1))), + } + }; + ++enum { ++ RDMA_TX_COUNTERS_PRIO, ++ RDMA_TX_BYPASS_PRIO, ++}; ++ ++#define RDMA_TX_BYPASS_MIN_LEVEL MLX5_BY_PASS_NUM_PRIOS ++#define RDMA_TX_COUNTERS_MIN_LEVEL (RDMA_TX_BYPASS_MIN_LEVEL + 1) ++ + static struct init_tree_node rdma_tx_root_fs = { + .type = FS_TYPE_NAMESPACE, +- .ar_size = 1, ++ .ar_size = 2, + .children = (struct init_tree_node[]) { +- ADD_PRIO(0, MLX5_BY_PASS_NUM_PRIOS, 0, ++ [RDMA_TX_COUNTERS_PRIO] = ++ ADD_PRIO(0, RDMA_TX_COUNTERS_MIN_LEVEL, 0, ++ FS_CHAINING_CAPS, ++ ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ++ ADD_MULTIPLE_PRIO(MLX5_RDMA_TX_NUM_COUNTERS_PRIOS, ++ RDMA_TX_COUNTERS_PRIO_NUM_LEVELS))), ++ [RDMA_TX_BYPASS_PRIO] = ++ ADD_PRIO(0, RDMA_TX_BYPASS_MIN_LEVEL, 0, + FS_CHAINING_CAPS_RDMA_TX, + 
ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, +- ADD_MULTIPLE_PRIO(MLX5_BY_PASS_NUM_PRIOS, ++ ADD_MULTIPLE_PRIO(RDMA_TX_BYPASS_MIN_LEVEL, + BY_PASS_PRIO_NUM_LEVELS))), + } + }; +@@ -2311,6 +2343,12 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + prio = RDMA_RX_KERNEL_PRIO; + } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { + root_ns = steering->rdma_tx_root_ns; ++ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) { ++ root_ns = steering->rdma_rx_root_ns; ++ prio = RDMA_RX_COUNTERS_PRIO; ++ } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) { ++ root_ns = steering->rdma_tx_root_ns; ++ prio = RDMA_TX_COUNTERS_PRIO; + } else { /* Must be NIC RX */ + root_ns = steering->root_ns; + prio = type; +diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h +index cf824366a7d1b..969ac95e2edec 100644 +--- a/include/linux/mlx5/device.h ++++ b/include/linux/mlx5/device.h +@@ -1418,6 +1418,8 @@ static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz) + return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz; + } + ++#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2 ++#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1 + #define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16 + #define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16 + #define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1 +diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h +index 846d94ad04bcc..3f0e67ee60243 100644 +--- a/include/linux/mlx5/fs.h ++++ b/include/linux/mlx5/fs.h +@@ -80,6 +80,8 @@ enum mlx5_flow_namespace_type { + MLX5_FLOW_NAMESPACE_RDMA_RX, + MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL, + MLX5_FLOW_NAMESPACE_RDMA_TX, ++ MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS, ++ MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS, + }; + + enum { +-- +2.39.5 + diff --git a/queue-5.10/net-mlx5-fix-rdma-tx-steering-prio.patch b/queue-5.10/net-mlx5-fix-rdma-tx-steering-prio.patch new file mode 100644 index 0000000000..015cb1cbc4 --- /dev/null +++ b/queue-5.10/net-mlx5-fix-rdma-tx-steering-prio.patch @@ -0,0 +1,39 @@ +From b3f82d3091cd05bb025838ac6659a7d8d73417ad Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 15 Jan 2025 13:39:04 +0200 +Subject: net/mlx5: Fix RDMA TX steering prio + +From: Patrisious Haddad + +[ Upstream commit c08d3e62b2e73e14da318a1d20b52d0486a28ee0 ] + +User added steering rules at RDMA_TX were being added to the first prio, +which is the counters prio. +Fix that so that they are correctly added to the BYPASS_PRIO instead. 
+ +Fixes: 24670b1a3166 ("net/mlx5: Add support for RDMA TX steering") +Signed-off-by: Patrisious Haddad +Reviewed-by: Mark Bloch +Reviewed-by: Jacob Keller +Signed-off-by: Tariq Toukan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 3c5e9bf1cde33..c1a33f05702ec 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2358,6 +2358,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX: + root_ns = steering->rdma_tx_root_ns; ++ prio = RDMA_TX_BYPASS_PRIO; + break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: + root_ns = steering->rdma_rx_root_ns; +-- +2.39.5 + diff --git a/queue-5.10/net-mlx5-refactor-mlx5_get_flow_namespace.patch b/queue-5.10/net-mlx5-refactor-mlx5_get_flow_namespace.patch new file mode 100644 index 0000000000..cafdb28123 --- /dev/null +++ b/queue-5.10/net-mlx5-refactor-mlx5_get_flow_namespace.patch @@ -0,0 +1,100 @@ +From fa6378ea9e26d81b8ddf7085f3cc856f28657af3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 1 Dec 2021 11:36:19 -0800 +Subject: net/mlx5: Refactor mlx5_get_flow_namespace + +From: Maor Gottlieb + +[ Upstream commit 4588fed7beae6d54ef4c67c77fc39364f8fc42af ] + +Have all the namespace type check in the same switch case. + +Signed-off-by: Maor Gottlieb +Reviewed-by: Mark Bloch +Signed-off-by: Saeed Mahameed +Reviewed-by: Leon Romanovsky +Signed-off-by: Saeed Mahameed +Stable-dep-of: c08d3e62b2e7 ("net/mlx5: Fix RDMA TX steering prio") +Signed-off-by: Sasha Levin +--- + .../net/ethernet/mellanox/mlx5/core/fs_core.c | 44 ++++++++++++++----- + 1 file changed, 32 insertions(+), 12 deletions(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 3f49eff271cf2..3c5e9bf1cde33 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2302,6 +2302,22 @@ struct mlx5_flow_namespace *mlx5_get_fdb_sub_ns(struct mlx5_core_dev *dev, + } + EXPORT_SYMBOL(mlx5_get_fdb_sub_ns); + ++static bool is_nic_rx_ns(enum mlx5_flow_namespace_type type) ++{ ++ switch (type) { ++ case MLX5_FLOW_NAMESPACE_BYPASS: ++ case MLX5_FLOW_NAMESPACE_LAG: ++ case MLX5_FLOW_NAMESPACE_OFFLOADS: ++ case MLX5_FLOW_NAMESPACE_ETHTOOL: ++ case MLX5_FLOW_NAMESPACE_KERNEL: ++ case MLX5_FLOW_NAMESPACE_LEFTOVERS: ++ case MLX5_FLOW_NAMESPACE_ANCHOR: ++ return true; ++ default: ++ return false; ++ } ++} ++ + struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + enum mlx5_flow_namespace_type type) + { +@@ -2327,31 +2343,35 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + if (steering->sniffer_tx_root_ns) + return &steering->sniffer_tx_root_ns->ns; + return NULL; +- default: +- break; +- } +- +- if (type == MLX5_FLOW_NAMESPACE_EGRESS || +- type == MLX5_FLOW_NAMESPACE_EGRESS_KERNEL) { ++ case MLX5_FLOW_NAMESPACE_EGRESS: ++ case MLX5_FLOW_NAMESPACE_EGRESS_KERNEL: + root_ns = steering->egress_root_ns; + prio = type - MLX5_FLOW_NAMESPACE_EGRESS; +- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX) { ++ break; ++ case MLX5_FLOW_NAMESPACE_RDMA_RX: + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_BYPASS_PRIO; +- } else if (type == 
MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL) { ++ break; ++ case MLX5_FLOW_NAMESPACE_RDMA_RX_KERNEL: + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_KERNEL_PRIO; +- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX) { ++ break; ++ case MLX5_FLOW_NAMESPACE_RDMA_TX: + root_ns = steering->rdma_tx_root_ns; +- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS) { ++ break; ++ case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: + root_ns = steering->rdma_rx_root_ns; + prio = RDMA_RX_COUNTERS_PRIO; +- } else if (type == MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS) { ++ break; ++ case MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS: + root_ns = steering->rdma_tx_root_ns; + prio = RDMA_TX_COUNTERS_PRIO; +- } else { /* Must be NIC RX */ ++ break; ++ default: /* Must be NIC RX */ ++ WARN_ON(!is_nic_rx_ns(type)); + root_ns = steering->root_ns; + prio = type; ++ break; + } + + if (!root_ns) +-- +2.39.5 + diff --git a/queue-5.10/net-net_namespace-optimize-the-code.patch b/queue-5.10/net-net_namespace-optimize-the-code.patch new file mode 100644 index 0000000000..c5e2843b6f --- /dev/null +++ b/queue-5.10/net-net_namespace-optimize-the-code.patch @@ -0,0 +1,160 @@ +From 8620ac544b69d7581bd6ce731caba551ae837c1d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 17 Aug 2021 23:23:00 +0800 +Subject: net: net_namespace: Optimize the code + +From: Yajun Deng + +[ Upstream commit 41467d2ff4dfe1837cbb0f45e2088e6e787580c6 ] + +There is only one caller for ops_free(), so inline it. +Separate net_drop_ns() and net_free(), so the net_free() +can be called directly. +Add free_exit_list() helper function for free net_exit_list. + +==================== +v2: + - v1 does not apply, rebase it. +==================== + +Signed-off-by: Yajun Deng +Signed-off-by: David S. Miller +Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().") +Signed-off-by: Sasha Levin +--- + net/core/net_namespace.c | 52 +++++++++++++++++++--------------------- + 1 file changed, 24 insertions(+), 28 deletions(-) + +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c +index 6192a05ebcce2..ef19a0eaa55aa 100644 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@ -113,7 +113,7 @@ static int net_assign_generic(struct net *net, unsigned int id, void *data) + } + + ng = net_alloc_generic(); +- if (ng == NULL) ++ if (!ng) + return -ENOMEM; + + /* +@@ -170,13 +170,6 @@ static int ops_init(const struct pernet_operations *ops, struct net *net) + return err; + } + +-static void ops_free(const struct pernet_operations *ops, struct net *net) +-{ +- if (ops->id && ops->size) { +- kfree(net_generic(net, *ops->id)); +- } +-} +- + static void ops_pre_exit_list(const struct pernet_operations *ops, + struct list_head *net_exit_list) + { +@@ -208,7 +201,7 @@ static void ops_free_list(const struct pernet_operations *ops, + struct net *net; + if (ops->size && ops->id) { + list_for_each_entry(net, net_exit_list, exit_list) +- ops_free(ops, net); ++ kfree(net_generic(net, *ops->id)); + } + } + +@@ -454,15 +447,18 @@ static struct net *net_alloc(void) + + static void net_free(struct net *net) + { +- kfree(rcu_access_pointer(net->gen)); +- kmem_cache_free(net_cachep, net); ++ if (refcount_dec_and_test(&net->passive)) { ++ kfree(rcu_access_pointer(net->gen)); ++ kmem_cache_free(net_cachep, net); ++ } + } + + void net_drop_ns(void *p) + { +- struct net *ns = p; +- if (ns && refcount_dec_and_test(&ns->passive)) +- net_free(ns); ++ struct net *net = (struct net *)p; ++ ++ if (net) ++ net_free(net); + } + + struct net *copy_net_ns(unsigned 
long flags, +@@ -502,7 +498,7 @@ struct net *copy_net_ns(unsigned long flags, + key_remove_domain(net->key_domain); + #endif + put_user_ns(user_ns); +- net_drop_ns(net); ++ net_free(net); + dec_ucounts: + dec_net_namespaces(ucounts); + return ERR_PTR(rv); +@@ -636,7 +632,7 @@ static void cleanup_net(struct work_struct *work) + key_remove_domain(net->key_domain); + #endif + put_user_ns(net->user_ns); +- net_drop_ns(net); ++ net_free(net); + } + } + +@@ -1160,6 +1156,14 @@ static int __init net_ns_init(void) + + pure_initcall(net_ns_init); + ++static void free_exit_list(struct pernet_operations *ops, struct list_head *net_exit_list) ++{ ++ ops_pre_exit_list(ops, net_exit_list); ++ synchronize_rcu(); ++ ops_exit_list(ops, net_exit_list); ++ ops_free_list(ops, net_exit_list); ++} ++ + #ifdef CONFIG_NET_NS + static int __register_pernet_operations(struct list_head *list, + struct pernet_operations *ops) +@@ -1185,10 +1189,7 @@ static int __register_pernet_operations(struct list_head *list, + out_undo: + /* If I have an error cleanup all namespaces I initialized */ + list_del(&ops->list); +- ops_pre_exit_list(ops, &net_exit_list); +- synchronize_rcu(); +- ops_exit_list(ops, &net_exit_list); +- ops_free_list(ops, &net_exit_list); ++ free_exit_list(ops, &net_exit_list); + return error; + } + +@@ -1201,10 +1202,8 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) + /* See comment in __register_pernet_operations() */ + for_each_net(net) + list_add_tail(&net->exit_list, &net_exit_list); +- ops_pre_exit_list(ops, &net_exit_list); +- synchronize_rcu(); +- ops_exit_list(ops, &net_exit_list); +- ops_free_list(ops, &net_exit_list); ++ ++ free_exit_list(ops, &net_exit_list); + } + + #else +@@ -1227,10 +1226,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops) + } else { + LIST_HEAD(net_exit_list); + list_add(&init_net.exit_list, &net_exit_list); +- ops_pre_exit_list(ops, &net_exit_list); +- synchronize_rcu(); +- ops_exit_list(ops, &net_exit_list); +- ops_free_list(ops, &net_exit_list); ++ free_exit_list(ops, &net_exit_list); + } + } + +-- +2.39.5 + diff --git a/queue-5.10/nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch b/queue-5.10/nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch new file mode 100644 index 0000000000..511eab580a --- /dev/null +++ b/queue-5.10/nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch @@ -0,0 +1,39 @@ +From 0f6f8bf52a674abb2ddaa417a75977ec3f15bab9 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 13 Jan 2025 09:18:39 +0300 +Subject: nfp: bpf: prevent integer overflow in nfp_bpf_event_output() + +From: Dan Carpenter + +[ Upstream commit 16ebb6f5b6295c9688749862a39a4889c56227f8 ] + +The "sizeof(struct cmsg_bpf_event) + pkt_size + data_size" math could +potentially have an integer wrapping bug on 32bit systems. Check for +this and return an error. 
+ +Fixes: 9816dd35ecec ("nfp: bpf: perf event output helpers support") +Signed-off-by: Dan Carpenter +Link: https://patch.msgid.link/6074805b-e78d-4b8a-bf05-e929b5377c28@stanley.mountain +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c +index 9d97cd281f18e..c03558adda91e 100644 +--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c ++++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c +@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, + map_id_full = be64_to_cpu(cbe->map_ptr); + map_id = map_id_full; + +- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) ++ if (size_add(pkt_size, data_size) > INT_MAX || ++ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + return -EINVAL; + if (cbe->hdr.ver != NFP_CCM_ABI_VERSION) + return -EINVAL; +-- +2.39.5 + diff --git a/queue-5.10/series b/queue-5.10/series index 81c0cc5913..ecd278c598 100644 --- a/queue-5.10/series +++ b/queue-5.10/series @@ -79,3 +79,15 @@ drm-adv7511-fix-use-after-free-in-adv7533_attach_dsi.patch sctp-sysctl-rto_min-max-avoid-using-current-nsproxy.patch phy-usb-use-slow-clock-for-wake-enabled-suspend.patch phy-usb-fix-clock-imbalance-for-suspend-resume.patch +net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch +bpf-fix-bpf_sk_select_reuseport-memory-leak.patch +net-net_namespace-optimize-the-code.patch +net-add-exit_batch_rtnl-method.patch +gtp-use-exit_batch_rtnl-method.patch +gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch +gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch +nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch +net-mlx5-add-priorities-for-counters-in-rdma-namespa.patch +net-mlx5-refactor-mlx5_get_flow_namespace.patch +net-mlx5-fix-rdma-tx-steering-prio.patch +drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch