From: Sasha Levin Date: Sat, 18 Jan 2025 17:23:27 +0000 (-0500) Subject: Fixes for 6.1 X-Git-Tag: v6.1.126~13 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=a5bf8a10390cd46f8a8463a0746d451333629b42;p=thirdparty%2Fkernel%2Fstable-queue.git Fixes for 6.1 Signed-off-by: Sasha Levin --- diff --git a/queue-6.1/bpf-fix-bpf_sk_select_reuseport-memory-leak.patch b/queue-6.1/bpf-fix-bpf_sk_select_reuseport-memory-leak.patch new file mode 100644 index 0000000000..7cd78ae28b --- /dev/null +++ b/queue-6.1/bpf-fix-bpf_sk_select_reuseport-memory-leak.patch @@ -0,0 +1,112 @@ +From f7d07fe9b3a1d8059170b733e42a8d3701dc663d Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Jan 2025 14:21:55 +0100 +Subject: bpf: Fix bpf_sk_select_reuseport() memory leak + +From: Michal Luczaj + +[ Upstream commit b3af60928ab9129befa65e6df0310d27300942bf ] + +As pointed out in the original comment, lookup in sockmap can return a TCP +ESTABLISHED socket. Such TCP socket may have had SO_ATTACH_REUSEPORT_EBPF +set before it was ESTABLISHED. In other words, a non-NULL sk_reuseport_cb +does not imply a non-refcounted socket. + +Drop sk's reference in both error paths. + +unreferenced object 0xffff888101911800 (size 2048): + comm "test_progs", pid 44109, jiffies 4297131437 + hex dump (first 32 bytes): + 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + 80 00 01 00 00 00 00 00 00 00 00 00 00 00 00 00 ................ + backtrace (crc 9336483b): + __kmalloc_noprof+0x3bf/0x560 + __reuseport_alloc+0x1d/0x40 + reuseport_alloc+0xca/0x150 + reuseport_attach_prog+0x87/0x140 + sk_reuseport_attach_bpf+0xc8/0x100 + sk_setsockopt+0x1181/0x1990 + do_sock_setsockopt+0x12b/0x160 + __sys_setsockopt+0x7b/0xc0 + __x64_sys_setsockopt+0x1b/0x30 + do_syscall_64+0x93/0x180 + entry_SYSCALL_64_after_hwframe+0x76/0x7e + +Fixes: 64d85290d79c ("bpf: Allow bpf_map_lookup_elem for SOCKMAP and SOCKHASH") +Signed-off-by: Michal Luczaj +Reviewed-by: Martin KaFai Lau +Link: https://patch.msgid.link/20250110-reuseport-memleak-v1-1-fa1ddab0adfe@rbox.co +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/core/filter.c | 30 ++++++++++++++++++------------ + 1 file changed, 18 insertions(+), 12 deletions(-) + +diff --git a/net/core/filter.c b/net/core/filter.c +index 7f9d703b00e7c..b35615c469e27 100644 +--- a/net/core/filter.c ++++ b/net/core/filter.c +@@ -11109,6 +11109,7 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + bool is_sockarray = map->map_type == BPF_MAP_TYPE_REUSEPORT_SOCKARRAY; + struct sock_reuseport *reuse; + struct sock *selected_sk; ++ int err; + + selected_sk = map->ops->map_lookup_elem(map, key); + if (!selected_sk) +@@ -11116,10 +11117,6 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + + reuse = rcu_dereference(selected_sk->sk_reuseport_cb); + if (!reuse) { +- /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ +- if (sk_is_refcounted(selected_sk)) +- sock_put(selected_sk); +- + /* reuseport_array has only sk with non NULL sk_reuseport_cb. + * The only (!reuse) case here is - the sk has already been + * unhashed (e.g. by close()), so treat it as -ENOENT. +@@ -11127,24 +11124,33 @@ BPF_CALL_4(sk_select_reuseport, struct sk_reuseport_kern *, reuse_kern, + * Other maps (e.g. sock_map) do not provide this guarantee and + * the sk may never be in the reuseport group to begin with. + */ +- return is_sockarray ? -ENOENT : -EINVAL; ++ err = is_sockarray ? 
-ENOENT : -EINVAL; ++ goto error; + } + + if (unlikely(reuse->reuseport_id != reuse_kern->reuseport_id)) { + struct sock *sk = reuse_kern->sk; + +- if (sk->sk_protocol != selected_sk->sk_protocol) +- return -EPROTOTYPE; +- else if (sk->sk_family != selected_sk->sk_family) +- return -EAFNOSUPPORT; +- +- /* Catch all. Likely bound to a different sockaddr. */ +- return -EBADFD; ++ if (sk->sk_protocol != selected_sk->sk_protocol) { ++ err = -EPROTOTYPE; ++ } else if (sk->sk_family != selected_sk->sk_family) { ++ err = -EAFNOSUPPORT; ++ } else { ++ /* Catch all. Likely bound to a different sockaddr. */ ++ err = -EBADFD; ++ } ++ goto error; + } + + reuse_kern->selected_sk = selected_sk; + + return 0; ++error: ++ /* Lookup in sock_map can return TCP ESTABLISHED sockets. */ ++ if (sk_is_refcounted(selected_sk)) ++ sock_put(selected_sk); ++ ++ return err; + } + + static const struct bpf_func_proto sk_select_reuseport_proto = { +-- +2.39.5 + diff --git a/queue-6.1/drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch b/queue-6.1/drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch new file mode 100644 index 0000000000..59c8c58e15 --- /dev/null +++ b/queue-6.1/drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch @@ -0,0 +1,66 @@ +From 8f85a9d6bc8d9295af65645c7d337342d5acc8fd Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 13 Jan 2025 12:47:40 -0300 +Subject: drm/v3d: Ensure job pointer is set to NULL after job completion +MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +From: Maíra Canal + +[ Upstream commit e4b5ccd392b92300a2b341705cc4805681094e49 ] + +After a job completes, the corresponding pointer in the device must +be set to NULL. Failing to do so triggers a warning when unloading +the driver, as it appears the job is still active. To prevent this, +assign the job pointer to NULL after completing the job, indicating +the job has finished. 
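+
+As a minimal sketch of the pattern, with hypothetical names rather than
+the driver's real fields: once the fence of the in-flight job has been
+signalled, the cached per-device job pointer is dropped so later teardown
+code does not mistake the finished job for an active one.
+
+    #include <linux/dma-fence.h>
+
+    struct my_job {
+        struct dma_fence *done_fence;   /* signalled when the HW finishes */
+    };
+
+    struct my_gpu {
+        struct my_job *active_job;      /* job currently on the hardware */
+    };
+
+    static void my_gpu_job_done_irq(struct my_gpu *gpu)
+    {
+        dma_fence_signal(gpu->active_job->done_fence);  /* wake waiters */
+        gpu->active_job = NULL;         /* device idle: unload won't warn */
+    }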
+ +Fixes: 14d1d1908696 ("drm/v3d: Remove the bad signaled() implementation.") +Signed-off-by: Maíra Canal +Reviewed-by: Jose Maria Casanova Crespo +Link: https://patchwork.freedesktop.org/patch/msgid/20250113154741.67520-1-mcanal@igalia.com +Signed-off-by: Sasha Levin +--- + drivers/gpu/drm/v3d/v3d_irq.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c +index e714d5318f309..76806039691a2 100644 +--- a/drivers/gpu/drm/v3d/v3d_irq.c ++++ b/drivers/gpu/drm/v3d/v3d_irq.c +@@ -103,6 +103,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_bcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->bin_job = NULL; + status = IRQ_HANDLED; + } + +@@ -112,6 +113,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_rcl_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->render_job = NULL; + status = IRQ_HANDLED; + } + +@@ -121,6 +123,7 @@ v3d_irq(int irq, void *arg) + + trace_v3d_csd_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->csd_job = NULL; + status = IRQ_HANDLED; + } + +@@ -157,6 +160,7 @@ v3d_hub_irq(int irq, void *arg) + + trace_v3d_tfu_irq(&v3d->drm, fence->seqno); + dma_fence_signal(&fence->base); ++ v3d->tfu_job = NULL; + status = IRQ_HANDLED; + } + +-- +2.39.5 + diff --git a/queue-6.1/gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch b/queue-6.1/gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch new file mode 100644 index 0000000000..1522483575 --- /dev/null +++ b/queue-6.1/gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch @@ -0,0 +1,125 @@ +From f658f2a2bc67818b0e0c1935e04419c17ce508e3 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Jan 2025 10:47:53 +0900 +Subject: gtp: Destroy device along with udp socket's netns dismantle. + +From: Kuniyuki Iwashima + +[ Upstream commit eb28fd76c0a08a47b470677c6cef9dd1c60e92d1 ] + +gtp_newlink() links the device to a list in dev_net(dev) instead of +src_net, where a udp tunnel socket is created. + +Even when src_net is removed, the device stays alive on dev_net(dev). +Then, removing src_net triggers the splat below. [0] + +In this example, gtp0 is created in ns2, and the udp socket is created +in ns1. + + ip netns add ns1 + ip netns add ns2 + ip -n ns1 link add netns ns2 name gtp0 type gtp role sgsn + ip netns del ns1 + +Let's link the device to the socket's netns instead. + +Now, gtp_net_exit_batch_rtnl() needs another netdev iteration to remove +all gtp devices in the netns. 
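+
+For reference, the exit handler as it ends up after the hunk below
+(condensed): the netdev walk catches devices that live in this netns but
+were queued on another netns's per-net list, while the existing list walk
+still covers devices created locally.
+
+    static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list,
+                                                   struct list_head *dev_to_kill)
+    {
+        struct net *net;
+
+        list_for_each_entry(net, net_list, exit_list) {
+            struct gtp_net *gn = net_generic(net, gtp_net_id);
+            struct gtp_dev *gtp, *gtp_next;
+            struct net_device *dev;
+
+            for_each_netdev(net, dev)               /* pass 1: every netdev here */
+                if (dev->rtnl_link_ops == &gtp_link_ops)
+                    gtp_dellink(dev, dev_to_kill);
+
+            list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list)
+                gtp_dellink(gtp->dev, dev_to_kill); /* pass 2: per-net list */
+        }
+    }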
+ +[0]: +ref_tracker: net notrefcnt@000000003d6e7d05 has 1/2 users at + sk_alloc (./include/net/net_namespace.h:345 net/core/sock.c:2236) + inet_create (net/ipv4/af_inet.c:326 net/ipv4/af_inet.c:252) + __sock_create (net/socket.c:1558) + udp_sock_create4 (net/ipv4/udp_tunnel_core.c:18) + gtp_create_sock (./include/net/udp_tunnel.h:59 drivers/net/gtp.c:1423) + gtp_create_sockets (drivers/net/gtp.c:1447) + gtp_newlink (drivers/net/gtp.c:1507) + rtnl_newlink (net/core/rtnetlink.c:3786 net/core/rtnetlink.c:3897 net/core/rtnetlink.c:4012) + rtnetlink_rcv_msg (net/core/rtnetlink.c:6922) + netlink_rcv_skb (net/netlink/af_netlink.c:2542) + netlink_unicast (net/netlink/af_netlink.c:1321 net/netlink/af_netlink.c:1347) + netlink_sendmsg (net/netlink/af_netlink.c:1891) + ____sys_sendmsg (net/socket.c:711 net/socket.c:726 net/socket.c:2583) + ___sys_sendmsg (net/socket.c:2639) + __sys_sendmsg (net/socket.c:2669) + do_syscall_64 (arch/x86/entry/common.c:52 arch/x86/entry/common.c:83) + +WARNING: CPU: 1 PID: 60 at lib/ref_tracker.c:179 ref_tracker_dir_exit (lib/ref_tracker.c:179) +Modules linked in: +CPU: 1 UID: 0 PID: 60 Comm: kworker/u16:2 Not tainted 6.13.0-rc5-00147-g4c1224501e9d #5 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 +Workqueue: netns cleanup_net +RIP: 0010:ref_tracker_dir_exit (lib/ref_tracker.c:179) +Code: 00 00 00 fc ff df 4d 8b 26 49 bd 00 01 00 00 00 00 ad de 4c 39 f5 0f 85 df 00 00 00 48 8b 74 24 08 48 89 df e8 a5 cc 12 02 90 <0f> 0b 90 48 8d 6b 44 be 04 00 00 00 48 89 ef e8 80 de 67 ff 48 89 +RSP: 0018:ff11000009a07b60 EFLAGS: 00010286 +RAX: 0000000000002bd3 RBX: ff1100000f4e1aa0 RCX: 1ffffffff0e40ac6 +RDX: 0000000000000000 RSI: 0000000000000000 RDI: ffffffff8423ee3c +RBP: ff1100000f4e1af0 R08: 0000000000000001 R09: fffffbfff0e395ae +R10: 0000000000000001 R11: 0000000000036001 R12: ff1100000f4e1af0 +R13: dead000000000100 R14: ff1100000f4e1af0 R15: dffffc0000000000 +FS: 0000000000000000(0000) GS:ff1100006ce80000(0000) knlGS:0000000000000000 +CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +CR2: 00007f9b2464bd98 CR3: 0000000005286005 CR4: 0000000000771ef0 +DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 +DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 +PKRU: 55555554 +Call Trace: + + ? __warn (kernel/panic.c:748) + ? ref_tracker_dir_exit (lib/ref_tracker.c:179) + ? report_bug (lib/bug.c:201 lib/bug.c:219) + ? handle_bug (arch/x86/kernel/traps.c:285) + ? exc_invalid_op (arch/x86/kernel/traps.c:309 (discriminator 1)) + ? asm_exc_invalid_op (./arch/x86/include/asm/idtentry.h:621) + ? _raw_spin_unlock_irqrestore (./arch/x86/include/asm/irqflags.h:42 ./arch/x86/include/asm/irqflags.h:97 ./arch/x86/include/asm/irqflags.h:155 ./include/linux/spinlock_api_smp.h:151 kernel/locking/spinlock.c:194) + ? ref_tracker_dir_exit (lib/ref_tracker.c:179) + ? __pfx_ref_tracker_dir_exit (lib/ref_tracker.c:158) + ? 
kfree (mm/slub.c:4613 mm/slub.c:4761) + net_free (net/core/net_namespace.c:476 net/core/net_namespace.c:467) + cleanup_net (net/core/net_namespace.c:664 (discriminator 3)) + process_one_work (kernel/workqueue.c:3229) + worker_thread (kernel/workqueue.c:3304 kernel/workqueue.c:3391) + kthread (kernel/kthread.c:389) + ret_from_fork (arch/x86/kernel/process.c:147) + ret_from_fork_asm (arch/x86/entry/entry_64.S:257) + + +Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)") +Reported-by: Xiao Liang +Closes: https://lore.kernel.org/netdev/20250104125732.17335-1-shaw.leon@gmail.com/ +Signed-off-by: Kuniyuki Iwashima +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/gtp.c | 7 ++++++- + 1 file changed, 6 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index f360b9a51b645..0de3dcd07cb7e 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1094,7 +1094,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, + goto out_encap; + } + +- gn = net_generic(dev_net(dev), gtp_net_id); ++ gn = net_generic(src_net, gtp_net_id); + list_add(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; + +@@ -1894,6 +1894,11 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, + list_for_each_entry(net, net_list, exit_list) { + struct gtp_net *gn = net_generic(net, gtp_net_id); + struct gtp_dev *gtp, *gtp_next; ++ struct net_device *dev; ++ ++ for_each_netdev(net, dev) ++ if (dev->rtnl_link_ops == >p_link_ops) ++ gtp_dellink(dev, dev_to_kill); + + list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) + gtp_dellink(gtp->dev, dev_to_kill); +-- +2.39.5 + diff --git a/queue-6.1/gtp-use-exit_batch_rtnl-method.patch b/queue-6.1/gtp-use-exit_batch_rtnl-method.patch new file mode 100644 index 0000000000..87e000d43a --- /dev/null +++ b/queue-6.1/gtp-use-exit_batch_rtnl-method.patch @@ -0,0 +1,66 @@ +From 59d6f622393421de3a34b8827c8c6083b49dad9e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 14:43:03 +0000 +Subject: gtp: use exit_batch_rtnl() method + +From: Eric Dumazet + +[ Upstream commit 6eedda01b2bfdcf427b37759e053dc27232f3af1 ] + +exit_batch_rtnl() is called while RTNL is held, +and devices to be unregistered can be queued in the dev_kill_list. + +This saves one rtnl_lock()/rtnl_unlock() pair per netns +and one unregister_netdevice_many() call per netns. 
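+
+Roughly, the before/after shape of the cleanup (simplified, list setup and
+error handling omitted). Before the conversion, every netns teardown paid
+for its own RTNL round-trip and its own batched unregister:
+
+    rtnl_lock();
+    list_for_each_entry(gtp, &gn->gtp_dev_list, list)
+        gtp_dellink(gtp->dev, &list);
+    unregister_netdevice_many(&list);
+    rtnl_unlock();
+
+Afterwards the handler only queues devices on dev_to_kill; the core takes
+RTNL once for the whole batch of namespaces and issues a single
+unregister_netdevice_many() after all exit_batch_rtnl() handlers have run.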
+ +Signed-off-by: Eric Dumazet +Reviewed-by: Antoine Tenart +Link: https://lore.kernel.org/r/20240206144313.2050392-8-edumazet@google.com +Signed-off-by: Jakub Kicinski +Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().") +Signed-off-by: Sasha Levin +--- + drivers/net/gtp.c | 20 ++++++++++---------- + 1 file changed, 10 insertions(+), 10 deletions(-) + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 5e0332c9d0d73..0e1dfc6157224 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1883,23 +1883,23 @@ static int __net_init gtp_net_init(struct net *net) + return 0; + } + +-static void __net_exit gtp_net_exit(struct net *net) ++static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, ++ struct list_head *dev_to_kill) + { +- struct gtp_net *gn = net_generic(net, gtp_net_id); +- struct gtp_dev *gtp; +- LIST_HEAD(list); ++ struct net *net; + +- rtnl_lock(); +- list_for_each_entry(gtp, &gn->gtp_dev_list, list) +- gtp_dellink(gtp->dev, &list); ++ list_for_each_entry(net, net_list, exit_list) { ++ struct gtp_net *gn = net_generic(net, gtp_net_id); ++ struct gtp_dev *gtp; + +- unregister_netdevice_many(&list); +- rtnl_unlock(); ++ list_for_each_entry(gtp, &gn->gtp_dev_list, list) ++ gtp_dellink(gtp->dev, dev_to_kill); ++ } + } + + static struct pernet_operations gtp_net_ops = { + .init = gtp_net_init, +- .exit = gtp_net_exit, ++ .exit_batch_rtnl = gtp_net_exit_batch_rtnl, + .id = >p_net_id, + .size = sizeof(struct gtp_net), + }; +-- +2.39.5 + diff --git a/queue-6.1/gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch b/queue-6.1/gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch new file mode 100644 index 0000000000..48a02ea457 --- /dev/null +++ b/queue-6.1/gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch @@ -0,0 +1,91 @@ +From 879ced1f446466d78514afb74a35cbc135251f2e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Fri, 10 Jan 2025 10:47:52 +0900 +Subject: gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp(). + +From: Kuniyuki Iwashima + +[ Upstream commit 46841c7053e6d25fb33e0534ef023833bf03e382 ] + +gtp_newlink() links the gtp device to a list in dev_net(dev). + +However, even after the gtp device is moved to another netns, +it stays on the list but should be invisible. + +Let's use for_each_netdev_rcu() for netdev traversal in +gtp_genl_dump_pdp(). + +Note that gtp_dev_list is no longer used under RCU, so list +helpers are converted to the non-RCU variant. 
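+
+The shape of the new traversal, condensed from the change below:
+for_each_netdev_rcu() only walks devices currently registered in the netns
+being dumped, so a gtp device that has moved elsewhere is simply not
+visited, whereas the old per-net gtp_dev_list kept it visible.
+
+    rcu_read_lock();
+    for_each_netdev_rcu(net, dev) {
+        if (dev->rtnl_link_ops != &gtp_link_ops)
+            continue;                   /* not a gtp device */
+        gtp = netdev_priv(dev);
+        /* ... dump this device's PDP contexts ... */
+    }
+    rcu_read_unlock();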
+ +Fixes: 459aa660eb1d ("gtp: add initial driver for datapath of GPRS Tunneling Protocol (GTP-U)") +Reported-by: Xiao Liang +Closes: https://lore.kernel.org/netdev/CABAhCOQdBL6h9M2C+kd+bGivRJ9Q72JUxW+-gur0nub_=PmFPA@mail.gmail.com/ +Signed-off-by: Kuniyuki Iwashima +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/gtp.c | 19 +++++++++++-------- + 1 file changed, 11 insertions(+), 8 deletions(-) + +diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c +index 0e1dfc6157224..f360b9a51b645 100644 +--- a/drivers/net/gtp.c ++++ b/drivers/net/gtp.c +@@ -1095,7 +1095,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev, + } + + gn = net_generic(dev_net(dev), gtp_net_id); +- list_add_rcu(>p->list, &gn->gtp_dev_list); ++ list_add(>p->list, &gn->gtp_dev_list); + dev->priv_destructor = gtp_destructor; + + netdev_dbg(dev, "registered new GTP interface\n"); +@@ -1121,7 +1121,7 @@ static void gtp_dellink(struct net_device *dev, struct list_head *head) + hlist_for_each_entry_safe(pctx, next, >p->tid_hash[i], hlist_tid) + pdp_context_delete(pctx); + +- list_del_rcu(>p->list); ++ list_del(>p->list); + unregister_netdevice_queue(dev, head); + } + +@@ -1689,16 +1689,19 @@ static int gtp_genl_dump_pdp(struct sk_buff *skb, + struct gtp_dev *last_gtp = (struct gtp_dev *)cb->args[2], *gtp; + int i, j, bucket = cb->args[0], skip = cb->args[1]; + struct net *net = sock_net(skb->sk); ++ struct net_device *dev; + struct pdp_ctx *pctx; +- struct gtp_net *gn; +- +- gn = net_generic(net, gtp_net_id); + + if (cb->args[4]) + return 0; + + rcu_read_lock(); +- list_for_each_entry_rcu(gtp, &gn->gtp_dev_list, list) { ++ for_each_netdev_rcu(net, dev) { ++ if (dev->rtnl_link_ops != >p_link_ops) ++ continue; ++ ++ gtp = netdev_priv(dev); ++ + if (last_gtp && last_gtp != gtp) + continue; + else +@@ -1890,9 +1893,9 @@ static void __net_exit gtp_net_exit_batch_rtnl(struct list_head *net_list, + + list_for_each_entry(net, net_list, exit_list) { + struct gtp_net *gn = net_generic(net, gtp_net_id); +- struct gtp_dev *gtp; ++ struct gtp_dev *gtp, *gtp_next; + +- list_for_each_entry(gtp, &gn->gtp_dev_list, list) ++ list_for_each_entry_safe(gtp, gtp_next, &gn->gtp_dev_list, list) + gtp_dellink(gtp->dev, dev_to_kill); + } + } +-- +2.39.5 + diff --git a/queue-6.1/net-add-exit_batch_rtnl-method.patch b/queue-6.1/net-add-exit_batch_rtnl-method.patch new file mode 100644 index 0000000000..c3fd463b1b --- /dev/null +++ b/queue-6.1/net-add-exit_batch_rtnl-method.patch @@ -0,0 +1,128 @@ +From 00ff9d4a761144095014f0356cf63f87aa56e625 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Tue, 6 Feb 2024 14:42:57 +0000 +Subject: net: add exit_batch_rtnl() method + +From: Eric Dumazet + +[ Upstream commit fd4f101edbd9f99567ab2adb1f2169579ede7c13 ] + +Many (struct pernet_operations)->exit_batch() methods have +to acquire rtnl. + +In presence of rtnl mutex pressure, this makes cleanup_net() +very slow. + +This patch adds a new exit_batch_rtnl() method to reduce +number of rtnl acquisitions from cleanup_net(). + +exit_batch_rtnl() handlers are called while rtnl is locked, +and devices to be killed can be queued in a list provided +as their second argument. + +A single unregister_netdevice_many() is called right +before rtnl is released. + +exit_batch_rtnl() handlers are called before ->exit() and +->exit_batch() handlers. 
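+
+For a driver, adopting the new hook is a matter of filling in one more
+pernet_operations method. A sketch with a hypothetical "foo" driver (names
+invented for illustration): RTNL is already held when the handler runs,
+and everything queued on dev_to_kill is unregistered in one batch by the
+core afterwards.
+
+    static void __net_exit foo_exit_batch_rtnl(struct list_head *net_list,
+                                               struct list_head *dev_to_kill)
+    {
+        struct net *net;
+
+        list_for_each_entry(net, net_list, exit_list) {
+            struct foo_net *fn = net_generic(net, foo_net_id);
+            struct foo_dev *fdev, *tmp;
+
+            list_for_each_entry_safe(fdev, tmp, &fn->dev_list, list)
+                foo_dellink(fdev->dev, dev_to_kill);    /* queue, don't free */
+        }
+    }
+
+    static struct pernet_operations foo_net_ops = {
+        .init            = foo_net_init,
+        .exit_batch_rtnl = foo_exit_batch_rtnl,
+        .id              = &foo_net_id,
+        .size            = sizeof(struct foo_net),
+    };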
+ +Signed-off-by: Eric Dumazet +Reviewed-by: Antoine Tenart +Link: https://lore.kernel.org/r/20240206144313.2050392-2-edumazet@google.com +Signed-off-by: Jakub Kicinski +Stable-dep-of: 46841c7053e6 ("gtp: Use for_each_netdev_rcu() in gtp_genl_dump_pdp().") +Signed-off-by: Sasha Levin +--- + include/net/net_namespace.h | 3 +++ + net/core/net_namespace.c | 31 ++++++++++++++++++++++++++++++- + 2 files changed, 33 insertions(+), 1 deletion(-) + +diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h +index 7ca4b7af57ca6..17c7a88418345 100644 +--- a/include/net/net_namespace.h ++++ b/include/net/net_namespace.h +@@ -426,6 +426,9 @@ struct pernet_operations { + void (*pre_exit)(struct net *net); + void (*exit)(struct net *net); + void (*exit_batch)(struct list_head *net_exit_list); ++ /* Following method is called with RTNL held. */ ++ void (*exit_batch_rtnl)(struct list_head *net_exit_list, ++ struct list_head *dev_kill_list); + unsigned int *id; + size_t size; + }; +diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c +index 6c6d2a785c004..abf1e1751d6c8 100644 +--- a/net/core/net_namespace.c ++++ b/net/core/net_namespace.c +@@ -314,8 +314,9 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) + { + /* Must be called with pernet_ops_rwsem held */ + const struct pernet_operations *ops, *saved_ops; +- int error = 0; + LIST_HEAD(net_exit_list); ++ LIST_HEAD(dev_kill_list); ++ int error = 0; + + refcount_set(&net->ns.count, 1); + ref_tracker_dir_init(&net->refcnt_tracker, 128); +@@ -353,6 +354,15 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns) + + synchronize_rcu(); + ++ ops = saved_ops; ++ rtnl_lock(); ++ list_for_each_entry_continue_reverse(ops, &pernet_list, list) { ++ if (ops->exit_batch_rtnl) ++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); ++ } ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ + ops = saved_ops; + list_for_each_entry_continue_reverse(ops, &pernet_list, list) + ops_exit_list(ops, &net_exit_list); +@@ -576,6 +586,7 @@ static void cleanup_net(struct work_struct *work) + struct net *net, *tmp, *last; + struct llist_node *net_kill_list; + LIST_HEAD(net_exit_list); ++ LIST_HEAD(dev_kill_list); + + /* Atomically snapshot the list of namespaces to cleanup */ + net_kill_list = llist_del_all(&cleanup_list); +@@ -616,6 +627,14 @@ static void cleanup_net(struct work_struct *work) + */ + synchronize_rcu(); + ++ rtnl_lock(); ++ list_for_each_entry_reverse(ops, &pernet_list, list) { ++ if (ops->exit_batch_rtnl) ++ ops->exit_batch_rtnl(&net_exit_list, &dev_kill_list); ++ } ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ + /* Run all of the network namespace exit methods */ + list_for_each_entry_reverse(ops, &pernet_list, list) + ops_exit_list(ops, &net_exit_list); +@@ -1159,7 +1178,17 @@ static void free_exit_list(struct pernet_operations *ops, struct list_head *net_ + { + ops_pre_exit_list(ops, net_exit_list); + synchronize_rcu(); ++ ++ if (ops->exit_batch_rtnl) { ++ LIST_HEAD(dev_kill_list); ++ ++ rtnl_lock(); ++ ops->exit_batch_rtnl(net_exit_list, &dev_kill_list); ++ unregister_netdevice_many(&dev_kill_list); ++ rtnl_unlock(); ++ } + ops_exit_list(ops, net_exit_list); ++ + ops_free_list(ops, net_exit_list); + } + +-- +2.39.5 + diff --git a/queue-6.1/net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch b/queue-6.1/net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch new file mode 100644 index 0000000000..473a579afe --- /dev/null +++ 
b/queue-6.1/net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch @@ -0,0 +1,86 @@ +From c5a7a66d7fdd18778bf4bc3a838c268f6e77f8c4 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 8 Jan 2025 22:54:33 +0530 +Subject: net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field() + +From: Sudheer Kumar Doredla + +[ Upstream commit 03d120f27d050336f7e7d21879891542c4741f81 ] + +CPSW ALE has 75-bit ALE entries stored across three 32-bit words. +The cpsw_ale_get_field() and cpsw_ale_set_field() functions support +ALE field entries spanning up to two words at the most. + +The cpsw_ale_get_field() and cpsw_ale_set_field() functions work as +expected when ALE field spanned across word1 and word2, but fails when +ALE field spanned across word2 and word3. + +For example, while reading the ALE field spanned across word2 and word3 +(i.e. bits 62 to 64), the word3 data shifted to an incorrect position +due to the index becoming zero while flipping. +The same issue occurred when setting an ALE entry. + +This issue has not been seen in practice but will be an issue in the future +if the driver supports accessing ALE fields spanning word2 and word3 + +Fix the methods to handle getting/setting fields spanning up to two words. + +Fixes: b685f1a58956 ("net: ethernet: ti: cpsw_ale: Fix cpsw_ale_get_field()/cpsw_ale_set_field()") +Signed-off-by: Sudheer Kumar Doredla +Reviewed-by: Simon Horman +Reviewed-by: Roger Quadros +Reviewed-by: Siddharth Vadapalli +Link: https://patch.msgid.link/20250108172433.311694-1-s-doredla@ti.com +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/ti/cpsw_ale.c | 14 +++++++------- + 1 file changed, 7 insertions(+), 7 deletions(-) + +diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c +index 2647c18d40d95..3d42ca15e8779 100644 +--- a/drivers/net/ethernet/ti/cpsw_ale.c ++++ b/drivers/net/ethernet/ti/cpsw_ale.c +@@ -106,15 +106,15 @@ struct cpsw_ale_dev_id { + + static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) + { +- int idx, idx2; ++ int idx, idx2, index; + u32 hi_val = 0; + + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be fetched exceed a word */ + if (idx != idx2) { +- idx2 = 2 - idx2; /* flip */ +- hi_val = ale_entry[idx2] << ((idx2 * 32) - start); ++ index = 2 - idx2; /* flip */ ++ hi_val = ale_entry[index] << ((idx2 * 32) - start); + } + start -= idx * 32; + idx = 2 - idx; /* flip */ +@@ -124,16 +124,16 @@ static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits) + static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits, + u32 value) + { +- int idx, idx2; ++ int idx, idx2, index; + + value &= BITMASK(bits); + idx = start / 32; + idx2 = (start + bits - 1) / 32; + /* Check if bits to be set exceed a word */ + if (idx != idx2) { +- idx2 = 2 - idx2; /* flip */ +- ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32))); +- ale_entry[idx2] |= (value >> ((idx2 * 32) - start)); ++ index = 2 - idx2; /* flip */ ++ ale_entry[index] &= ~(BITMASK(bits + start - (idx2 * 32))); ++ ale_entry[index] |= (value >> ((idx2 * 32) - start)); + } + start -= idx * 32; + idx = 2 - idx; /* flip */ +-- +2.39.5 + diff --git a/queue-6.1/net-mlx5-clear-port-select-structure-when-fail-to-cr.patch b/queue-6.1/net-mlx5-clear-port-select-structure-when-fail-to-cr.patch new file mode 100644 index 0000000000..ca6cc01d6e --- /dev/null +++ b/queue-6.1/net-mlx5-clear-port-select-structure-when-fail-to-cr.patch @@ -0,0 +1,107 @@ +From 
95d3f5861d3afe03c083cdf9c53921c983d69b0e Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 15 Jan 2025 13:39:07 +0200 +Subject: net/mlx5: Clear port select structure when fail to create + +From: Mark Zhang + +[ Upstream commit 5641e82cb55b4ecbc6366a499300917d2f3e6790 ] + +Clear the port select structure on error so no stale values left after +definers are destroyed. That's because the mlx5_lag_destroy_definers() +always try to destroy all lag definers in the tt_map, so in the flow +below lag definers get double-destroyed and cause kernel crash: + + mlx5_lag_port_sel_create() + mlx5_lag_create_definers() + mlx5_lag_create_definer() <- Failed on tt 1 + mlx5_lag_destroy_definers() <- definers[tt=0] gets destroyed + mlx5_lag_port_sel_create() + mlx5_lag_create_definers() + mlx5_lag_create_definer() <- Failed on tt 0 + mlx5_lag_destroy_definers() <- definers[tt=0] gets double-destroyed + + Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008 + Mem abort info: + ESR = 0x0000000096000005 + EC = 0x25: DABT (current EL), IL = 32 bits + SET = 0, FnV = 0 + EA = 0, S1PTW = 0 + FSC = 0x05: level 1 translation fault + Data abort info: + ISV = 0, ISS = 0x00000005, ISS2 = 0x00000000 + CM = 0, WnR = 0, TnD = 0, TagAccess = 0 + GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 + user pgtable: 64k pages, 48-bit VAs, pgdp=0000000112ce2e00 + [0000000000000008] pgd=0000000000000000, p4d=0000000000000000, pud=0000000000000000 + Internal error: Oops: 0000000096000005 [#1] PREEMPT SMP + Modules linked in: iptable_raw bonding ip_gre ip6_gre gre ip6_tunnel tunnel6 geneve ip6_udp_tunnel udp_tunnel ipip tunnel4 ip_tunnel rdma_ucm(OE) rdma_cm(OE) iw_cm(OE) ib_ipoib(OE) ib_cm(OE) ib_umad(OE) mlx5_ib(OE) ib_uverbs(OE) mlx5_fwctl(OE) fwctl(OE) mlx5_core(OE) mlxdevm(OE) ib_core(OE) mlxfw(OE) memtrack(OE) mlx_compat(OE) openvswitch nsh nf_conncount psample xt_conntrack xt_MASQUERADE nf_conntrack_netlink nfnetlink xfrm_user xfrm_algo xt_addrtype iptable_filter iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 br_netfilter bridge stp llc netconsole overlay efi_pstore sch_fq_codel zram ip_tables crct10dif_ce qemu_fw_cfg fuse ipv6 crc_ccitt [last unloaded: mlx_compat(OE)] + CPU: 3 UID: 0 PID: 217 Comm: kworker/u53:2 Tainted: G OE 6.11.0+ #2 + Tainted: [O]=OOT_MODULE, [E]=UNSIGNED_MODULE + Hardware name: QEMU KVM Virtual Machine, BIOS 0.0.0 02/06/2015 + Workqueue: mlx5_lag mlx5_do_bond_work [mlx5_core] + pstate: 60400005 (nZCv daif +PAN -UAO -TCO -DIT -SSBS BTYPE=--) + pc : mlx5_del_flow_rules+0x24/0x2c0 [mlx5_core] + lr : mlx5_lag_destroy_definer+0x54/0x100 [mlx5_core] + sp : ffff800085fafb00 + x29: ffff800085fafb00 x28: ffff0000da0c8000 x27: 0000000000000000 + x26: ffff0000da0c8000 x25: ffff0000da0c8000 x24: ffff0000da0c8000 + x23: ffff0000c31f81a0 x22: 0400000000000000 x21: ffff0000da0c8000 + x20: 0000000000000000 x19: 0000000000000001 x18: 0000000000000000 + x17: 0000000000000000 x16: 0000000000000000 x15: 0000ffff8b0c9350 + x14: 0000000000000000 x13: ffff800081390d18 x12: ffff800081dc3cc0 + x11: 0000000000000001 x10: 0000000000000b10 x9 : ffff80007ab7304c + x8 : ffff0000d00711f0 x7 : 0000000000000004 x6 : 0000000000000190 + x5 : ffff00027edb3010 x4 : 0000000000000000 x3 : 0000000000000000 + x2 : ffff0000d39b8000 x1 : ffff0000d39b8000 x0 : 0400000000000000 + Call trace: + mlx5_del_flow_rules+0x24/0x2c0 [mlx5_core] + mlx5_lag_destroy_definer+0x54/0x100 [mlx5_core] + mlx5_lag_destroy_definers+0xa0/0x108 [mlx5_core] + mlx5_lag_port_sel_create+0x2d4/0x6f8 [mlx5_core] + 
mlx5_activate_lag+0x60c/0x6f8 [mlx5_core] + mlx5_do_bond_work+0x284/0x5c8 [mlx5_core] + process_one_work+0x170/0x3e0 + worker_thread+0x2d8/0x3e0 + kthread+0x11c/0x128 + ret_from_fork+0x10/0x20 + Code: a9025bf5 aa0003f6 a90363f7 f90023f9 (f9400400) + ---[ end trace 0000000000000000 ]--- + +Fixes: dc48516ec7d3 ("net/mlx5: Lag, add support to create definers for LAG") +Signed-off-by: Mark Zhang +Reviewed-by: Leon Romanovsky +Reviewed-by: Mark Bloch +Reviewed-by: Jacob Keller +Signed-off-by: Tariq Toukan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +index 005661248c7e9..9faa9ef863a1b 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/port_sel.c +@@ -540,7 +540,7 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + set_tt_map(port_sel, hash_type); + err = mlx5_lag_create_definers(ldev, hash_type, ports); + if (err) +- return err; ++ goto clear_port_sel; + + if (port_sel->tunnel) { + err = mlx5_lag_create_inner_ttc_table(ldev); +@@ -559,6 +559,8 @@ int mlx5_lag_port_sel_create(struct mlx5_lag *ldev, + mlx5_destroy_ttc_table(port_sel->inner.ttc); + destroy_definers: + mlx5_lag_destroy_definers(ldev); ++clear_port_sel: ++ memset(port_sel, 0, sizeof(*port_sel)); + return err; + } + +-- +2.39.5 + diff --git a/queue-6.1/net-mlx5-fix-rdma-tx-steering-prio.patch b/queue-6.1/net-mlx5-fix-rdma-tx-steering-prio.patch new file mode 100644 index 0000000000..981e403fed --- /dev/null +++ b/queue-6.1/net-mlx5-fix-rdma-tx-steering-prio.patch @@ -0,0 +1,39 @@ +From 64083ffebe523416904a7c387b2247dfa832e650 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Wed, 15 Jan 2025 13:39:04 +0200 +Subject: net/mlx5: Fix RDMA TX steering prio + +From: Patrisious Haddad + +[ Upstream commit c08d3e62b2e73e14da318a1d20b52d0486a28ee0 ] + +User added steering rules at RDMA_TX were being added to the first prio, +which is the counters prio. +Fix that so that they are correctly added to the BYPASS_PRIO instead. 
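+
+A condensed view of the namespace-to-priority mapping in
+mlx5_get_flow_namespace() after the fix (only the relevant case shown):
+as the message above describes, prio otherwise stays at its default, the
+first prio, which RDMA_TX reserves for counters, so the bypass prio has to
+be selected explicitly for user rules.
+
+    int prio = 0;   /* default: first prio of the chosen root namespace */
+
+    switch (type) {
+    case MLX5_FLOW_NAMESPACE_RDMA_TX:
+        root_ns = steering->rdma_tx_root_ns;
+        prio = RDMA_TX_BYPASS_PRIO;     /* user rules go to the bypass prio */
+        break;
+    /* ... other namespaces ... */
+    }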
+ +Fixes: 24670b1a3166 ("net/mlx5: Add support for RDMA TX steering") +Signed-off-by: Patrisious Haddad +Reviewed-by: Mark Bloch +Reviewed-by: Jacob Keller +Signed-off-by: Tariq Toukan +Signed-off-by: Paolo Abeni +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +index 50fdc3cbb778e..2717450e96661 100644 +--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c ++++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +@@ -2421,6 +2421,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev, + break; + case MLX5_FLOW_NAMESPACE_RDMA_TX: + root_ns = steering->rdma_tx_root_ns; ++ prio = RDMA_TX_BYPASS_PRIO; + break; + case MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS: + root_ns = steering->rdma_rx_root_ns; +-- +2.39.5 + diff --git a/queue-6.1/net-xilinx-axienet-fix-irq-coalescing-packet-count-o.patch b/queue-6.1/net-xilinx-axienet-fix-irq-coalescing-packet-count-o.patch new file mode 100644 index 0000000000..a7a6680584 --- /dev/null +++ b/queue-6.1/net-xilinx-axienet-fix-irq-coalescing-packet-count-o.patch @@ -0,0 +1,48 @@ +From c8e768517f2424a8d64edae351203dac972d6752 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 13 Jan 2025 11:30:00 -0500 +Subject: net: xilinx: axienet: Fix IRQ coalescing packet count overflow + +From: Sean Anderson + +[ Upstream commit c17ff476f53afb30f90bb3c2af77de069c81a622 ] + +If coalesce_count is greater than 255 it will not fit in the register and +will overflow. This can be reproduced by running + + # ethtool -C ethX rx-frames 256 + +which will result in a timeout of 0us instead. Fix this by checking for +invalid values and reporting an error. 
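+
+A tiny userspace illustration of the wrap being rejected here, assuming an
+8-bit hardware count field (which the new 255 limit implies):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        unsigned int frames = 256;     /* ethtool -C ethX rx-frames 256 */
+        uint8_t count_field = frames;  /* 8-bit register field: wraps to 0 */
+
+        /* Prints "requested 256, programmed 0": coalescing silently off. */
+        printf("requested %u, programmed %u\n", frames,
+               (unsigned int)count_field);
+        return 0;
+    }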
+ +Fixes: 8a3b7a252dca ("drivers/net/ethernet/xilinx: added Xilinx AXI Ethernet driver") +Signed-off-by: Sean Anderson +Reviewed-by: Shannon Nelson +Reviewed-by: Radhey Shyam Pandey +Link: https://patch.msgid.link/20250113163001.2335235-1-sean.anderson@linux.dev +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/xilinx/xilinx_axienet_main.c | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +index ce0dd78826af0..a957721581761 100644 +--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c ++++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +@@ -1570,6 +1570,12 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, + return -EFAULT; + } + ++ if (ecoalesce->rx_max_coalesced_frames > 255 || ++ ecoalesce->tx_max_coalesced_frames > 255) { ++ NL_SET_ERR_MSG(extack, "frames must be less than 256"); ++ return -EINVAL; ++ } ++ + if (ecoalesce->rx_max_coalesced_frames) + lp->coalesce_count_rx = ecoalesce->rx_max_coalesced_frames; + if (ecoalesce->rx_coalesce_usecs) +-- +2.39.5 + diff --git a/queue-6.1/nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch b/queue-6.1/nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch new file mode 100644 index 0000000000..7beec3ff61 --- /dev/null +++ b/queue-6.1/nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch @@ -0,0 +1,39 @@ +From 9c298cfb6435ad378b31a9e75db49942aabf1f31 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Mon, 13 Jan 2025 09:18:39 +0300 +Subject: nfp: bpf: prevent integer overflow in nfp_bpf_event_output() + +From: Dan Carpenter + +[ Upstream commit 16ebb6f5b6295c9688749862a39a4889c56227f8 ] + +The "sizeof(struct cmsg_bpf_event) + pkt_size + data_size" math could +potentially have an integer wrapping bug on 32bit systems. Check for +this and return an error. 
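+
+A small userspace illustration of the 32-bit wrap (uint32_t standing in
+for a 32-bit size_t; the fix uses size_add(), which saturates instead):
+
+    #include <stdint.h>
+    #include <stdio.h>
+
+    int main(void)
+    {
+        uint32_t hdr = 16, pkt_size = UINT32_MAX - 8, data_size = 64;
+        uint32_t naive = hdr + pkt_size + data_size;  /* wraps to 71 */
+        uint64_t full  = (uint64_t)hdr + pkt_size + data_size;
+
+        /* The wrapped value easily passes a "len < sum" sanity check. */
+        printf("wrapped sum: %u\n", naive);
+        printf("real sum:    %llu\n", (unsigned long long)full);
+        return 0;
+    }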
+ +Fixes: 9816dd35ecec ("nfp: bpf: perf event output helpers support") +Signed-off-by: Dan Carpenter +Link: https://patch.msgid.link/6074805b-e78d-4b8a-bf05-e929b5377c28@stanley.mountain +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + drivers/net/ethernet/netronome/nfp/bpf/offload.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c +index 9d97cd281f18e..c03558adda91e 100644 +--- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c ++++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c +@@ -458,7 +458,8 @@ int nfp_bpf_event_output(struct nfp_app_bpf *bpf, const void *data, + map_id_full = be64_to_cpu(cbe->map_ptr); + map_id = map_id_full; + +- if (len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) ++ if (size_add(pkt_size, data_size) > INT_MAX || ++ len < sizeof(struct cmsg_bpf_event) + pkt_size + data_size) + return -EINVAL; + if (cbe->hdr.ver != NFP_CCM_ABI_VERSION) + return -EINVAL; +-- +2.39.5 + diff --git a/queue-6.1/openvswitch-fix-lockup-on-tx-to-unregistering-netdev.patch b/queue-6.1/openvswitch-fix-lockup-on-tx-to-unregistering-netdev.patch new file mode 100644 index 0000000000..39c12311a0 --- /dev/null +++ b/queue-6.1/openvswitch-fix-lockup-on-tx-to-unregistering-netdev.patch @@ -0,0 +1,79 @@ +From cfaa9eaa21b73281613469a6d71f6cc73633b9d6 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 9 Jan 2025 13:21:24 +0100 +Subject: openvswitch: fix lockup on tx to unregistering netdev with carrier + +From: Ilya Maximets + +[ Upstream commit 47e55e4b410f7d552e43011baa5be1aab4093990 ] + +Commit in a fixes tag attempted to fix the issue in the following +sequence of calls: + + do_output + -> ovs_vport_send + -> dev_queue_xmit + -> __dev_queue_xmit + -> netdev_core_pick_tx + -> skb_tx_hash + +When device is unregistering, the 'dev->real_num_tx_queues' goes to +zero and the 'while (unlikely(hash >= qcount))' loop inside the +'skb_tx_hash' becomes infinite, locking up the core forever. + +But unfortunately, checking just the carrier status is not enough to +fix the issue, because some devices may still be in unregistering +state while reporting carrier status OK. + +One example of such device is a net/dummy. It sets carrier ON +on start, but it doesn't implement .ndo_stop to set the carrier off. +And it makes sense, because dummy doesn't really have a carrier. +Therefore, while this device is unregistering, it's still easy to hit +the infinite loop in the skb_tx_hash() from the OVS datapath. There +might be other drivers that do the same, but dummy by itself is +important for the OVS ecosystem, because it is frequently used as a +packet sink for tcpdump while debugging OVS deployments. And when the +issue is hit, the only way to recover is to reboot. + +Fix that by also checking if the device is running. The running +state is handled by the net core during unregistering, so it covers +unregistering case better, and we don't really need to send packets +to devices that are not running anyway. + +While only checking the running state might be enough, the carrier +check is preserved. The running and the carrier states seem disjoined +throughout the code and different drivers. And other core functions +like __dev_direct_xmit() check both before attempting to transmit +a packet. So, it seems safer to check both flags in OVS as well. 
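+
+The hang itself comes from the queue-selection fallback; condensed from
+skb_tx_hash(), where qcount is dev->real_num_tx_queues, which an
+unregistering device has already dropped to 0:
+
+    while (unlikely(hash >= qcount))
+        hash -= qcount;     /* qcount == 0: subtracting zero forever */
+
+Hence the new guard only hands the skb to devices that are both running
+and reporting carrier, which covers drivers like net/dummy that keep
+carrier on while unregistering.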
+ +Fixes: 066b86787fa3 ("net: openvswitch: fix race on port output") +Reported-by: Friedrich Weber +Closes: https://mail.openvswitch.org/pipermail/ovs-discuss/2025-January/053423.html +Signed-off-by: Ilya Maximets +Tested-by: Friedrich Weber +Reviewed-by: Aaron Conole +Link: https://patch.msgid.link/20250109122225.4034688-1-i.maximets@ovn.org +Signed-off-by: Jakub Kicinski +Signed-off-by: Sasha Levin +--- + net/openvswitch/actions.c | 4 +++- + 1 file changed, 3 insertions(+), 1 deletion(-) + +diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c +index 21102ffe44709..18d360aaf09bc 100644 +--- a/net/openvswitch/actions.c ++++ b/net/openvswitch/actions.c +@@ -913,7 +913,9 @@ static void do_output(struct datapath *dp, struct sk_buff *skb, int out_port, + { + struct vport *vport = ovs_vport_rcu(dp, out_port); + +- if (likely(vport && netif_carrier_ok(vport->dev))) { ++ if (likely(vport && ++ netif_running(vport->dev) && ++ netif_carrier_ok(vport->dev))) { + u16 mru = OVS_CB(skb)->mru; + u32 cutlen = OVS_CB(skb)->cutlen; + +-- +2.39.5 + diff --git a/queue-6.1/pktgen-avoid-out-of-bounds-access-in-get_imix_entrie.patch b/queue-6.1/pktgen-avoid-out-of-bounds-access-in-get_imix_entrie.patch new file mode 100644 index 0000000000..b515f11e65 --- /dev/null +++ b/queue-6.1/pktgen-avoid-out-of-bounds-access-in-get_imix_entrie.patch @@ -0,0 +1,68 @@ +From c0a045d68cae3a4f81a08ed307a1aa43df0da414 Mon Sep 17 00:00:00 2001 +From: Sasha Levin +Date: Thu, 9 Jan 2025 11:30:39 +0300 +Subject: pktgen: Avoid out-of-bounds access in get_imix_entries + +From: Artem Chernyshev + +[ Upstream commit 76201b5979768500bca362871db66d77cb4c225e ] + +Passing a sufficient amount of imix entries leads to invalid access to the +pkt_dev->imix_entries array because of the incorrect boundary check. + +UBSAN: array-index-out-of-bounds in net/core/pktgen.c:874:24 +index 20 is out of range for type 'imix_pkt [20]' +CPU: 2 PID: 1210 Comm: bash Not tainted 6.10.0-rc1 #121 +Hardware name: QEMU Standard PC (i440FX + PIIX, 1996) +Call Trace: + +dump_stack_lvl lib/dump_stack.c:117 +__ubsan_handle_out_of_bounds lib/ubsan.c:429 +get_imix_entries net/core/pktgen.c:874 +pktgen_if_write net/core/pktgen.c:1063 +pde_write fs/proc/inode.c:334 +proc_reg_write fs/proc/inode.c:346 +vfs_write fs/read_write.c:593 +ksys_write fs/read_write.c:644 +do_syscall_64 arch/x86/entry/common.c:83 +entry_SYSCALL_64_after_hwframe arch/x86/entry/entry_64.S:130 + +Found by Linux Verification Center (linuxtesting.org) with SVACE. + +Fixes: 52a62f8603f9 ("pktgen: Parse internet mix (imix) input") +Signed-off-by: Artem Chernyshev +[ fp: allow to fill the array completely; minor changelog cleanup ] +Signed-off-by: Fedor Pchelkin +Signed-off-by: David S. 
Miller +Signed-off-by: Sasha Levin +--- + net/core/pktgen.c | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 471d4effa8b49..a2fb951996b85 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -850,6 +850,9 @@ static ssize_t get_imix_entries(const char __user *buffer, + unsigned long weight; + unsigned long size; + ++ if (pkt_dev->n_imix_entries >= MAX_IMIX_ENTRIES) ++ return -E2BIG; ++ + len = num_arg(&buffer[i], max_digits, &size); + if (len < 0) + return len; +@@ -879,9 +882,6 @@ static ssize_t get_imix_entries(const char __user *buffer, + + i++; + pkt_dev->n_imix_entries++; +- +- if (pkt_dev->n_imix_entries > MAX_IMIX_ENTRIES) +- return -E2BIG; + } while (c == ' '); + + return i; +-- +2.39.5 + diff --git a/queue-6.1/series b/queue-6.1/series new file mode 100644 index 0000000000..c509f1a854 --- /dev/null +++ b/queue-6.1/series @@ -0,0 +1,13 @@ +net-ethernet-ti-cpsw_ale-fix-cpsw_ale_get_field.patch +bpf-fix-bpf_sk_select_reuseport-memory-leak.patch +openvswitch-fix-lockup-on-tx-to-unregistering-netdev.patch +pktgen-avoid-out-of-bounds-access-in-get_imix_entrie.patch +net-add-exit_batch_rtnl-method.patch +gtp-use-exit_batch_rtnl-method.patch +gtp-use-for_each_netdev_rcu-in-gtp_genl_dump_pdp.patch +gtp-destroy-device-along-with-udp-socket-s-netns-dis.patch +nfp-bpf-prevent-integer-overflow-in-nfp_bpf_event_ou.patch +net-xilinx-axienet-fix-irq-coalescing-packet-count-o.patch +net-mlx5-fix-rdma-tx-steering-prio.patch +net-mlx5-clear-port-select-structure-when-fail-to-cr.patch +drm-v3d-ensure-job-pointer-is-set-to-null-after-job-.patch