git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: core: fix lockdep splat on device unregister
author     Florian Westphal <fw@strlen.de>
           Mon, 13 Oct 2025 18:50:52 +0000 (20:50 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Thu, 23 Oct 2025 14:24:30 +0000 (16:24 +0200)
[ Upstream commit 7f0fddd817ba6daebea1445ae9fab4b6d2294fa8 ]

Since the blamed commit, unregister_netdevice_many_notify() takes the netdev
mutex if the device needs it.

If the device list is too long, this will lock more device mutexes than
lockdep can handle:

unshare -n \
 bash -c 'for i in $(seq 1 100);do ip link add foo$i type dummy;done'

BUG: MAX_LOCK_DEPTH too low!
turning off the locking correctness validator.
depth: 48  max: 48!
48 locks held by kworker/u16:1/69:
 #0: ..148 ((wq_completion)netns){+.+.}-{0:0}, at: process_one_work
 #1: ..d40 (net_cleanup_work){+.+.}-{0:0}, at: process_one_work
 #2: ..bd0 (pernet_ops_rwsem){++++}-{4:4}, at: cleanup_net
 #3: ..aa8 (rtnl_mutex){+.+.}-{4:4}, at: default_device_exit_batch
 #4: ..cb0 (&dev_instance_lock_key#3){+.+.}-{4:4}, at: unregister_netdevice_many_notify
[..]
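For context, the pre-patch flow (condensed from the lines the diff below
removes; not the verbatim kernel source) holds every device's instance
mutex across the whole batched close, so the held-lock count scales with
the length of the unregister list:

	list_for_each_entry(dev, head, unreg_list) {
		if (netdev_need_ops_lock(dev)) {
			list_add_tail(&dev->close_list, &close_head);
			netdev_lock(dev);	/* one mutex held per device */
		}
	}
	netif_close_many(&close_head, true);	/* all of them still held here */
	list_for_each_entry(dev, head, unreg_list) {
		if (netdev_need_ops_lock(dev))
			netdev_unlock(dev);	/* released only after the close */
	}

With 100 dummy devices in the reproducer, that is well past the 48 held
locks lockdep can track.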

Add a helper to close and then unlock a list of net_devices.
Devices that are not up have to be skipped: netif_close_many() always
removes them from the close list without taking any further action, so
they would otherwise be left locked.

Close the devices whenever we've used up half of the tracking slots, or
once the entire list has been processed without hitting the limit.
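
With MAX_LOCK_DEPTH at 48, as in the splat above, that threshold works out
to 24 held locks; the cleanup worker there already holds four outer locks
(#0 through #3), so the batch is drained after roughly 20 device mutexes,
keeping half of lockdep's tracking slots free for whatever the notifiers
and (soft)irqs acquire during the close.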

Fixes: 7e4d784f5810 ("net: hold netdev instance lock during rtnetlink operations")
Signed-off-by: Florian Westphal <fw@strlen.de>
Link: https://patch.msgid.link/20251013185052.14021-1-fw@strlen.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
net/core/dev.c

index 8d49b2198d072f154df2022d44155d15ab1aac01..5194b70769cc52d33f092713d1586138827f28a3 100644
@@ -12088,6 +12088,35 @@ static void dev_memory_provider_uninstall(struct net_device *dev)
        }
 }
 
+/* devices must be UP and netdev_lock()'d */
+static void netif_close_many_and_unlock(struct list_head *close_head)
+{
+       struct net_device *dev, *tmp;
+
+       netif_close_many(close_head, false);
+
+       /* ... now unlock them */
+       list_for_each_entry_safe(dev, tmp, close_head, close_list) {
+               netdev_unlock(dev);
+               list_del_init(&dev->close_list);
+       }
+}
+
+static void netif_close_many_and_unlock_cond(struct list_head *close_head)
+{
+#ifdef CONFIG_LOCKDEP
+       /* We can only track up to MAX_LOCK_DEPTH locks per task.
+        *
+        * Reserve half the available slots for additional locks possibly
+        * taken by notifiers and (soft)irqs.
+        */
+       unsigned int limit = MAX_LOCK_DEPTH / 2;
+
+       if (lockdep_depth(current) > limit)
+               netif_close_many_and_unlock(close_head);
+#endif
+}
+
 void unregister_netdevice_many_notify(struct list_head *head,
                                      u32 portid, const struct nlmsghdr *nlh)
 {
@@ -12120,17 +12149,18 @@ void unregister_netdevice_many_notify(struct list_head *head,
 
        /* If device is running, close it first. Start with ops locked... */
        list_for_each_entry(dev, head, unreg_list) {
+               if (!(dev->flags & IFF_UP))
+                       continue;
                if (netdev_need_ops_lock(dev)) {
                        list_add_tail(&dev->close_list, &close_head);
                        netdev_lock(dev);
                }
+               netif_close_many_and_unlock_cond(&close_head);
        }
-       netif_close_many(&close_head, true);
-       /* ... now unlock them and go over the rest. */
+       netif_close_many_and_unlock(&close_head);
+       /* ... now go over the rest. */
        list_for_each_entry(dev, head, unreg_list) {
-               if (netdev_need_ops_lock(dev))
-                       netdev_unlock(dev);
-               else
+               if (!netdev_need_ops_lock(dev))
                        list_add_tail(&dev->close_list, &close_head);
        }
        netif_close_many(&close_head, true);