git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
net: net->nsid_lock does not need BH safety
author: Eric Dumazet <edumazet@google.com>
Fri, 27 Jun 2025 16:32:42 +0000 (16:32 +0000)
committer: Jakub Kicinski <kuba@kernel.org>
Tue, 1 Jul 2025 01:58:20 +0000 (18:58 -0700)
At the time of commit bc51dddf98c9 ("netns: avoid disabling irq
for netns id") peernet2id() was not yet using RCU.

Commit 2dce224f469f ("netns: protect netns
ID lookups with RCU") changed peernet2id() to no longer
acquire net->nsid_lock (potentially from BH context).

We do not need to block soft interrupts when acquiring
net->nsid_lock anymore.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Cc: Guillaume Nault <gnault@redhat.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Reviewed-by: Kuniyuki Iwashima <kuniyu@google.com>
Link: https://patch.msgid.link/20250627163242.230866-1-edumazet@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
net/core/net_namespace.c

index d0f607507ee8d0b6d31f11a49421b5f0a985bd3b..419604d9cf32e2e2a9af59dfef1fbcc7fab81e20 100644 (file)
@@ -319,10 +319,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
        if (refcount_read(&net->ns.count) == 0)
                return NETNSA_NSID_NOT_ASSIGNED;
 
-       spin_lock_bh(&net->nsid_lock);
+       spin_lock(&net->nsid_lock);
        id = __peernet2id(net, peer);
        if (id >= 0) {
-               spin_unlock_bh(&net->nsid_lock);
+               spin_unlock(&net->nsid_lock);
                return id;
        }
 
@@ -332,12 +332,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
         * just been idr_remove()'d from there in cleanup_net().
         */
        if (!maybe_get_net(peer)) {
-               spin_unlock_bh(&net->nsid_lock);
+               spin_unlock(&net->nsid_lock);
                return NETNSA_NSID_NOT_ASSIGNED;
        }
 
        id = alloc_netid(net, peer, -1);
-       spin_unlock_bh(&net->nsid_lock);
+       spin_unlock(&net->nsid_lock);
 
        put_net(peer);
        if (id < 0)
@@ -628,20 +628,20 @@ static void unhash_nsid(struct net *net, struct net *last)
        for_each_net(tmp) {
                int id;
 
-               spin_lock_bh(&tmp->nsid_lock);
+               spin_lock(&tmp->nsid_lock);
                id = __peernet2id(tmp, net);
                if (id >= 0)
                        idr_remove(&tmp->netns_ids, id);
-               spin_unlock_bh(&tmp->nsid_lock);
+               spin_unlock(&tmp->nsid_lock);
                if (id >= 0)
                        rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL,
                                          GFP_KERNEL);
                if (tmp == last)
                        break;
        }
-       spin_lock_bh(&net->nsid_lock);
+       spin_lock(&net->nsid_lock);
        idr_destroy(&net->netns_ids);
-       spin_unlock_bh(&net->nsid_lock);
+       spin_unlock(&net->nsid_lock);
 }
 
 static LLIST_HEAD(cleanup_list);
@@ -880,9 +880,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
                return PTR_ERR(peer);
        }
 
-       spin_lock_bh(&net->nsid_lock);
+       spin_lock(&net->nsid_lock);
        if (__peernet2id(net, peer) >= 0) {
-               spin_unlock_bh(&net->nsid_lock);
+               spin_unlock(&net->nsid_lock);
                err = -EEXIST;
                NL_SET_BAD_ATTR(extack, nla);
                NL_SET_ERR_MSG(extack,
@@ -891,7 +891,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh,
        }
 
        err = alloc_netid(net, peer, nsid);
-       spin_unlock_bh(&net->nsid_lock);
+       spin_unlock(&net->nsid_lock);
        if (err >= 0) {
                rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid,
                                  nlh, GFP_KERNEL);