pid: drop irq disablement around pidmap_lock
author    Mateusz Guzik <mjguzik@gmail.com>
          Thu, 6 Feb 2025 16:44:14 +0000 (17:44 +0100)
committer Christian Brauner <brauner@kernel.org>
          Fri, 7 Feb 2025 10:22:44 +0000 (11:22 +0100)
Disabling interrupts around pidmap_lock no longer serves any purpose now that
the tasklist_lock -> pidmap_lock ordering got eliminated.

Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Mateusz Guzik <mjguzik@gmail.com>
Link: https://lore.kernel.org/r/20250206164415.450051-6-mjguzik@gmail.com
Acked-by: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
Signed-off-by: Christian Brauner <brauner@kernel.org>
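
For context, a minimal sketch of the two locking styles the diff converts between
(hypothetical example_lock, for illustration only, not part of this patch): the
_irqsave/_irqrestore and _irq variants disable local interrupts for the duration of
the critical section, which is only needed when the lock may be acquired with
interrupts already disabled, e.g. while nested under an irq-safe lock such as
tasklist_lock. With that nesting gone, plain spin_lock()/spin_unlock() suffices.

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock, not from the patch */

static void old_style(void)
{
        unsigned long flags;

        spin_lock_irqsave(&example_lock, flags);        /* saves and disables local irqs */
        /* ... critical section ... */
        spin_unlock_irqrestore(&example_lock, flags);   /* restores previous irq state */
}

static void new_style(void)
{
        spin_lock(&example_lock);       /* local irq state left untouched */
        /* ... critical section ... */
        spin_unlock(&example_lock);
}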
kernel/pid.c

index 73625f28c1662d577aeeb6e07a3835429b726d06..900193de42324d21781fba2a48c45d0816892332 100644
@@ -115,11 +115,10 @@ static void delayed_put_pid(struct rcu_head *rhp)
 void free_pid(struct pid *pid)
 {
        int i;
-       unsigned long flags;
 
        lockdep_assert_not_held(&tasklist_lock);
 
-       spin_lock_irqsave(&pidmap_lock, flags);
+       spin_lock(&pidmap_lock);
        for (i = 0; i <= pid->level; i++) {
                struct upid *upid = pid->numbers + i;
                struct pid_namespace *ns = upid->ns;
@@ -142,7 +141,7 @@ void free_pid(struct pid *pid)
                idr_remove(&ns->idr, upid->nr);
        }
        pidfs_remove_pid(pid);
-       spin_unlock_irqrestore(&pidmap_lock, flags);
+       spin_unlock(&pidmap_lock);
 
        call_rcu(&pid->rcu, delayed_put_pid);
 }
@@ -210,7 +209,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                }
 
                idr_preload(GFP_KERNEL);
-               spin_lock_irq(&pidmap_lock);
+               spin_lock(&pidmap_lock);
 
                if (tid) {
                        nr = idr_alloc(&tmp->idr, NULL, tid,
@@ -237,7 +236,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                        nr = idr_alloc_cyclic(&tmp->idr, NULL, pid_min,
                                              pid_max, GFP_ATOMIC);
                }
-               spin_unlock_irq(&pidmap_lock);
+               spin_unlock(&pidmap_lock);
                idr_preload_end();
 
                if (nr < 0) {
@@ -271,7 +270,7 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
 
        upid = pid->numbers + ns->level;
        idr_preload(GFP_KERNEL);
-       spin_lock_irq(&pidmap_lock);
+       spin_lock(&pidmap_lock);
        if (!(ns->pid_allocated & PIDNS_ADDING))
                goto out_unlock;
        pidfs_add_pid(pid);
@@ -280,18 +279,18 @@ struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
                idr_replace(&upid->ns->idr, pid, upid->nr);
                upid->ns->pid_allocated++;
        }
-       spin_unlock_irq(&pidmap_lock);
+       spin_unlock(&pidmap_lock);
        idr_preload_end();
 
        return pid;
 
 out_unlock:
-       spin_unlock_irq(&pidmap_lock);
+       spin_unlock(&pidmap_lock);
        idr_preload_end();
        put_pid_ns(ns);
 
 out_free:
-       spin_lock_irq(&pidmap_lock);
+       spin_lock(&pidmap_lock);
        while (++i <= ns->level) {
                upid = pid->numbers + i;
                idr_remove(&upid->ns->idr, upid->nr);
@@ -301,7 +300,7 @@ out_free:
        if (ns->pid_allocated == PIDNS_ADDING)
                idr_set_cursor(&ns->idr, 0);
 
-       spin_unlock_irq(&pidmap_lock);
+       spin_unlock(&pidmap_lock);
 
        kmem_cache_free(ns->pid_cachep, pid);
        return ERR_PTR(retval);
@@ -309,9 +308,9 @@ out_free:
 
 void disable_pid_allocation(struct pid_namespace *ns)
 {
-       spin_lock_irq(&pidmap_lock);
+       spin_lock(&pidmap_lock);
        ns->pid_allocated &= ~PIDNS_ADDING;
-       spin_unlock_irq(&pidmap_lock);
+       spin_unlock(&pidmap_lock);
 }
 
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
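
A note on the surrounding pattern in alloc_pid(): idr_preload(GFP_KERNEL) fills a
per-cpu cache while sleeping is still allowed, so the allocation done under the
spinlock can use GFP_ATOMIC. A minimal sketch of that pattern (hypothetical
my_idr/my_lock, not taken from the patch):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(my_idr);              /* hypothetical IDR, for illustration only */
static DEFINE_SPINLOCK(my_lock);        /* hypothetical lock protecting it */

static int sketch_alloc(void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);        /* may sleep; preloads per-cpu node cache */
        spin_lock(&my_lock);            /* plain lock, as after this patch */
        id = idr_alloc(&my_idr, ptr, 0, 0, GFP_ATOMIC);
        spin_unlock(&my_lock);
        idr_preload_end();

        return id;                      /* negative errno on failure */
}

Dropping the irq disablement does not change this pattern; GFP_ATOMIC allocations
are valid regardless of whether local interrupts are disabled.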