]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
cgroup/cpuset: Remove redundant rcu_read_lock/unlock() in spin_lock
authorpengdonglin <pengdonglin@xiaomi.com>
Tue, 16 Sep 2025 04:47:30 +0000 (12:47 +0800)
committerTejun Heo <tj@kernel.org>
Tue, 16 Sep 2025 18:36:24 +0000 (08:36 -1000)
Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side function definitions")
there is no difference between rcu_read_lock(), rcu_read_lock_bh() and
rcu_read_lock_sched() in terms of RCU read section and the relevant grace
period. That means that spin_lock(), which implies rcu_read_lock_sched(),
also implies rcu_read_lock().

There is no need to explicitly start an RCU read section if one has already
been started implicitly by spin_lock().

Simplify the code and remove the inner rcu_read_lock() invocation.

Cc: Waiman Long <longman@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Waiman Long <longman@redhat.com>
Signed-off-by: pengdonglin <pengdonglin@xiaomi.com>
Signed-off-by: pengdonglin <dolinux.peng@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
kernel/cgroup/cpuset.c

index 0d41b4993f8cf4d7feda6096c47d0c82e46b375c..caa885823eebc7486290e6f8097a6a6ff1c2091a 100644 (file)
@@ -4107,7 +4107,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
        struct cpuset *cs;
 
        spin_lock_irqsave(&callback_lock, flags);
-       rcu_read_lock();
 
        cs = task_cs(tsk);
        if (cs != &top_cpuset)
@@ -4129,7 +4128,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
                        cpumask_copy(pmask, possible_mask);
        }
 
-       rcu_read_unlock();
        spin_unlock_irqrestore(&callback_lock, flags);
 }
 
@@ -4202,9 +4200,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
        unsigned long flags;
 
        spin_lock_irqsave(&callback_lock, flags);
-       rcu_read_lock();
        guarantee_online_mems(task_cs(tsk), &mask);
-       rcu_read_unlock();
        spin_unlock_irqrestore(&callback_lock, flags);
 
        return mask;
@@ -4299,10 +4295,8 @@ bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
        /* Not hardwall and node outside mems_allowed: scan up cpusets */
        spin_lock_irqsave(&callback_lock, flags);
 
-       rcu_read_lock();
        cs = nearest_hardwall_ancestor(task_cs(current));
        allowed = node_isset(node, cs->mems_allowed);
-       rcu_read_unlock();
 
        spin_unlock_irqrestore(&callback_lock, flags);
        return allowed;