From: pengdonglin
Date: Tue, 16 Sep 2025 04:47:30 +0000 (+0800)
Subject: cgroup/cpuset: Remove redundant rcu_read_lock/unlock() in spin_lock
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=58ab6d25a1bfca42510979cb2b6921f1c807bd02;p=thirdparty%2Fkernel%2Fstable.git

cgroup/cpuset: Remove redundant rcu_read_lock/unlock() in spin_lock

Since commit a8bb74acd8efe ("rcu: Consolidate RCU-sched update-side
function definitions") there is no difference between rcu_read_lock(),
rcu_read_lock_bh() and rcu_read_lock_sched() in terms of the RCU read-side
critical section and the associated grace period. That means that
spin_lock(), which implies rcu_read_lock_sched(), also implies
rcu_read_lock().

There is no need to explicitly start an RCU read section if one has
already been started implicitly by spin_lock().

Simplify the code and remove the inner rcu_read_lock() invocations.

Cc: Waiman Long
Cc: Johannes Weiner
Acked-by: Waiman Long
Signed-off-by: pengdonglin
Signed-off-by: pengdonglin
Signed-off-by: Tejun Heo
---

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index 0d41b4993f8cf..caa885823eebc 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -4107,7 +4107,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
         struct cpuset *cs;
 
         spin_lock_irqsave(&callback_lock, flags);
-        rcu_read_lock();
 
         cs = task_cs(tsk);
         if (cs != &top_cpuset)
@@ -4129,7 +4128,6 @@ void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
                         cpumask_copy(pmask, possible_mask);
         }
 
-        rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
 }
 
@@ -4202,9 +4200,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
         unsigned long flags;
 
         spin_lock_irqsave(&callback_lock, flags);
-        rcu_read_lock();
         guarantee_online_mems(task_cs(tsk), &mask);
-        rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
 
         return mask;
@@ -4299,10 +4295,8 @@ bool cpuset_current_node_allowed(int node, gfp_t gfp_mask)
 
         /* Not hardwall and node outside mems_allowed: scan up cpusets */
         spin_lock_irqsave(&callback_lock, flags);
-        rcu_read_lock();
         cs = nearest_hardwall_ancestor(task_cs(current));
         allowed = node_isset(node, cs->mems_allowed);
-        rcu_read_unlock();
         spin_unlock_irqrestore(&callback_lock, flags);
 
         return allowed;
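
For context, a minimal sketch of the idiom this change relies on. The names
(example_lock, example_ptr, struct example, read_value_*) are made up for
illustration and are not taken from cpuset.c; this is a hedged sketch of the
pattern, not the kernel's actual code.

    #include <linux/spinlock.h>
    #include <linux/rcupdate.h>

    struct example {
            int value;
    };

    static DEFINE_SPINLOCK(example_lock);
    static struct example __rcu *example_ptr;

    /* Before: explicit RCU read section nested inside the spinlock. */
    static int read_value_old(void)
    {
            struct example *p;
            int v;

            spin_lock(&example_lock);
            rcu_read_lock();        /* redundant: spin_lock() already implies
                                     * rcu_read_lock_sched(), which now shares
                                     * the same read section and grace period */
            p = rcu_dereference(example_ptr);
            v = p ? p->value : -1;
            rcu_read_unlock();
            spin_unlock(&example_lock);
            return v;
    }

    /*
     * After: the spinlock alone keeps the RCU read section open; the lockdep
     * expression documents for CONFIG_PROVE_RCU why the dereference is safe.
     */
    static int read_value_new(void)
    {
            struct example *p;
            int v;

            spin_lock(&example_lock);
            p = rcu_dereference_check(example_ptr,
                                      lockdep_is_held(&example_lock));
            v = p ? p->value : -1;
            spin_unlock(&example_lock);
            return v;
    }

Per the reasoning in the commit message, an updater that publishes a new
example_ptr with rcu_assign_pointer() and then calls synchronize_rcu() waits
for both variants above, because the spin_lock()ed region itself counts as an
RCU read-side critical section after the flavor consolidation.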