From a03fee333a2f1e065a739bdbe5edbc5512fab9a4 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra <peterz@infradead.org> Date: Fri, 14 Nov 2025 11:00:55 +0100 Subject: [PATCH] sched/fair: Remove superfluous rcu_read_lock() With fair switched to rcu_dereference_all() validation, having IRQ or preemption disabled is sufficient, remove the rcu_read_lock() clutter. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Link: https://patch.msgid.link/20251127154725.647502625@infradead.org --- kernel/sched/fair.c | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 44a359d6a299f..496a30a418540 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12856,21 +12856,16 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) */ rq_unpin_lock(this_rq, rf); - rcu_read_lock(); sd = rcu_dereference_sched_domain(this_rq->sd); - if (!sd) { - rcu_read_unlock(); + if (!sd) goto out; - } if (!get_rd_overloaded(this_rq->rd) || this_rq->avg_idle < sd->max_newidle_lb_cost) { update_next_balance(sd, &next_balance); - rcu_read_unlock(); goto out; } - rcu_read_unlock(); /* * Include sched_balance_update_blocked_averages() in the cost @@ -12883,7 +12878,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) rq_modified_clear(this_rq); raw_spin_rq_unlock(this_rq); - rcu_read_lock(); for_each_domain(this_cpu, sd) { u64 domain_cost; @@ -12933,7 +12927,6 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf) if (pulled_task || !continue_balancing) break; } - rcu_read_unlock(); raw_spin_rq_lock(this_rq);
-- 
2.47.3