git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched/fair: Simplify SIS_UTIL handling in select_idle_cpu()
author K Prateek Nayak <kprateek.nayak@amd.com>
Thu, 12 Mar 2026 04:44:34 +0000 (04:44 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 18 Mar 2026 08:06:50 +0000 (09:06 +0100)
Use the "sd_llc" passed to select_idle_cpu() to obtain the
"sd_llc_shared" instead of dereferencing the per-CPU variable.

Since "sd->shared" is always reclaimed at the same time as "sd" via
call_rcu() and update_top_cache_domain() always ensures a valid
"sd->shared" assignment when "sd_llc" is present, "sd_llc->shared" can
always be dereferenced without needing an additional check.

While at it move the cpumask_and() operation after the SIS_UTIL bailout
check to avoid unnecessarily computing the cpumask.

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Shrikanth Hegde <sshegde@linux.ibm.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://patch.msgid.link/20260312044434.1974-10-kprateek.nayak@amd.com
kernel/sched/fair.c

index 85c22f0f8de80c1723e044cad467e19c0ac0b4ed..0a35a82e47920b6863f1c4c65c7b2244a3e51723 100644 (file)
@@ -7876,21 +7876,26 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, bool
 {
        struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_rq_mask);
        int i, cpu, idle_cpu = -1, nr = INT_MAX;
-       struct sched_domain_shared *sd_share;
-
-       cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr);
 
        if (sched_feat(SIS_UTIL)) {
-               sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, target));
-               if (sd_share) {
-                       /* because !--nr is the condition to stop scan */
-                       nr = READ_ONCE(sd_share->nr_idle_scan) + 1;
-                       /* overloaded LLC is unlikely to have idle cpu/core */
-                       if (nr == 1)
-                               return -1;
-               }
+               /*
+                * Increment because !--nr is the condition to stop scan.
+                *
+                * Since "sd" is "sd_llc" for target CPU dereferenced in the
+                * caller, it is safe to directly dereference "sd->shared".
+                * Topology bits always ensure it is assigned for "sd_llc" and it
+                * cannot disappear as long as we have an RCU protected
+                * reference to the associated "sd" here.
+                */
+               nr = READ_ONCE(sd->shared->nr_idle_scan) + 1;
+               /* overloaded LLC is unlikely to have idle cpu/core */
+               if (nr == 1)
+                       return -1;
        }
 
+       if (!cpumask_and(cpus, sched_domain_span(sd), p->cpus_ptr))
+               return -1;
+
        if (static_branch_unlikely(&sched_cluster_active)) {
                struct sched_group *sg = sd->groups;