git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched/fair: Simplify the entry condition for update_idle_cpu_scan()
author K Prateek Nayak <kprateek.nayak@amd.com>
Thu, 12 Mar 2026 04:44:33 +0000 (04:44 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 18 Mar 2026 08:06:50 +0000 (09:06 +0100)
Only the topmost SD_SHARE_LLC domain has the "sd->shared" assigned.
Simply use "sd->shared" as an indicator for load balancing at the highest
SD_SHARE_LLC domain in update_idle_cpu_scan() instead of relying on
llc_size.

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Chen Yu <yu.c.chen@intel.com>
Reviewed-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Tested-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Link: https://patch.msgid.link/20260312044434.1974-9-kprateek.nayak@amd.com
kernel/sched/fair.c

index 3e24d3e16522c38bdcd5bab692083cfb774f8530..85c22f0f8de80c1723e044cad467e19c0ac0b4ed 100644 (file)
@@ -11234,6 +11234,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
                                 unsigned long sum_util)
 {
        struct sched_domain_shared *sd_share;
+       struct sched_domain *sd = env->sd;
        int llc_weight, pct;
        u64 x, y, tmp;
        /*
@@ -11247,11 +11248,7 @@ static void update_idle_cpu_scan(struct lb_env *env,
        if (!sched_feat(SIS_UTIL) || env->idle == CPU_NEWLY_IDLE)
                return;
 
-       llc_weight = per_cpu(sd_llc_size, env->dst_cpu);
-       if (env->sd->span_weight != llc_weight)
-               return;
-
-       sd_share = rcu_dereference_all(per_cpu(sd_llc_shared, env->dst_cpu));
+       sd_share = sd->shared;
        if (!sd_share)
                return;
 
@@ -11285,10 +11282,11 @@ static void update_idle_cpu_scan(struct lb_env *env,
         */
        /* equation [3] */
        x = sum_util;
+       llc_weight = sd->span_weight;
        do_div(x, llc_weight);
 
        /* equation [4] */
-       pct = env->sd->imbalance_pct;
+       pct = sd->imbalance_pct;
        tmp = x * x * pct * pct;
        do_div(tmp, 10000 * SCHED_CAPACITY_SCALE);
        tmp = min_t(long, tmp, SCHED_CAPACITY_SCALE);