sched/fair: Use READ_ONCE() to read sg->asym_prefer_cpu
author K Prateek Nayak <kprateek.nayak@amd.com>
Wed, 9 Apr 2025 05:34:43 +0000 (05:34 +0000)
committer Peter Zijlstra <peterz@infradead.org>
Wed, 16 Apr 2025 19:09:11 +0000 (21:09 +0200)
Subsequent commits add support for dynamically updating the sched_group
struct's "asym_prefer_cpu" member from a remote CPU. Use READ_ONCE()
when reading "sg->asym_prefer_cpu" so that the load balancer always
observes the latest value.

Signed-off-by: K Prateek Nayak <kprateek.nayak@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20250409053446.23367-2-kprateek.nayak@amd.com
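
For background, READ_ONCE() pairs with WRITE_ONCE() on the updater side to
prevent the compiler from tearing, caching, or re-fusing the access to a
field that another CPU may rewrite concurrently. The sketch below is a
minimal, illustrative userspace rendering of that pairing; the
sched_group_stub struct, update_asym_prefer_cpu() helper, main() harness,
and simplified macro definitions are assumptions for illustration only, not
the kernel's actual code (the real macros live in
include/asm-generic/rwonce.h and handle more cases).

    /*
     * Illustrative sketch of the READ_ONCE()/WRITE_ONCE() pairing the
     * commit relies on. Only the field name "asym_prefer_cpu" comes from
     * the commit; everything else here is a hypothetical harness.
     */
    #include <stdio.h>

    /* Simplified stand-ins for the kernel macros (GNU C typeof). */
    #define READ_ONCE(x)     (*(const volatile typeof(x) *)&(x))
    #define WRITE_ONCE(x, v) (*(volatile typeof(x) *)&(x) = (v))

    struct sched_group_stub {
            int asym_prefer_cpu;    /* may be rewritten from a remote CPU */
    };

    static struct sched_group_stub sg = { .asym_prefer_cpu = 0 };

    /* Remote-updater side: publish the new preferred CPU with WRITE_ONCE(). */
    static void update_asym_prefer_cpu(int cpu)
    {
            WRITE_ONCE(sg.asym_prefer_cpu, cpu);
    }

    /*
     * Load-balancer side: READ_ONCE() forces a single, untorn load that the
     * compiler cannot hoist out of a loop or reload behind our back.
     */
    static int read_asym_prefer_cpu(void)
    {
            return READ_ONCE(sg.asym_prefer_cpu);
    }

    int main(void)
    {
            update_asym_prefer_cpu(3);
            printf("asym_prefer_cpu = %d\n", read_asym_prefer_cpu());
            return 0;
    }

In the actual diff below, only the reader side changes in this patch; the
commit message indicates the remote WRITE_ONCE() updater arrives in the
subsequent commits of the series.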
kernel/sched/fair.c

index 0c19459c80422fb250e53ed9406137a88a0f6b3c..5e1bd9e8464cddee5d9307437344a3390de0a2e3 100644
@@ -10251,7 +10251,7 @@ sched_group_asym(struct lb_env *env, struct sg_lb_stats *sgs, struct sched_group
            (sgs->group_weight - sgs->idle_cpus != 1))
                return false;
 
-       return sched_asym(env->sd, env->dst_cpu, group->asym_prefer_cpu);
+       return sched_asym(env->sd, env->dst_cpu, READ_ONCE(group->asym_prefer_cpu));
 }
 
 /* One group has more than one SMT CPU while the other group does not */
@@ -10488,7 +10488,8 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 
        case group_asym_packing:
                /* Prefer to move from lowest priority CPU's work */
-               return sched_asym_prefer(sds->busiest->asym_prefer_cpu, sg->asym_prefer_cpu);
+               return sched_asym_prefer(READ_ONCE(sds->busiest->asym_prefer_cpu),
+                                        READ_ONCE(sg->asym_prefer_cpu));
 
        case group_misfit_task:
                /*