git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
sched/fair: Take the scheduling domain into account in select_idle_smt()
Author: Keisuke Nishimura <keisuke.nishimura@inria.fr>
Wed, 10 Jan 2024 13:17:06 +0000 (14:17 +0100)
Committer: Sasha Levin <sashal@kernel.org>
Tue, 26 Mar 2024 22:16:30 +0000 (18:16 -0400)
[ Upstream commit 8aeaffef8c6eceab0e1498486fdd4f3dc3b7066c ]

When picking a CPU on task wakeup, select_idle_smt() has to take
into account the scheduling domain of @target. This is because the
"isolcpus" kernel command line option can remove CPUs from the domain to
isolate them from other SMT siblings.

This fix checks if the candidate CPU is in the target scheduling domain.

Commit:

  df3cb4ea1fb6 ("sched/fair: Fix wrong cpu selecting from isolated domain")

... originally introduced this fix by adding the check of the scheduling
domain in the loop.

However, commit:

  3e6efe87cd5cc ("sched/fair: Remove redundant check in select_idle_smt()")

... accidentally removed the check. Bring it back.

Fixes: 3e6efe87cd5c ("sched/fair: Remove redundant check in select_idle_smt()")
Signed-off-by: Keisuke Nishimura <keisuke.nishimura@inria.fr>
Signed-off-by: Julia Lawall <julia.lawall@inria.fr>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
Link: https://lore.kernel.org/r/20240110131707.437301-1-keisuke.nishimura@inria.fr
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/sched/fair.c

index 533547e3c90a755f58bd0261570c7904a2bffb9e..66457d4b8965cdad760bf0c1001b3cc5552f394d 100644 (file)
@@ -7311,13 +7311,19 @@ static int select_idle_core(struct task_struct *p, int core, struct cpumask *cpu
 /*
  * Scan the local SMT mask for idle CPUs.
  */
-static int select_idle_smt(struct task_struct *p, int target)
+static int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 {
        int cpu;
 
        for_each_cpu_and(cpu, cpu_smt_mask(target), p->cpus_ptr) {
                if (cpu == target)
                        continue;
+               /*
+                * Check if the CPU is in the LLC scheduling domain of @target.
+                * Due to isolcpus, there is no guarantee that all the siblings are in the domain.
+                */
+               if (!cpumask_test_cpu(cpu, sched_domain_span(sd)))
+                       continue;
                if (available_idle_cpu(cpu) || sched_idle_cpu(cpu))
                        return cpu;
        }
@@ -7341,7 +7347,7 @@ static inline int select_idle_core(struct task_struct *p, int core, struct cpuma
        return __select_idle_cpu(core, p);
 }
 
-static inline int select_idle_smt(struct task_struct *p, int target)
+static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd, int target)
 {
        return -1;
 }
@@ -7591,7 +7597,7 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
                has_idle_core = test_idle_cores(target);
 
                if (!has_idle_core && cpus_share_cache(prev, target)) {
-                       i = select_idle_smt(p, prev);
+                       i = select_idle_smt(p, sd, prev);
                        if ((unsigned int)i < nr_cpumask_bits)
                                return i;
                }