 	*found = false;
-	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
-		scx_ops_error("built-in idle tracking is disabled");
-		return prev_cpu;
-	}
-
 	/*
-	 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
-	 * under utilized, wake up @p to the local DSQ of the waker. Checking
-	 * only for an empty local DSQ is insufficient as it could give the
-	 * wakee an unfair advantage when the system is oversaturated.
-	 * Checking only for the presence of idle CPUs is also insufficient as
-	 * the local DSQ of the waker could have tasks piled up on it even if
-	 * there is an idle core elsewhere on the system.
-	 */
-	cpu = smp_processor_id();
-	if ((wake_flags & SCX_WAKE_SYNC) &&
-	    !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
-	    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
-		if (cpumask_test_cpu(cpu, p->cpus_ptr))
+	 * Determine the scheduling domain only if the task is allowed to run
+	 * on all CPUs.
+	 *
+	 * This is done primarily for efficiency, as it avoids the overhead of
+	 * updating a cpumask every time we need to select an idle CPU (which
+	 * can be costly in large SMP systems), but it also aligns logically:
+	 * if a task's scheduling domain is restricted by user-space (through
+	 * CPU affinity), the task will simply use the flat scheduling domain
+	 * defined by user-space.
+	 */
+	if (p->nr_cpus_allowed >= num_possible_cpus()) {
+		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
+			numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
+
+		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
+			struct sched_domain *sd;
+
+			sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
+			if (sd)
+				llc_cpus = sched_domain_span(sd);
+		}
+	}
+
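+	/*
+	 * numa_cpus and llc_cpus, when set above, are used by the idle-CPU
+	 * scan further below to prefer CPUs in @prev_cpu's NUMA node and LLC
+	 * domain before falling back to any CPU allowed to @p.
+	 */
+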
+	/*
+	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
+	 */
+	if (wake_flags & SCX_WAKE_SYNC) {
+		cpu = smp_processor_id();
+
+		/*
+		 * If the waker's CPU is cache affine and prev_cpu is idle,
+		 * then avoid a migration.
+		 */
+		if (cpus_share_cache(cpu, prev_cpu) &&
+		    test_and_clear_cpu_idle(prev_cpu)) {
+			cpu = prev_cpu;
 			goto cpu_found;
+		}
+
+		/*
+		 * If the waker's local DSQ is empty, and the system is under
+		 * utilized, try to wake up @p to the local DSQ of the waker.
+		 *
+		 * Checking only for an empty local DSQ is insufficient as it
+		 * could give the wakee an unfair advantage when the system is
+		 * oversaturated.
+		 *
+		 * Checking only for the presence of idle CPUs is also
+		 * insufficient as the local DSQ of the waker could have tasks
+		 * piled up on it even if there is an idle core elsewhere on
+		 * the system.
+		 */
+		if (!cpumask_empty(idle_masks.cpu) &&
+		    !(current->flags & PF_EXITING) &&
+		    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
+			if (cpumask_test_cpu(cpu, p->cpus_ptr))
+				goto cpu_found;
+		}
 	}
 	/*