From: Tejun Heo
Date: Fri, 8 Nov 2024 20:40:44 +0000 (-1000)
Subject: Merge branch 'for-6.12-fixes' into for-6.13
X-Git-Tag: v6.13-rc1~164^2~9
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=f07b806ad8ebcca0a25738b78557736ef9674797;p=thirdparty%2Fkernel%2Flinux.git

Merge branch 'for-6.12-fixes' into for-6.13

Pull sched_ext/for-6.12-fixes to receive 0e7ffff1b811 ("scx: Fix
raciness in scx_ops_bypass()"). Planned updates for scx_ops_bypass()
depend on it.

Signed-off-by: Tejun Heo
---

diff --cc kernel/sched/ext.c
index 012a7fc772630,3bdb08fc2056a..c074a64c20f03
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@@ -3274,70 -3087,21 +3302,65 @@@ static s32 scx_select_cpu_dfl(struct ta
  	*found = false;
  
- 	if (!static_branch_likely(&scx_builtin_idle_enabled)) {
- 		scx_ops_error("built-in idle tracking is disabled");
- 		return prev_cpu;
- 	}
- 
  	/*
- 	 * If WAKE_SYNC, the waker's local DSQ is empty, and the system is
- 	 * under utilized, wake up @p to the local DSQ of the waker. Checking
- 	 * only for an empty local DSQ is insufficient as it could give the
- 	 * wakee an unfair advantage when the system is oversaturated.
- 	 * Checking only for the presence of idle CPUs is also insufficient as
- 	 * the local DSQ of the waker could have tasks piled up on it even if
- 	 * there is an idle core elsewhere on the system.
- 	 */
- 	cpu = smp_processor_id();
- 	if ((wake_flags & SCX_WAKE_SYNC) &&
- 	    !cpumask_empty(idle_masks.cpu) && !(current->flags & PF_EXITING) &&
- 	    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
- 		if (cpumask_test_cpu(cpu, p->cpus_ptr))
+ 	 * Determine the scheduling domain only if the task is allowed to run
+ 	 * on all CPUs.
+ 	 *
+ 	 * This is done primarily for efficiency, as it avoids the overhead of
+ 	 * updating a cpumask every time we need to select an idle CPU (which
+ 	 * can be costly in large SMP systems), but it also aligns logically:
+ 	 * if a task's scheduling domain is restricted by user-space (through
+ 	 * CPU affinity), the task will simply use the flat scheduling domain
+ 	 * defined by user-space.
+ 	 */
+ 	if (p->nr_cpus_allowed >= num_possible_cpus()) {
+ 		if (static_branch_maybe(CONFIG_NUMA, &scx_selcpu_topo_numa))
+ 			numa_cpus = cpumask_of_node(cpu_to_node(prev_cpu));
+ 
+ 		if (static_branch_maybe(CONFIG_SCHED_MC, &scx_selcpu_topo_llc)) {
+ 			struct sched_domain *sd;
+ 
+ 			sd = rcu_dereference(per_cpu(sd_llc, prev_cpu));
+ 			if (sd)
+ 				llc_cpus = sched_domain_span(sd);
+ 		}
+ 	}
+ 
+ 	/*
+ 	 * If WAKE_SYNC, try to migrate the wakee to the waker's CPU.
+ 	 */
+ 	if (wake_flags & SCX_WAKE_SYNC) {
+ 		cpu = smp_processor_id();
+ 
+ 		/*
+ 		 * If the waker's CPU is cache affine and prev_cpu is idle,
+ 		 * then avoid a migration.
+ 		 */
+ 		if (cpus_share_cache(cpu, prev_cpu) &&
+ 		    test_and_clear_cpu_idle(prev_cpu)) {
+ 			cpu = prev_cpu;
  			goto cpu_found;
+ 		}
+ 
+ 		/*
+ 		 * If the waker's local DSQ is empty, and the system is under
+ 		 * utilized, try to wake up @p to the local DSQ of the waker.
+ 		 *
+ 		 * Checking only for an empty local DSQ is insufficient as it
+ 		 * could give the wakee an unfair advantage when the system is
+ 		 * oversaturated.
+ 		 *
+ 		 * Checking only for the presence of idle CPUs is also
+ 		 * insufficient as the local DSQ of the waker could have tasks
+ 		 * piled up on it even if there is an idle core elsewhere on
+ 		 * the system.
+ 		 */
+ 		if (!cpumask_empty(idle_masks.cpu) &&
+ 		    !(current->flags & PF_EXITING) &&
+ 		    cpu_rq(cpu)->scx.local_dsq.nr == 0) {
+ 			if (cpumask_test_cpu(cpu, p->cpus_ptr))
+ 				goto cpu_found;
+ 		}
  	}
  
  	/*