sched_ext: Always use SMP versions in kernel/sched/ext_idle.c
author    Cheng-Yang Chou <yphbchou0911@gmail.com>
          Wed, 11 Jun 2025 13:54:03 +0000 (21:54 +0800)
committer Tejun Heo <tj@kernel.org>
          Sat, 14 Jun 2025 00:47:52 +0000 (14:47 -1000)
Simplify the scheduler by making formerly SMP-only primitives and data
structures unconditional.

tj: Updated subject for clarity. Fixed a stray #else block which wasn't
    removed, causing a build failure.

Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
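
[Editor's note: to make the shape of the change concrete, below is a
minimal userspace sketch of the "#ifdef CONFIG_SMP" pattern this commit
removes; the names are illustrative stand-ins, not the actual
kernel/sched/ext_idle.c code. Before the change, each SMP-only helper
carried a !CONFIG_SMP stub returning -EBUSY and call sites were wrapped
in guards; after it, only the SMP version is compiled.]

    /*
     * Illustrative stand-in, not kernel code. pick_idle_cpu() only
     * mimics the shape of scx_pick_idle_cpu() and its deleted UP stub.
     */
    #include <errno.h>
    #include <stdio.h>

    #define CONFIG_SMP 1    /* after this commit, the only case built */

    #ifdef CONFIG_SMP
    /* "Real" helper: scan a toy idle mask for a free CPU. */
    static int pick_idle_cpu(unsigned long idle_mask)
    {
            for (int cpu = 0; cpu < 8 * (int)sizeof(idle_mask); cpu++)
                    if (idle_mask & (1UL << cpu))
                            return cpu;
            return -EBUSY;
    }
    #else
    /* UP stub of the kind this commit deletes outright. */
    static int pick_idle_cpu(unsigned long idle_mask)
    {
            (void)idle_mask;
            return -EBUSY;
    }
    #endif

    int main(void)
    {
            /* CPUs 2 and 3 idle in the toy mask; prints "picked CPU 2". */
            printf("picked CPU %d\n", pick_idle_cpu(0x0cUL));
            return 0;
    }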
kernel/sched/ext_idle.c

index 17802693e304309467825f8360b9e82cfa18db40..b79cbdb7999a2059d7054e953d3235b1dd791293 100644
@@ -17,7 +17,6 @@ static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 /* Enable/disable per-node idle cpumasks */
 static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_per_node);
 
-#ifdef CONFIG_SMP
 /* Enable/disable LLC aware optimizations */
 static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
 
@@ -794,17 +793,6 @@ static void reset_idle_masks(struct sched_ext_ops *ops)
                cpumask_and(idle_cpumask(node)->smt, cpu_online_mask, node_mask);
        }
 }
-#else  /* !CONFIG_SMP */
-static bool scx_idle_test_and_clear_cpu(int cpu)
-{
-       return -EBUSY;
-}
-
-static s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, int node, u64 flags)
-{
-       return -EBUSY;
-}
-#endif /* CONFIG_SMP */
 
 void scx_idle_enable(struct sched_ext_ops *ops)
 {
@@ -818,9 +806,7 @@ void scx_idle_enable(struct sched_ext_ops *ops)
        else
                static_branch_disable_cpuslocked(&scx_builtin_idle_per_node);
 
-#ifdef CONFIG_SMP
        reset_idle_masks(ops);
-#endif
 }
 
 void scx_idle_disable(void)
@@ -906,7 +892,6 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
        if (!rq)
                lockdep_assert_held(&p->pi_lock);
 
-#ifdef CONFIG_SMP
        /*
         * This may also be called from ops.enqueue(), so we need to handle
         * per-CPU tasks as well. For these tasks, we can skip all idle CPU
@@ -923,9 +908,7 @@ static s32 select_cpu_from_kfunc(struct task_struct *p, s32 prev_cpu, u64 wake_f
                cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags,
                                         allowed ?: p->cpus_ptr, flags);
        }
-#else
-       cpu = -EBUSY;
-#endif
+
        if (scx_kf_allowed_if_unlocked())
                task_rq_unlock(rq, p, &rf);
 
@@ -1016,11 +999,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask_node(int node)
        if (node < 0)
                return cpu_none_mask;
 
-#ifdef CONFIG_SMP
        return idle_cpumask(node)->cpu;
-#else
-       return cpu_none_mask;
-#endif
 }
 
 /**
@@ -1040,11 +1019,7 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
        if (!check_builtin_idle_enabled())
                return cpu_none_mask;
 
-#ifdef CONFIG_SMP
        return idle_cpumask(NUMA_NO_NODE)->cpu;
-#else
-       return cpu_none_mask;
-#endif
 }
 
 /**
@@ -1063,14 +1038,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask_node(int node)
        if (node < 0)
                return cpu_none_mask;
 
-#ifdef CONFIG_SMP
        if (sched_smt_active())
                return idle_cpumask(node)->smt;
        else
                return idle_cpumask(node)->cpu;
-#else
-       return cpu_none_mask;
-#endif
 }
 
 /**
@@ -1091,14 +1062,10 @@ __bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
        if (!check_builtin_idle_enabled())
                return cpu_none_mask;
 
-#ifdef CONFIG_SMP
        if (sched_smt_active())
                return idle_cpumask(NUMA_NO_NODE)->smt;
        else
                return idle_cpumask(NUMA_NO_NODE)->cpu;
-#else
-       return cpu_none_mask;
-#endif
 }
 
 /**