git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Reorganize enable/disable path for multi-scheduler support
authorTejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:02 +0000 (07:58 -1000)
committerTejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:02 +0000 (07:58 -1000)
In preparation for multiple scheduler support, reorganize the enable and
disable paths to make scheduler instances explicit. Extract
scx_root_disable() from scx_disable_workfn(). Rename scx_enable_workfn()
to scx_root_enable_workfn(). Change scx_disable() to take @sch parameter
and only queue disable_work if scx_claim_exit() succeeds for consistency.
Move exit_kind validation into scx_claim_exit(). The sysrq handler now
prints a message when no scheduler is loaded.

These changes don't materially affect user-visible behavior.

v2: Keep scx_enable() name as-is and only rename the workfn to
    scx_root_enable_workfn(). Change scx_enable() return type to s32.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c

index d42a22f5a098e52dd989b5e8b32a439765c46ae9..142845bcddaaaaeec6a137edd8f5d3547860e3ba 100644 (file)
@@ -3267,8 +3267,8 @@ void sched_ext_dead(struct task_struct *p)
        raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
 
        /*
-        * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
-        * transitions can't race us. Disable ops for @p.
+        * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
+        * ENABLED transitions can't race us. Disable ops for @p.
         */
        if (scx_get_task_state(p) != SCX_TASK_NONE) {
                struct rq_flags rf;
@@ -4430,24 +4430,12 @@ static void free_kick_syncs(void)
        }
 }
 
-static void scx_disable_workfn(struct kthread_work *work)
+static void scx_root_disable(struct scx_sched *sch)
 {
-       struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
        struct scx_exit_info *ei = sch->exit_info;
        struct scx_task_iter sti;
        struct task_struct *p;
-       int kind, cpu;
-
-       kind = atomic_read(&sch->exit_kind);
-       while (true) {
-               if (kind == SCX_EXIT_DONE)      /* already disabled? */
-                       return;
-               WARN_ON_ONCE(kind == SCX_EXIT_NONE);
-               if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
-                       break;
-       }
-       ei->kind = kind;
-       ei->reason = scx_exit_reason(ei->kind);
+       int cpu;
 
        /* guarantee forward progress by bypassing scx_ops */
        scx_bypass(true);
@@ -4591,6 +4579,9 @@ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
 
        lockdep_assert_preemption_disabled();
 
+       if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
+               kind = SCX_EXIT_ERROR;
+
        if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
                return false;
 
@@ -4603,21 +4594,31 @@ static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
        return true;
 }
 
-static void scx_disable(enum scx_exit_kind kind)
+static void scx_disable_workfn(struct kthread_work *work)
 {
-       struct scx_sched *sch;
+       struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
+       struct scx_exit_info *ei = sch->exit_info;
+       int kind;
 
-       if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
-               kind = SCX_EXIT_ERROR;
+       kind = atomic_read(&sch->exit_kind);
+       while (true) {
+               if (kind == SCX_EXIT_DONE)      /* already disabled? */
+                       return;
+               WARN_ON_ONCE(kind == SCX_EXIT_NONE);
+               if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
+                       break;
+       }
+       ei->kind = kind;
+       ei->reason = scx_exit_reason(ei->kind);
 
-       rcu_read_lock();
-       sch = rcu_dereference(scx_root);
-       if (sch) {
-               guard(preempt)();
-               scx_claim_exit(sch, kind);
+       scx_root_disable(sch);
+}
+
+static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind)
+{
+       guard(preempt)();
+       if (scx_claim_exit(sch, kind))
                kthread_queue_work(sch->helper, &sch->disable_work);
-       }
-       rcu_read_unlock();
 }
 
 static void dump_newline(struct seq_buf *s)
@@ -5135,10 +5136,9 @@ struct scx_enable_cmd {
        int                     ret;
 };
 
-static void scx_enable_workfn(struct kthread_work *work)
+static void scx_root_enable_workfn(struct kthread_work *work)
 {
-       struct scx_enable_cmd *cmd =
-               container_of(work, struct scx_enable_cmd, work);
+       struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
        struct sched_ext_ops *ops = cmd->ops;
        struct scx_sched *sch;
        struct scx_task_iter sti;
@@ -5387,12 +5387,12 @@ err_disable:
         * Flush scx_disable_work to ensure that error is reported before init
         * completion. sch's base reference will be put by bpf_scx_unreg().
         */
-       scx_error(sch, "scx_enable() failed (%d)", ret);
+       scx_error(sch, "scx_root_enable() failed (%d)", ret);
        kthread_flush_work(&sch->disable_work);
        cmd->ret = 0;
 }
 
-static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+static s32 scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 {
        static struct kthread_worker *helper;
        static DEFINE_MUTEX(helper_mutex);
@@ -5418,7 +5418,7 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
                mutex_unlock(&helper_mutex);
        }
 
-       kthread_init_work(&cmd.work, scx_enable_workfn);
+       kthread_init_work(&cmd.work, scx_root_enable_workfn);
        cmd.ops = ops;
 
        kthread_queue_work(READ_ONCE(helper), &cmd.work);
@@ -5561,7 +5561,7 @@ static void bpf_scx_unreg(void *kdata, struct bpf_link *link)
        struct sched_ext_ops *ops = kdata;
        struct scx_sched *sch = ops->priv;
 
-       scx_disable(SCX_EXIT_UNREG);
+       scx_disable(sch, SCX_EXIT_UNREG);
        kthread_flush_work(&sch->disable_work);
        kobject_put(&sch->kobj);
 }
@@ -5689,7 +5689,15 @@ static struct bpf_struct_ops bpf_sched_ext_ops = {
 
 static void sysrq_handle_sched_ext_reset(u8 key)
 {
-       scx_disable(SCX_EXIT_SYSRQ);
+       struct scx_sched *sch;
+
+       rcu_read_lock();
+       sch = rcu_dereference(scx_root);
+       if (likely(sch))
+               scx_disable(sch, SCX_EXIT_SYSRQ);
+       else
+               pr_info("sched_ext: BPF schedulers not loaded\n");
+       rcu_read_unlock();
 }
 
 static const struct sysrq_key_op sysrq_sched_ext_reset_op = {