git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Replace SCX_TASK_OFF_TASKS flag with SCX_TASK_DEAD state
authorTejun Heo <tj@kernel.org>
Sun, 10 May 2026 20:08:16 +0000 (10:08 -1000)
committerTejun Heo <tj@kernel.org>
Sun, 10 May 2026 20:08:16 +0000 (10:08 -1000)
SCX_TASK_OFF_TASKS marked tasks already through sched_ext_dead() so cgroup
task iteration would skip them. This can be expressed better with a task
state. Replace the flag with SCX_TASK_DEAD.

scx_disable_and_exit_task() resets state to NONE on its way out, so
sched_ext_dead() now sets DEAD after the wrapper returns. The validation
matrix grows NONE -> DEAD, warns on DEAD -> NONE, and tightens READY's
predecessor to INIT or ENABLED so the new DEAD value cannot silently
transition to READY.

This prepares for the following fix of the enable vs. dead race.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
include/linux/sched/ext.h
kernel/sched/ext.c

index adb9a4de068ae8826b2ae3ac9bb0cab6b82a2fd6..9f1a326ad03ec74c3130feeb4be03fd1feca4abc 100644 (file)
@@ -101,24 +101,25 @@ enum scx_ent_flags {
        SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */
        SCX_TASK_SUB_INIT       = 1 << 4, /* task being initialized for a sub sched */
        SCX_TASK_IMMED          = 1 << 5, /* task is on local DSQ with %SCX_ENQ_IMMED */
-       SCX_TASK_OFF_TASKS      = 1 << 6, /* removed from scx_tasks by sched_ext_dead() */
 
        /*
-        * Bits 8 and 9 are used to carry task state:
+        * Bits 8 to 10 are used to carry task state:
         *
         * NONE         ops.init_task() not called yet
         * INIT         ops.init_task() succeeded, but task can be cancelled
         * READY        fully initialized, but not in sched_ext
         * ENABLED      fully initialized and in sched_ext
+        * DEAD         terminal state set by sched_ext_dead()
         */
-       SCX_TASK_STATE_SHIFT    = 8,      /* bits 8 and 9 are used to carry task state */
-       SCX_TASK_STATE_BITS     = 2,
+       SCX_TASK_STATE_SHIFT    = 8,
+       SCX_TASK_STATE_BITS     = 3,
        SCX_TASK_STATE_MASK     = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT,
 
        SCX_TASK_NONE           = 0 << SCX_TASK_STATE_SHIFT,
        SCX_TASK_INIT           = 1 << SCX_TASK_STATE_SHIFT,
        SCX_TASK_READY          = 2 << SCX_TASK_STATE_SHIFT,
        SCX_TASK_ENABLED        = 3 << SCX_TASK_STATE_SHIFT,
+       SCX_TASK_DEAD           = 4 << SCX_TASK_STATE_SHIFT,
 
        /*
         * Bits 12 and 13 are used to carry reenqueue reason. In addition to
index 81841277a54f6ed64360ecfc48d32a5d57b999cd..2fc4a12711f9d556a4a0bf3065fdc60c1833c7c4 100644 (file)
@@ -723,17 +723,22 @@ static void scx_set_task_state(struct task_struct *p, u32 state)
 
        switch (state) {
        case SCX_TASK_NONE:
+               warn = prev_state == SCX_TASK_DEAD;
                break;
        case SCX_TASK_INIT:
                warn = prev_state != SCX_TASK_NONE;
                p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
                break;
        case SCX_TASK_READY:
-               warn = prev_state == SCX_TASK_NONE;
+               warn = !(prev_state == SCX_TASK_INIT ||
+                        prev_state == SCX_TASK_ENABLED);
                break;
        case SCX_TASK_ENABLED:
                warn = prev_state != SCX_TASK_READY;
                break;
+       case SCX_TASK_DEAD:
+               warn = prev_state != SCX_TASK_NONE;
+               break;
        default:
                WARN_ONCE(1, "sched_ext: Invalid task state %d -> %d for %s[%d]",
                          prev_state, state, p->comm, p->pid);
@@ -972,11 +977,11 @@ static struct task_struct *scx_task_iter_next_locked(struct scx_task_iter *iter)
                /*
                 * cgroup_task_dead() removes the dead tasks from cset->tasks
                 * after sched_ext_dead() and cgroup iteration may see tasks
-                * which already finished sched_ext_dead(). %SCX_TASK_OFF_TASKS
-                * is set by sched_ext_dead() under @p's rq lock. Test it to
+                * which already finished sched_ext_dead(). %SCX_TASK_DEAD is
+                * set by sched_ext_dead() under @p's rq lock. Test it to
                 * avoid visiting tasks which are already dead from SCX POV.
                 */
-               if (p->scx.flags & SCX_TASK_OFF_TASKS) {
+               if (scx_get_task_state(p) == SCX_TASK_DEAD) {
                        __scx_task_iter_rq_unlock(iter);
                        continue;
                }
@@ -3847,7 +3852,7 @@ void sched_ext_dead(struct task_struct *p)
         * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
         * ENABLED transitions can't race us. Disable ops for @p.
         *
-        * %SCX_TASK_OFF_TASKS synchronizes against cgroup task iteration - see
+        * %SCX_TASK_DEAD synchronizes against cgroup task iteration - see
         * scx_task_iter_next_locked(). NONE tasks need no marking: cgroup
         * iteration is only used from sub-sched paths, which require root
         * enabled. Root enable transitions every live task to at least READY.
@@ -3858,7 +3863,7 @@ void sched_ext_dead(struct task_struct *p)
 
                rq = task_rq_lock(p, &rf);
                scx_disable_and_exit_task(scx_task_sched(p), p);
-               p->scx.flags |= SCX_TASK_OFF_TASKS;
+               scx_set_task_state(p, SCX_TASK_DEAD);
                task_rq_unlock(rq, p, &rf);
        }
 }