]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
tools/sched_ext: scx_qmap: Silence task_ctx lookup miss
authorTejun Heo <tj@kernel.org>
Tue, 21 Apr 2026 07:17:11 +0000 (21:17 -1000)
committerTejun Heo <tj@kernel.org>
Tue, 21 Apr 2026 16:18:58 +0000 (06:18 -1000)
scx_fork() dispatches ops.init_task to exactly one scheduler - the one
owning the forking task's cgroup. A task forked inside a sub-scheduler's
cgroup is init'd into the sub only; the root scheduler has no task_ctx
entry for it. When that task later appears as @prev in the root's
qmap_dispatch() (or flows through core-sched comparison via task_qdist),
the bpf_task_storage_get() legitimately misses.

qmap treated those misses as fatal via scx_bpf_error("task_ctx lookup
failed") and aborted the scheduler as soon as the first cross-sched
task hit the root. Drop the error in the sites where the miss is
legitimate: lookup_task_ctx() (helper; callers already check for NULL),
qmap_dispatch()'s @prev branch (bookkeeping-only), task_qdist()
(returns 0 which makes the comparison a no-op), and qmap_select_cpu()
(returns prev_cpu as a no-op fallback instead of -ESRCH). The existing
scx_bpf_error() was a paranoid guard from the pre-sub-sched world where every
task was owned by the one and only scheduler.

v2: qmap_select_cpu() returns prev_cpu on NULL instead of -ESRCH, so
    the root scheduler doesn't error on cross-sched tasks that pass
    through it (Andrea Righi).

Fixes: 4f8b122848db ("sched_ext: Add basic building blocks for nested sub-scheduler dispatching")
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Reviewed-by: Zhao Mengmeng <zhaomengmeng@kylinos.cn>
tools/sched_ext/scx_qmap.bpf.c

index b68abb9e760b892f2df055649dc80c71ed57651c..aad698fe294bf4136058f9eda18693f1736de220 100644 (file)
@@ -159,13 +159,7 @@ static s32 pick_direct_dispatch_cpu(struct task_struct *p, s32 prev_cpu)
 
 static struct task_ctx *lookup_task_ctx(struct task_struct *p)
 {
-       struct task_ctx *tctx;
-
-       if (!(tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0))) {
-               scx_bpf_error("task_ctx lookup failed");
-               return NULL;
-       }
-       return tctx;
+       return bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
 }
 
 s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p,
@@ -175,7 +169,7 @@ s32 BPF_STRUCT_OPS(qmap_select_cpu, struct task_struct *p,
        s32 cpu;
 
        if (!(tctx = lookup_task_ctx(p)))
-               return -ESRCH;
+               return prev_cpu;
 
        if (p->scx.weight < 2 && !(p->flags & PF_KTHREAD))
                return prev_cpu;
@@ -540,13 +534,9 @@ void BPF_STRUCT_OPS(qmap_dispatch, s32 cpu, struct task_struct *prev)
         */
        if (prev) {
                tctx = bpf_task_storage_get(&task_ctx_stor, prev, 0, 0);
-               if (!tctx) {
-                       scx_bpf_error("task_ctx lookup failed");
-                       return;
-               }
-
-               tctx->core_sched_seq =
-                       core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++;
+               if (tctx)
+                       tctx->core_sched_seq =
+                               core_sched_tail_seqs[weight_to_idx(prev->scx.weight)]++;
        }
 }
 
@@ -584,10 +574,8 @@ static s64 task_qdist(struct task_struct *p)
        s64 qdist;
 
        tctx = bpf_task_storage_get(&task_ctx_stor, p, 0, 0);
-       if (!tctx) {
-               scx_bpf_error("task_ctx lookup failed");
+       if (!tctx)
                return 0;
-       }
 
        qdist = tctx->core_sched_seq - core_sched_head_seqs[idx];