git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Pass held rq to SCX_CALL_OP() for dump_cpu/dump_task
author: Tejun Heo <tj@kernel.org>
Sat, 25 Apr 2026 00:31:36 +0000 (14:31 -1000)
committer: Tejun Heo <tj@kernel.org>
Sat, 25 Apr 2026 00:31:36 +0000 (14:31 -1000)
scx_dump_state() walks CPUs with rq_lock_irqsave() held and invokes
ops.dump_cpu / ops.dump_task with NULL locked_rq, leaving
scx_locked_rq_state NULL. If the BPF callback calls a kfunc that
re-acquires rq based on scx_locked_rq() - e.g. scx_bpf_cpuperf_set(cpu)
- it re-acquires the already-held rq.

Pass the held rq to SCX_CALL_OP(). Thread it into scx_dump_task() too.
The pre-loop ops.dump call runs before rq_lock_irqsave() and so it keeps
rq=NULL.

Fixes: 07814a9439a3 ("sched_ext: Print debug dump after an error exit")
Cc: stable@vger.kernel.org # v6.12+
Reported-by: Chris Mason <clm@meta.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c

index 26968d0a67528b47c9a7084e234c8068c02c9e92..73d629559d6d11b157d55bbba247cd9199cc1860 100644 (file)
@@ -6117,9 +6117,8 @@ static void ops_dump_exit(void)
        scx_dump_data.cpu = -1;
 }
 
-static void scx_dump_task(struct scx_sched *sch,
-                         struct seq_buf *s, struct scx_dump_ctx *dctx,
-                         struct task_struct *p, char marker)
+static void scx_dump_task(struct scx_sched *sch, struct seq_buf *s, struct scx_dump_ctx *dctx,
+                         struct rq *rq, struct task_struct *p, char marker)
 {
        static unsigned long bt[SCX_EXIT_BT_LEN];
        struct scx_sched *task_sch = scx_task_sched(p);
@@ -6160,7 +6159,7 @@ static void scx_dump_task(struct scx_sched *sch,
 
        if (SCX_HAS_OP(sch, dump_task)) {
                ops_dump_init(s, "    ");
-               SCX_CALL_OP(sch, dump_task, NULL, dctx, p);
+               SCX_CALL_OP(sch, dump_task, rq, dctx, p);
                ops_dump_exit();
        }
 
@@ -6284,8 +6283,7 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
                used = seq_buf_used(&ns);
                if (SCX_HAS_OP(sch, dump_cpu)) {
                        ops_dump_init(&ns, "  ");
-                       SCX_CALL_OP(sch, dump_cpu, NULL,
-                                   &dctx, cpu, idle);
+                       SCX_CALL_OP(sch, dump_cpu, rq, &dctx, cpu, idle);
                        ops_dump_exit();
                }
 
@@ -6308,11 +6306,11 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
 
                if (rq->curr->sched_class == &ext_sched_class &&
                    (dump_all_tasks || scx_task_on_sched(sch, rq->curr)))
-                       scx_dump_task(sch, &s, &dctx, rq->curr, '*');
+                       scx_dump_task(sch, &s, &dctx, rq, rq->curr, '*');
 
                list_for_each_entry(p, &rq->scx.runnable_list, scx.runnable_node)
                        if (dump_all_tasks || scx_task_on_sched(sch, p))
-                               scx_dump_task(sch, &s, &dctx, p, ' ');
+                               scx_dump_task(sch, &s, &dctx, rq, p, ' ');
        next:
                rq_unlock_irqrestore(rq, &rf);
        }