git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Add scx_dump_lock and dump_disabled
author: Tejun Heo <tj@kernel.org>
Tue, 10 Mar 2026 17:12:21 +0000 (07:12 -1000)
committer: Tejun Heo <tj@kernel.org>
Tue, 10 Mar 2026 17:12:21 +0000 (07:12 -1000)
Add a dedicated scx_dump_lock and per-sched dump_disabled flag so that
debug dumping can be safely disabled during sched teardown without
relying on scx_sched_lock. This is a prep for the next patch which
decouples the sysrq dump path from scx_sched_lock to resolve a lock
ordering issue.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/ext_internal.h

index 4fa0be4980d4fbcc4b713d86113494680f256fd4..5cfac2c97bf3e2d926eccc5b040838ccb1247c7f 100644 (file)
@@ -136,6 +136,8 @@ static DEFINE_RAW_SPINLOCK(scx_exit_bstr_buf_lock);
 static struct scx_bstr_buf scx_exit_bstr_buf;
 
 /* ops debug dump */
+static DEFINE_RAW_SPINLOCK(scx_dump_lock);
+
 struct scx_dump_data {
        s32                     cpu;
        bool                    first;
@@ -5279,6 +5281,17 @@ static void scx_unlink_sched(struct scx_sched *sch)
        refresh_watchdog();
 }
 
+/*
+ * Called to disable future dumps and wait for in-progress one while disabling
+ * @sch. Once @sch becomes empty during disable, there's no point in dumping it.
+ * This prevents calling dump ops on a dead sch.
+ */
+static void scx_disable_dump(struct scx_sched *sch)
+{
+       guard(raw_spinlock_irqsave)(&scx_dump_lock);
+       sch->dump_disabled = true;
+}
+
 #ifdef CONFIG_EXT_SUB_SCHED
 static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq);
 
@@ -5414,6 +5427,8 @@ static void scx_sub_disable(struct scx_sched *sch)
        }
        scx_task_iter_stop(&sti);
 
+       scx_disable_dump(sch);
+
        scx_cgroup_unlock();
        percpu_up_write(&scx_fork_rwsem);
 
@@ -5525,6 +5540,8 @@ static void scx_root_disable(struct scx_sched *sch)
        }
        scx_task_iter_stop(&sti);
 
+       scx_disable_dump(sch);
+
        scx_cgroup_lock();
        set_cgroup_sched(sch_cgroup(sch), NULL);
        scx_cgroup_unlock();
@@ -5680,7 +5697,7 @@ static __printf(2, 3) void dump_line(struct seq_buf *s, const char *fmt, ...)
 
 #ifdef CONFIG_TRACEPOINTS
        if (trace_sched_ext_dump_enabled()) {
-               /* protected by scx_dump_state()::dump_lock */
+               /* protected by scx_dump_lock */
                static char line_buf[SCX_EXIT_MSG_LEN];
 
                va_start(args, fmt);
@@ -5842,7 +5859,6 @@ static void scx_dump_task(struct scx_sched *sch,
 static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
                           size_t dump_len, bool dump_all_tasks)
 {
-       static DEFINE_RAW_SPINLOCK(dump_lock);
        static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
        struct scx_dump_ctx dctx = {
                .kind = ei->kind,
@@ -5856,7 +5872,10 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
        char *buf;
        int cpu;
 
-       guard(raw_spinlock_irqsave)(&dump_lock);
+       guard(raw_spinlock_irqsave)(&scx_dump_lock);
+
+       if (sch->dump_disabled)
+               return;
 
        seq_buf_init(&s, ei->dump, dump_len);
 
index bec4d22890b0cfb994bacb1216db5eea0b25ef15..3623de2c30a196ab7882846aef21d7f6094a5d91 100644 (file)
@@ -1003,6 +1003,7 @@ struct scx_sched {
        atomic_t                bypass_dsp_enable_depth;
 
        bool                    aborting;
+       bool                    dump_disabled;  /* protected by scx_dump_lock */
        u32                     dsp_max_batch;
        s32                     level;