static struct scx_bstr_buf scx_exit_bstr_buf;
/* ops debug dump */
+static DEFINE_RAW_SPINLOCK(scx_dump_lock);
+
struct scx_dump_data {
s32 cpu;
bool first;
refresh_watchdog();
}
+/*
+ * Disable future debug dumps of @sch and wait for any in-progress dump to
+ * finish. scx_dump_state() holds scx_dump_lock for the duration of a dump
+ * and bails early once ->dump_disabled is set, so acquiring the same lock
+ * here both drains an in-flight dump and fences off new ones. Called while
+ * disabling @sch: once @sch becomes empty during disable there's no point
+ * in dumping it, and this prevents calling dump ops on a dead sch.
+ */
+static void scx_disable_dump(struct scx_sched *sch)
+{
+ guard(raw_spinlock_irqsave)(&scx_dump_lock);
+ sch->dump_disabled = true;
+}
+
#ifdef CONFIG_EXT_SUB_SCHED
static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq);
}
scx_task_iter_stop(&sti);
+ scx_disable_dump(sch);
+
scx_cgroup_unlock();
percpu_up_write(&scx_fork_rwsem);
}
scx_task_iter_stop(&sti);
+ scx_disable_dump(sch);
+
scx_cgroup_lock();
set_cgroup_sched(sch_cgroup(sch), NULL);
scx_cgroup_unlock();
#ifdef CONFIG_TRACEPOINTS
if (trace_sched_ext_dump_enabled()) {
- /* protected by scx_dump_state()::dump_lock */
+ /* protected by scx_dump_lock */
static char line_buf[SCX_EXIT_MSG_LEN];
va_start(args, fmt);
static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
size_t dump_len, bool dump_all_tasks)
{
- static DEFINE_RAW_SPINLOCK(dump_lock);
static const char trunc_marker[] = "\n\n~~~~ TRUNCATED ~~~~\n";
struct scx_dump_ctx dctx = {
.kind = ei->kind,
char *buf;
int cpu;
- guard(raw_spinlock_irqsave)(&dump_lock);
+ guard(raw_spinlock_irqsave)(&scx_dump_lock);
+
+ if (sch->dump_disabled)
+ return;
seq_buf_init(&s, ei->dump, dump_len);