git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Always bounce scx_disable() through irq_work
author: Tejun Heo <tj@kernel.org>
Tue, 10 Mar 2026 17:12:21 +0000 (07:12 -1000)
committer: Tejun Heo <tj@kernel.org>
Tue, 10 Mar 2026 17:12:21 +0000 (07:12 -1000)
scx_disable() directly called kthread_queue_work() which can acquire
worker->lock, pi_lock and rq->__lock. This made scx_disable() unsafe to
call while holding locks that conflict with this chain - in particular,
scx_claim_exit() calls scx_disable() for each descendant while holding
scx_sched_lock, which nests inside rq->__lock in scx_bypass().

The error path (scx_vexit()) was already bouncing through irq_work to
avoid this issue. Generalize the pattern to all scx_disable() calls by
always going through irq_work. irq_work_queue() is lockless and safe to
call from any context, and the actual kthread_queue_work() call happens
in the irq_work handler outside any locks.

Rename error_irq_work to disable_irq_work to reflect the broader usage.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/ext_internal.h

index 5cfac2c97bf3e2d926eccc5b040838ccb1247c7f..bc6ce05bb98e6580075e681b3d5a599b82729608 100644 (file)
@@ -4498,7 +4498,7 @@ static void scx_sched_free_rcu_work(struct work_struct *work)
        struct scx_dispatch_q *dsq;
        int cpu, node;
 
-       irq_work_sync(&sch->error_irq_work);
+       irq_work_sync(&sch->disable_irq_work);
        kthread_destroy_worker(sch->helper);
        timer_shutdown_sync(&sch->bypass_lb_timer);
 
@@ -5679,7 +5679,7 @@ static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind)
 {
        guard(preempt)();
        if (scx_claim_exit(sch, kind))
-               kthread_queue_work(sch->helper, &sch->disable_work);
+               irq_work_queue(&sch->disable_irq_work);
 }
 
 static void dump_newline(struct seq_buf *s)
@@ -6012,9 +6012,9 @@ static void scx_dump_state(struct scx_sched *sch, struct scx_exit_info *ei,
                       trunc_marker, sizeof(trunc_marker));
 }
 
-static void scx_error_irq_workfn(struct irq_work *irq_work)
+static void scx_disable_irq_workfn(struct irq_work *irq_work)
 {
-       struct scx_sched *sch = container_of(irq_work, struct scx_sched, error_irq_work);
+       struct scx_sched *sch = container_of(irq_work, struct scx_sched, disable_irq_work);
        struct scx_exit_info *ei = sch->exit_info;
 
        if (ei->kind >= SCX_EXIT_ERROR)
@@ -6048,7 +6048,7 @@ static bool scx_vexit(struct scx_sched *sch,
        ei->kind = kind;
        ei->reason = scx_exit_reason(ei->kind);
 
-       irq_work_queue(&sch->error_irq_work);
+       irq_work_queue(&sch->disable_irq_work);
        return true;
 }
 
@@ -6184,7 +6184,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
 
        sch->slice_dfl = SCX_SLICE_DFL;
        atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
-       init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
+       init_irq_work(&sch->disable_irq_work, scx_disable_irq_workfn);
        kthread_init_work(&sch->disable_work, scx_disable_workfn);
        timer_setup(&sch->bypass_lb_timer, scx_bypass_lb_timerfn, 0);
        sch->ops = *ops;
index 3623de2c30a196ab7882846aef21d7f6094a5d91..c78dadaadab88d0d9e99f03ef3568b0b8bfbd932 100644 (file)
@@ -1042,7 +1042,7 @@ struct scx_sched {
        struct kobject          kobj;
 
        struct kthread_worker   *helper;
-       struct irq_work         error_irq_work;
+       struct irq_work         disable_irq_work;
        struct kthread_work     disable_work;
        struct timer_list       bypass_lb_timer;
        struct rcu_work         rcu_work;