git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Factor out scx_link_sched() and scx_unlink_sched()
author: Tejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:04 +0000 (07:58 -1000)
committer: Tejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:04 +0000 (07:58 -1000)
Factor out scx_link_sched() and scx_unlink_sched() functions to reduce
code duplication in the scheduler enable/disable paths.

No functional change.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c

index f10a9667b491c1fa2b7301c8f3dc1571d1a209a4..8674a5fa543772c0cbb96e413f312ec7c60f23de 100644 (file)
@@ -4830,6 +4830,33 @@ static void refresh_watchdog(void)
                cancel_delayed_work_sync(&scx_watchdog_work);
 }
 
+static void scx_link_sched(struct scx_sched *sch)
+{
+       scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
+#ifdef CONFIG_EXT_SUB_SCHED
+               struct scx_sched *parent = scx_parent(sch);
+               if (parent)
+                       list_add_tail(&sch->sibling, &parent->children);
+#endif /* CONFIG_EXT_SUB_SCHED */
+               list_add_tail_rcu(&sch->all, &scx_sched_all);
+       }
+
+       refresh_watchdog();
+}
+
+static void scx_unlink_sched(struct scx_sched *sch)
+{
+       scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
+#ifdef CONFIG_EXT_SUB_SCHED
+               if (scx_parent(sch))
+                       list_del_init(&sch->sibling);
+#endif /* CONFIG_EXT_SUB_SCHED */
+               list_del_rcu(&sch->all);
+       }
+
+       refresh_watchdog();
+}
+
 #ifdef CONFIG_EXT_SUB_SCHED
 static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq);
 
@@ -4979,12 +5006,7 @@ static void scx_sub_disable(struct scx_sched *sch)
        synchronize_rcu_expedited();
        disable_bypass_dsp(sch);
 
-       raw_spin_lock_irq(&scx_sched_lock);
-       list_del_init(&sch->sibling);
-       list_del_rcu(&sch->all);
-       raw_spin_unlock_irq(&scx_sched_lock);
-
-       refresh_watchdog();
+       scx_unlink_sched(sch);
 
        mutex_unlock(&scx_enable_mutex);
 
@@ -5120,11 +5142,7 @@ static void scx_root_disable(struct scx_sched *sch)
        if (sch->ops.exit)
                SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
 
-       raw_spin_lock_irq(&scx_sched_lock);
-       list_del_rcu(&sch->all);
-       raw_spin_unlock_irq(&scx_sched_lock);
-
-       refresh_watchdog();
+       scx_unlink_sched(sch);
 
        /*
         * scx_root clearing must be inside cpus_read_lock(). See
@@ -5888,11 +5906,7 @@ static void scx_root_enable_workfn(struct kthread_work *work)
         */
        rcu_assign_pointer(scx_root, sch);
 
-       raw_spin_lock_irq(&scx_sched_lock);
-       list_add_tail_rcu(&sch->all, &scx_sched_all);
-       raw_spin_unlock_irq(&scx_sched_lock);
-
-       refresh_watchdog();
+       scx_link_sched(sch);
 
        scx_idle_enable(ops);
 
@@ -6157,12 +6171,7 @@ static void scx_sub_enable_workfn(struct kthread_work *work)
                goto out_put_cgrp;
        }
 
-       raw_spin_lock_irq(&scx_sched_lock);
-       list_add_tail(&sch->sibling, &parent->children);
-       list_add_tail_rcu(&sch->all, &scx_sched_all);
-       raw_spin_unlock_irq(&scx_sched_lock);
-
-       refresh_watchdog();
+       scx_link_sched(sch);
 
        if (sch->level >= SCX_SUB_MAX_DEPTH) {
                scx_error(sch, "max nesting depth %d violated",