cancel_delayed_work_sync(&scx_watchdog_work);
}
+/*
+ * scx_link_sched - publish @sch on the global scheduler lists.
+ *
+ * Under scx_sched_lock (raw spinlock, IRQs disabled via scoped_guard),
+ * append @sch to the RCU-protected list of all schedulers and, when
+ * CONFIG_EXT_SUB_SCHED is enabled and @sch has a parent, also to the
+ * parent's children list.  The root scheduler has no parent, so only
+ * the all-schedulers insertion happens for it.
+ *
+ * refresh_watchdog() runs after the lists change — presumably so the
+ * watchdog accounts for the updated scheduler set; confirm against
+ * refresh_watchdog()'s definition.
+ */
+static void scx_link_sched(struct scx_sched *sch)
+{
+ scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
+#ifdef CONFIG_EXT_SUB_SCHED
+ struct scx_sched *parent = scx_parent(sch);
+ if (parent)
+ list_add_tail(&sch->sibling, &parent->children);
+#endif /* CONFIG_EXT_SUB_SCHED */
+ list_add_tail_rcu(&sch->all, &scx_sched_all);
+ }
+
+ refresh_watchdog();
+}
+
+/*
+ * scx_unlink_sched - remove @sch from the global scheduler lists.
+ *
+ * Mirror of scx_link_sched(): under scx_sched_lock with IRQs disabled,
+ * detach @sch from its parent's children list (sub-sched builds only)
+ * and from the RCU-protected all-schedulers list.
+ *
+ * NOTE(review): list_del_init() re-initializes sch->sibling to point at
+ * itself, so calling this helper more than once on the same @sch does
+ * not corrupt the sibling list.  Callers appear to rely on this (the
+ * helper replaces two separate unlink sites, one of which previously
+ * removed only sch->all) — verify that double list_del_rcu() on
+ * sch->all is likewise unreachable or safe at those call sites.
+ */
+static void scx_unlink_sched(struct scx_sched *sch)
+{
+ scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
+#ifdef CONFIG_EXT_SUB_SCHED
+ if (scx_parent(sch))
+ list_del_init(&sch->sibling);
+#endif /* CONFIG_EXT_SUB_SCHED */
+ list_del_rcu(&sch->all);
+ }
+
+ refresh_watchdog();
+}
+
#ifdef CONFIG_EXT_SUB_SCHED
static DECLARE_WAIT_QUEUE_HEAD(scx_unlink_waitq);
synchronize_rcu_expedited();
disable_bypass_dsp(sch);
- raw_spin_lock_irq(&scx_sched_lock);
- list_del_init(&sch->sibling);
- list_del_rcu(&sch->all);
- raw_spin_unlock_irq(&scx_sched_lock);
-
- refresh_watchdog();
+ scx_unlink_sched(sch);
mutex_unlock(&scx_enable_mutex);
if (sch->ops.exit)
SCX_CALL_OP(sch, SCX_KF_UNLOCKED, exit, NULL, ei);
- raw_spin_lock_irq(&scx_sched_lock);
- list_del_rcu(&sch->all);
- raw_spin_unlock_irq(&scx_sched_lock);
-
- refresh_watchdog();
+ scx_unlink_sched(sch);
/*
* scx_root clearing must be inside cpus_read_lock(). See
*/
rcu_assign_pointer(scx_root, sch);
- raw_spin_lock_irq(&scx_sched_lock);
- list_add_tail_rcu(&sch->all, &scx_sched_all);
- raw_spin_unlock_irq(&scx_sched_lock);
-
- refresh_watchdog();
+ scx_link_sched(sch);
scx_idle_enable(ops);
goto out_put_cgrp;
}
- raw_spin_lock_irq(&scx_sched_lock);
- list_add_tail(&sch->sibling, &parent->children);
- list_add_tail_rcu(&sch->all, &scx_sched_all);
- raw_spin_unlock_irq(&scx_sched_lock);
-
- refresh_watchdog();
+ scx_link_sched(sch);
if (sch->level >= SCX_SUB_MAX_DEPTH) {
scx_error(sch, "max nesting depth %d violated",