raw_spin_unlock_irqrestore(&scx_tasks_lock, flags);
/*
- * @p is off scx_tasks and wholly ours. scx_enable()'s READY -> ENABLED
- * transitions can't race us. Disable ops for @p.
+ * @p is off scx_tasks and wholly ours. scx_root_enable()'s READY ->
+ * ENABLED transitions can't race us. Disable ops for @p.
*/
if (scx_get_task_state(p) != SCX_TASK_NONE) {
struct rq_flags rf;
}
}
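For reference, scx_get_task_state() above reads a small per-task state machine; any state past SCX_TASK_NONE means ops.init_task() has already run for @p and the task has to be unwound before the scheduler can go away. The states, as declared in kernel/sched/ext.c:

enum scx_task_state {
	SCX_TASK_NONE,		/* ops.init_task() not called yet */
	SCX_TASK_INIT,		/* ops.init_task() succeeded, but task can be cancelled */
	SCX_TASK_READY,		/* fully initialized, but not in sched_ext */
	SCX_TASK_ENABLED,	/* fully initialized and in sched_ext */
};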
@@ ... @@
-static void scx_disable_workfn(struct kthread_work *work)
+static void scx_root_disable(struct scx_sched *sch)
{
- struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
struct scx_exit_info *ei = sch->exit_info;
struct scx_task_iter sti;
struct task_struct *p;
- int kind, cpu;
-
- kind = atomic_read(&sch->exit_kind);
- while (true) {
- if (kind == SCX_EXIT_DONE) /* already disabled? */
- return;
- WARN_ON_ONCE(kind == SCX_EXIT_NONE);
- if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
- break;
- }
- ei->kind = kind;
- ei->reason = scx_exit_reason(ei->kind);
+ int cpu;
/* guarantee forward progress by bypassing scx_ops */
scx_bypass(true);
@@ ... @@
lockdep_assert_preemption_disabled();
+ if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
+ kind = SCX_EXIT_ERROR;
+
if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
return false;
return true;
}
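Pieced together with its elided context, scx_claim_exit() after this hunk reads roughly as below. The signature and the none local are inferred rather than shown: the callers pass (sch, kind) and branch on a boolean result, and atomic_try_cmpxchg() needs a local holding the expected SCX_EXIT_NONE value.

static bool scx_claim_exit(struct scx_sched *sch, enum scx_exit_kind kind)
{
	int none = SCX_EXIT_NONE;

	lockdep_assert_preemption_disabled();

	if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
		kind = SCX_EXIT_ERROR;

	/* only the first claimant moves exit_kind off NONE */
	if (!atomic_try_cmpxchg(&sch->exit_kind, &none, kind))
		return false;
	return true;
}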
@@ ... @@
-static void scx_disable(enum scx_exit_kind kind)
+static void scx_disable_workfn(struct kthread_work *work)
{
- struct scx_sched *sch;
+ struct scx_sched *sch = container_of(work, struct scx_sched, disable_work);
+ struct scx_exit_info *ei = sch->exit_info;
+ int kind;
- if (WARN_ON_ONCE(kind == SCX_EXIT_NONE || kind == SCX_EXIT_DONE))
- kind = SCX_EXIT_ERROR;
+ kind = atomic_read(&sch->exit_kind);
+ while (true) {
+ if (kind == SCX_EXIT_DONE) /* already disabled? */
+ return;
+ WARN_ON_ONCE(kind == SCX_EXIT_NONE);
+ if (atomic_try_cmpxchg(&sch->exit_kind, &kind, SCX_EXIT_DONE))
+ break;
+ }
+ ei->kind = kind;
+ ei->reason = scx_exit_reason(ei->kind);
- rcu_read_lock();
- sch = rcu_dereference(scx_root);
- if (sch) {
- guard(preempt)();
- scx_claim_exit(sch, kind);
+ scx_root_disable(sch);
+}
+
+static void scx_disable(struct scx_sched *sch, enum scx_exit_kind kind)
+{
+ guard(preempt)();
+ if (scx_claim_exit(sch, kind))
kthread_queue_work(sch->helper, &sch->disable_work);
- }
- rcu_read_unlock();
}
static void dump_newline(struct seq_buf *s)
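Taken together, sch->exit_kind acts as a small state machine: scx_claim_exit() moves it NONE -> kind exactly once, and scx_disable_workfn() later moves whatever was claimed to DONE while recording it in the exit info. A minimal userspace model of that handshake, with hypothetical names and C11 atomics standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum exit_kind { EXIT_NONE, EXIT_DONE, EXIT_UNREG, EXIT_ERROR };

static _Atomic int exit_kind = EXIT_NONE;

/* first claimant wins; every later caller sees the claim and backs off */
static bool claim_exit(int kind)
{
	int none = EXIT_NONE;

	return atomic_compare_exchange_strong(&exit_kind, &none, kind);
}

/* worker: latch the claimed kind, then mark the exit fully done */
static void disable_workfn(void)
{
	int kind = atomic_load(&exit_kind);

	while (true) {
		if (kind == EXIT_DONE)		/* already disabled */
			return;
		/* on failure, kind is reloaded with the current value */
		if (atomic_compare_exchange_strong(&exit_kind, &kind, EXIT_DONE))
			break;
	}
	printf("disabling, claimed reason %d\n", kind);
}

int main(void)
{
	if (claim_exit(EXIT_UNREG))
		disable_workfn();
	return 0;
}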
@@ ... @@
int ret;
};
-static void scx_enable_workfn(struct kthread_work *work)
+static void scx_root_enable_workfn(struct kthread_work *work)
{
- struct scx_enable_cmd *cmd =
- container_of(work, struct scx_enable_cmd, work);
+ struct scx_enable_cmd *cmd = container_of(work, struct scx_enable_cmd, work);
struct sched_ext_ops *ops = cmd->ops;
struct scx_sched *sch;
struct scx_task_iter sti;
@@ ... @@
* Flush scx_disable_work to ensure that error is reported before init
* completion. sch's base reference will be put by bpf_scx_unreg().
*/
- scx_error(sch, "scx_enable() failed (%d)", ret);
+ scx_error(sch, "scx_root_enable() failed (%d)", ret);
kthread_flush_work(&sch->disable_work);
cmd->ret = 0;
}
@@ ... @@
-static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
+static s32 scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
{
static struct kthread_worker *helper;
static DEFINE_MUTEX(helper_mutex);
@@ ... @@
mutex_unlock(&helper_mutex);
}
- kthread_init_work(&cmd.work, scx_enable_workfn);
+ kthread_init_work(&cmd.work, scx_root_enable_workfn);
cmd.ops = ops;
kthread_queue_work(READ_ONCE(helper), &cmd.work);
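scx_enable() runs the heavy lifting on the shared helper kthread_worker: it packages its arguments into a stack-allocated scx_enable_cmd, queues the work, and must wait for it before cmd goes out of scope. A sketch of that queue-and-wait shape, assuming the .link member and the kthread_flush_work()-based wait, neither of which is visible in the hunks above:

static s32 scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
{
	struct scx_enable_cmd cmd = { .link = link };	/* .link is assumed */

	/* ... helper is created once under helper_mutex, as above ... */

	kthread_init_work(&cmd.work, scx_root_enable_workfn);
	cmd.ops = ops;
	kthread_queue_work(READ_ONCE(helper), &cmd.work);
	kthread_flush_work(&cmd.work);	/* wait; cmd lives on this stack frame */

	return cmd.ret;
}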
@@ ... @@
struct sched_ext_ops *ops = kdata;
struct scx_sched *sch = ops->priv;
- scx_disable(SCX_EXIT_UNREG);
+ scx_disable(sch, SCX_EXIT_UNREG);
kthread_flush_work(&sch->disable_work);
kobject_put(&sch->kobj);
}
@@ ... @@
static void sysrq_handle_sched_ext_reset(u8 key)
{
- scx_disable(SCX_EXIT_SYSRQ);
+ struct scx_sched *sch;
+
+ rcu_read_lock();
+ sch = rcu_dereference(scx_root);
+ if (likely(sch))
+ scx_disable(sch, SCX_EXIT_SYSRQ);
+ else
+ pr_info("sched_ext: BPF schedulers not loaded\n");
+ rcu_read_unlock();
}
static const struct sysrq_key_op sysrq_sched_ext_reset_op = {
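The two updated call sites show the new caller contract: scx_disable() no longer looks up scx_root on its own, so whoever calls it must keep sch alive across the call. Condensed from the hunks above:

/* bpf_scx_unreg(): owns the scheduler via ops->priv, no RCU needed */
sch = ops->priv;
scx_disable(sch, SCX_EXIT_UNREG);

/* sysrq: borrow the RCU-protected root and stay in the read section */
rcu_read_lock();
sch = rcu_dereference(scx_root);
if (likely(sch))
	scx_disable(sch, SCX_EXIT_SYSRQ);
rcu_read_unlock();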