*/
static LIST_HEAD(scx_sched_all);
+#ifdef CONFIG_EXT_SUB_SCHED
+/*
+ * Hash table mapping a sub-scheduler's ops.sub_cgroup_id to its scx_sched.
+ * Entries are inserted by scx_link_sched() and removed by
+ * scx_unlink_sched(); lookups go through scx_find_sub_sched() keyed by the
+ * u64 cgroup ID. Only sub-scheds (those with a parent) are hashed.
+ */
+static const struct rhashtable_params scx_sched_hash_params = {
+ .key_len = sizeof_field(struct scx_sched, ops.sub_cgroup_id),
+ .key_offset = offsetof(struct scx_sched, ops.sub_cgroup_id),
+ .head_offset = offsetof(struct scx_sched, hash_node),
+};
+
+static struct rhashtable scx_sched_hash;
+#endif
+
/*
* During exit, a task may schedule after losing its PIDs. When disabling the
* BPF scheduler, we need to be able to iterate tasks in every state to
return NULL;
}
+/*
+ * Find the sub-scheduler whose ops.sub_cgroup_id matches @cgroup_id, or
+ * NULL if none is registered in scx_sched_hash.
+ *
+ * NOTE(review): rhashtable_lookup() must be called with rcu_read_lock()
+ * held — confirm all callers satisfy this, and that the returned pointer
+ * is only dereferenced within the same RCU read-side section.
+ */
+static struct scx_sched *scx_find_sub_sched(u64 cgroup_id)
+{
+ return rhashtable_lookup(&scx_sched_hash, &cgroup_id,
+ scx_sched_hash_params);
+}
+
static void scx_set_task_sched(struct task_struct *p, struct scx_sched *sch)
{
rcu_assign_pointer(p->scx.sched, sch);
#else /* CONFIG_EXT_SUB_SCHED */
static struct scx_sched *scx_parent(struct scx_sched *sch) { return NULL; }
static struct scx_sched *scx_next_descendant_pre(struct scx_sched *pos, struct scx_sched *root) { return pos ? NULL : root; }
+/* !CONFIG_EXT_SUB_SCHED: no sub-scheds can exist, so lookups always miss. */
+static struct scx_sched *scx_find_sub_sched(u64 cgroup_id) { return NULL; }
static void scx_set_task_sched(struct task_struct *p, struct scx_sched *sch) {}
#endif /* CONFIG_EXT_SUB_SCHED */
cancel_delayed_work_sync(&scx_watchdog_work);
}
-static void scx_link_sched(struct scx_sched *sch)
+/*
+ * Link @sch into the global scheduler list and, for sub-scheds (those with
+ * a parent), into the parent's children list and the cgroup-ID hash.
+ *
+ * Returns 0 on success or a negative error from
+ * rhashtable_lookup_insert_fast() — e.g. -EEXIST when another sched is
+ * already registered under the same ops.sub_cgroup_id.
+ */
+static s32 scx_link_sched(struct scx_sched *sch)
{
scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
#ifdef CONFIG_EXT_SUB_SCHED
struct scx_sched *parent = scx_parent(sch);
- if (parent)
+ s32 ret;
+
+ if (parent) {
+ /*
+ * NOTE(review): rhashtable insertion can trigger bucket-table
+ * growth (allocation); confirm this is safe under a
+ * raw_spinlock_irq guard, particularly on PREEMPT_RT — TODO.
+ * Hash insert comes first so a duplicate sub_cgroup_id fails
+ * before any list linkage happens.
+ */
+ ret = rhashtable_lookup_insert_fast(&scx_sched_hash,
+ &sch->hash_node, scx_sched_hash_params);
+ if (ret) {
+ scx_error(sch, "failed to insert into scx_sched_hash (%d)", ret);
+ /*
+ * Early return is safe: scoped_guard releases
+ * scx_sched_lock on scope exit. refresh_watchdog() is
+ * intentionally skipped since nothing was linked.
+ */
+ return ret;
+ }
+
list_add_tail(&sch->sibling, &parent->children);
+ }
#endif /* CONFIG_EXT_SUB_SCHED */
+
list_add_tail_rcu(&sch->all, &scx_sched_all);
}
refresh_watchdog();
+ return 0;
}
+/*
+ * Undo scx_link_sched(): for sub-scheds, remove @sch from the cgroup-ID
+ * hash and the parent's children list; then drop it from the global
+ * scheduler list. Hash removal precedes list removal, mirroring the
+ * insert-then-link order in scx_link_sched().
+ */
static void scx_unlink_sched(struct scx_sched *sch)
{
scoped_guard(raw_spinlock_irq, &scx_sched_lock) {
#ifdef CONFIG_EXT_SUB_SCHED
- if (scx_parent(sch))
+ if (scx_parent(sch)) {
+ rhashtable_remove_fast(&scx_sched_hash, &sch->hash_node,
+ scx_sched_hash_params);
list_del_init(&sch->sibling);
+ }
#endif /* CONFIG_EXT_SUB_SCHED */
list_del_rcu(&sch->all);
}
*/
rcu_assign_pointer(scx_root, sch);
- scx_link_sched(sch);
+ ret = scx_link_sched(sch);
+ if (ret)
+ goto err_disable;
scx_idle_enable(ops);
goto out_put_cgrp;
}
- scx_link_sched(sch);
+ ret = scx_link_sched(sch);
+ if (ret)
+ goto err_disable;
if (sch->level >= SCX_SUB_MAX_DEPTH) {
scx_error(sch, "max nesting depth %d violated",
register_sysrq_key('S', &sysrq_sched_ext_reset_op);
register_sysrq_key('D', &sysrq_sched_ext_dump_op);
INIT_DELAYED_WORK(&scx_watchdog_work, scx_watchdog_workfn);
+
+#ifdef CONFIG_EXT_SUB_SCHED
+ BUG_ON(rhashtable_init(&scx_sched_hash, &scx_sched_hash_params));
+#endif /* CONFIG_EXT_SUB_SCHED */
}