static LLIST_HEAD(dsqs_to_free);
-/* dispatch buf */
-struct scx_dsp_buf_ent {
-	struct task_struct *task;
-	unsigned long qseq;
-	u64 dsq_id;
-	u64 enq_flags;
-};
-
-static u32 scx_dsp_max_batch;
-
-struct scx_dsp_ctx {
-	struct rq *rq;
-	u32 cursor;
-	u32 nr_tasks;
-	struct scx_dsp_buf_ent buf[];
-};
-
-static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
-
/* string formatting from BPF */
struct scx_bstr_buf {
	u64 data[MAX_BPRINTF_VARARGS];
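
The definitions removed above are relocated rather than dropped: the hunks below reach the same state through each scheduler's per-CPU area. A minimal sketch of the layout this implies, assuming the structs move to a shared internal header (the placement is an assumption; the fields come from the removed block and the struct_size_t() call further down):

struct scx_dsp_ctx {
	struct rq		*rq;
	u32			cursor;
	u32			nr_tasks;
	struct scx_dsp_buf_ent	buf[];	/* sized by sch->dsp_max_batch */
};

struct scx_sched_pcpu {
	/* dsp_ctx must stay last: it ends in a flexible array */
	struct scx_dsp_ctx	dsp_ctx;
};

With this layout, struct_size_t(struct scx_sched_pcpu, dsp_ctx.buf, n) sizes each CPU's copy to the struct plus n dispatch-buffer slots, which is what the allocation below requests. scx_dsp_buf_ent itself is assumed to carry over unchanged.
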
static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
{
-	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+	struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
	u32 u;
	for (u = 0; u < dspc->cursor; u++) {
static bool scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
			       struct task_struct *prev)
{
-	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+	struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
	int nr_loops = SCX_DSP_MAX_LOOPS;
	s32 cpu = cpu_of(rq);
	bool prev_on_sch = (prev->sched_class == &ext_sched_class) &&
	 */
	kobject_del(&sch->kobj);
-	free_percpu(scx_dsp_ctx);
-	scx_dsp_ctx = NULL;
-	scx_dsp_max_batch = 0;
	free_kick_syncs();
	mutex_unlock(&scx_enable_mutex);
		sch->global_dsqs[node] = dsq;
	}
-	sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
+	sch->dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
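+	/*
+	 * The dispatch buffer is embedded in each CPU's scx_sched_pcpu, so
+	 * the allocation must cover dsp_max_batch buf[] slots per CPU.
+	 */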
+	sch->pcpu = __alloc_percpu(struct_size_t(struct scx_sched_pcpu,
+						 dsp_ctx.buf, sch->dsp_max_batch),
+				   __alignof__(struct scx_sched_pcpu));
	if (!sch->pcpu) {
		ret = -ENOMEM;
		goto err_free_gdsqs;
	if (ret)
		goto err_disable;
-	WARN_ON_ONCE(scx_dsp_ctx);
-	scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
-	scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
-						   scx_dsp_max_batch),
-				     __alignof__(struct scx_dsp_ctx));
-	if (!scx_dsp_ctx) {
-		ret = -ENOMEM;
-		goto err_disable;
-	}
-
	if (ops->timeout_ms)
		timeout = msecs_to_jiffies(ops->timeout_ms);
	else
static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
				  u64 dsq_id, u64 enq_flags)
{
-	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+	struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
	struct task_struct *ddsp_task;
	ddsp_task = __this_cpu_read(direct_dispatch_task);
		return;
	}
-	if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
+	if (unlikely(dspc->cursor >= sch->dsp_max_batch)) {
		scx_error(sch, "dispatch buffer overflow");
		return;
	}
	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
		return 0;
-	return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
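+	/* sch->pcpu is percpu; __this_cpu_read() reads this CPU's instance */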
+	return sch->dsp_max_batch - __this_cpu_read(sch->pcpu->dsp_ctx.cursor);
}
/**
 */
__bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux)
{
-	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	struct scx_sched *sch;
+	struct scx_dsp_ctx *dspc;
	guard(rcu)();
	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
		return;
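+	/* the dispatch context now hangs off sch's per-CPU data */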
+	dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
+
	if (dspc->cursor > 0)
		dspc->cursor--;
	else
 */
__bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id, const struct bpf_prog_aux *aux)
{
-	struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	struct scx_dispatch_q *dsq;
	struct scx_sched *sch;
+	struct scx_dsp_ctx *dspc;
	guard(rcu)();
	if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
		return false;
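+	/* as in scx_bpf_dispatch_cancel(), reach dspc through sch's pcpu area */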
+	dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
+
	flush_dispatch_buf(sch, dspc->rq);
	dsq = find_user_dsq(sch, dsq_id);