sched_ext: Move scx_dsp_ctx and scx_dsp_max_batch into scx_sched
author		Tejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:04 +0000 (07:58 -1000)
committer	Tejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:04 +0000 (07:58 -1000)
scx_dsp_ctx and scx_dsp_max_batch are global variables used in the dispatch
path. In preparation for multiple scheduler support, move the former into
scx_sched_pcpu and the latter into scx_sched. No user-visible behavior
changes intended.
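
The dispatch path now resolves the per-CPU dispatch context through the
scheduler instance instead of the standalone per-CPU allocation
(illustrative, condensed from the diff below):

	before: struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
	after:  struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;

and the dispatch buffer size checks read sch->dsp_max_batch rather than
the global scx_dsp_max_batch.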

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/ext_internal.h

diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index bf23e092032b346c775eccb305afb9fdfc307237..43569d138bd23211346b29f28069d58046e574ec 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -106,25 +106,6 @@ static const struct rhashtable_params dsq_hash_params = {
 
 static LLIST_HEAD(dsqs_to_free);
 
-/* dispatch buf */
-struct scx_dsp_buf_ent {
-       struct task_struct      *task;
-       unsigned long           qseq;
-       u64                     dsq_id;
-       u64                     enq_flags;
-};
-
-static u32 scx_dsp_max_batch;
-
-struct scx_dsp_ctx {
-       struct rq               *rq;
-       u32                     cursor;
-       u32                     nr_tasks;
-       struct scx_dsp_buf_ent  buf[];
-};
-
-static struct scx_dsp_ctx __percpu *scx_dsp_ctx;
-
 /* string formatting from BPF */
 struct scx_bstr_buf {
        u64                     data[MAX_BPRINTF_VARARGS];
@@ -2402,7 +2383,7 @@ retry:
 
 static void flush_dispatch_buf(struct scx_sched *sch, struct rq *rq)
 {
-       struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+       struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
        u32 u;
 
        for (u = 0; u < dspc->cursor; u++) {
@@ -2432,7 +2413,7 @@ static inline void maybe_queue_balance_callback(struct rq *rq)
 static bool scx_dispatch_sched(struct scx_sched *sch, struct rq *rq,
                               struct task_struct *prev)
 {
-       struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+       struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
        int nr_loops = SCX_DSP_MAX_LOOPS;
        s32 cpu = cpu_of(rq);
        bool prev_on_sch = (prev->sched_class == &ext_sched_class) &&
@@ -4972,9 +4953,6 @@ static void scx_root_disable(struct scx_sched *sch)
         */
        kobject_del(&sch->kobj);
 
-       free_percpu(scx_dsp_ctx);
-       scx_dsp_ctx = NULL;
-       scx_dsp_max_batch = 0;
        free_kick_syncs();
 
        mutex_unlock(&scx_enable_mutex);
@@ -5469,7 +5447,10 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
                sch->global_dsqs[node] = dsq;
        }
 
-       sch->pcpu = alloc_percpu(struct scx_sched_pcpu);
+       sch->dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
+       sch->pcpu = __alloc_percpu(struct_size_t(struct scx_sched_pcpu,
+                                                dsp_ctx.buf, sch->dsp_max_batch),
+                                  __alignof__(struct scx_sched_pcpu));
        if (!sch->pcpu) {
                ret = -ENOMEM;
                goto err_free_gdsqs;
@@ -5716,16 +5697,6 @@ static void scx_root_enable_workfn(struct kthread_work *work)
        if (ret)
                goto err_disable;
 
-       WARN_ON_ONCE(scx_dsp_ctx);
-       scx_dsp_max_batch = ops->dispatch_max_batch ?: SCX_DSP_DFL_MAX_BATCH;
-       scx_dsp_ctx = __alloc_percpu(struct_size_t(struct scx_dsp_ctx, buf,
-                                                  scx_dsp_max_batch),
-                                    __alignof__(struct scx_dsp_ctx));
-       if (!scx_dsp_ctx) {
-               ret = -ENOMEM;
-               goto err_disable;
-       }
-
        if (ops->timeout_ms)
                timeout = msecs_to_jiffies(ops->timeout_ms);
        else
@@ -6703,7 +6674,7 @@ static bool scx_dsq_insert_preamble(struct scx_sched *sch, struct task_struct *p
 static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
                                  u64 dsq_id, u64 enq_flags)
 {
-       struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
+       struct scx_dsp_ctx *dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
        struct task_struct *ddsp_task;
 
        ddsp_task = __this_cpu_read(direct_dispatch_task);
@@ -6712,7 +6683,7 @@ static void scx_dsq_insert_commit(struct scx_sched *sch, struct task_struct *p,
                return;
        }
 
-       if (unlikely(dspc->cursor >= scx_dsp_max_batch)) {
+       if (unlikely(dspc->cursor >= sch->dsp_max_batch)) {
                scx_error(sch, "dispatch buffer overflow");
                return;
        }
@@ -7030,7 +7001,7 @@ __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(const struct bpf_prog_aux *aux)
        if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
                return 0;
 
-       return scx_dsp_max_batch - __this_cpu_read(scx_dsp_ctx->cursor);
+       return sch->dsp_max_batch - __this_cpu_read(sch->pcpu->dsp_ctx.cursor);
 }
 
 /**
@@ -7042,8 +7013,8 @@ __bpf_kfunc u32 scx_bpf_dispatch_nr_slots(const struct bpf_prog_aux *aux)
  */
 __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux)
 {
-       struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
        struct scx_sched *sch;
+       struct scx_dsp_ctx *dspc;
 
        guard(rcu)();
 
@@ -7054,6 +7025,8 @@ __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux)
        if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
                return;
 
+       dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
+
        if (dspc->cursor > 0)
                dspc->cursor--;
        else
@@ -7077,9 +7050,9 @@ __bpf_kfunc void scx_bpf_dispatch_cancel(const struct bpf_prog_aux *aux)
  */
 __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id, const struct bpf_prog_aux *aux)
 {
-       struct scx_dsp_ctx *dspc = this_cpu_ptr(scx_dsp_ctx);
        struct scx_dispatch_q *dsq;
        struct scx_sched *sch;
+       struct scx_dsp_ctx *dspc;
 
        guard(rcu)();
 
@@ -7090,6 +7063,8 @@ __bpf_kfunc bool scx_bpf_dsq_move_to_local(u64 dsq_id, const struct bpf_prog_aux
        if (!scx_kf_allowed(sch, SCX_KF_DISPATCH))
                return false;
 
+       dspc = &this_cpu_ptr(sch->pcpu)->dsp_ctx;
+
        flush_dispatch_buf(sch, dspc->rq);
 
        dsq = find_user_dsq(sch, dsq_id);
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 1da3b9b75d1820c2823ebec47967efc6ba3b118a..dba8c2ce8ed9bc4d3aa561bd597b406d476bba9b 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -937,6 +937,21 @@ enum scx_sched_pcpu_flags {
        SCX_SCHED_PCPU_BYPASSING        = 1LLU << 0,
 };
 
+/* dispatch buf */
+struct scx_dsp_buf_ent {
+       struct task_struct      *task;
+       unsigned long           qseq;
+       u64                     dsq_id;
+       u64                     enq_flags;
+};
+
+struct scx_dsp_ctx {
+       struct rq               *rq;
+       u32                     cursor;
+       u32                     nr_tasks;
+       struct scx_dsp_buf_ent  buf[];
+};
+
 struct scx_sched_pcpu {
        u64                     flags;  /* protected by rq lock */
 
@@ -951,6 +966,9 @@ struct scx_sched_pcpu {
 #ifdef CONFIG_EXT_SUB_SCHED
        u32                     bypass_host_seq;
 #endif
+
+       /* must be the last entry - contains flex array */
+       struct scx_dsp_ctx      dsp_ctx;
 };
 
 struct scx_sched {
@@ -978,6 +996,7 @@ struct scx_sched {
        atomic_t                bypass_dsp_enable_depth;
 
        bool                    aborting;
+       u32                     dsp_max_batch;
        s32                     level;
 
        /*