git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Move default slice to per-scheduler field
authorTejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:03 +0000 (07:58 -1000)
committerTejun Heo <tj@kernel.org>
Fri, 6 Mar 2026 17:58:03 +0000 (07:58 -1000)
The default time slice was stored in the global scx_slice_dfl variable which
was dynamically modified when entering and exiting bypass mode. With
hierarchical scheduling, each scheduler instance needs its own default slice
configuration so that bypass operations on one scheduler don't affect others.

Move slice_dfl into struct scx_sched and update all access sites. The bypass
logic now modifies the root scheduler's slice_dfl. At task initialization in
init_scx_entity(), use the SCX_SLICE_DFL constant directly since the task may
not yet be associated with a specific scheduler.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
kernel/sched/ext.c
kernel/sched/ext_internal.h

index e1fc2b1fc779ef4ff19eec8e94c34ca480722093..a73a5957e9d9f11da50c3aba24b77f139b05c1e8 100644 (file)
@@ -164,7 +164,6 @@ static struct kset *scx_kset;
  * There usually is no reason to modify these as normal scheduler operation
  * shouldn't be affected by them. The knobs are primarily for debugging.
  */
-static u64 scx_slice_dfl = SCX_SLICE_DFL;
 static unsigned int scx_slice_bypass_us = SCX_SLICE_BYPASS / NSEC_PER_USEC;
 static unsigned int scx_bypass_lb_intv_us = SCX_BYPASS_LB_DFL_INTV_US;
 
@@ -1135,7 +1134,7 @@ static void dsq_mod_nr(struct scx_dispatch_q *dsq, s32 delta)
 
 static void refill_task_slice_dfl(struct scx_sched *sch, struct task_struct *p)
 {
-       p->scx.slice = READ_ONCE(scx_slice_dfl);
+       p->scx.slice = READ_ONCE(sch->slice_dfl);
        __scx_add_event(sch, SCX_EV_REFILL_SLICE_DFL, 1);
 }
 
@@ -3288,7 +3287,7 @@ void init_scx_entity(struct sched_ext_entity *scx)
        INIT_LIST_HEAD(&scx->runnable_node);
        scx->runnable_at = jiffies;
        scx->ddsp_dsq_id = SCX_DSQ_INVALID;
-       scx->slice = READ_ONCE(scx_slice_dfl);
+       scx->slice = SCX_SLICE_DFL;
 }
 
 void scx_pre_fork(struct task_struct *p)
@@ -4449,6 +4448,8 @@ static void scx_bypass(bool bypass)
 
        raw_spin_lock_irqsave(&bypass_lock, flags);
        sch = rcu_dereference_bh(scx_root);
+       if (!sch)
+               goto unlock;
 
        if (bypass) {
                u32 intv_us;
@@ -4457,7 +4458,7 @@ static void scx_bypass(bool bypass)
                WARN_ON_ONCE(scx_bypass_depth <= 0);
                if (scx_bypass_depth != 1)
                        goto unlock;
-               WRITE_ONCE(scx_slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
+               WRITE_ONCE(sch->slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
                bypass_timestamp = ktime_get_ns();
                if (sch)
                        scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
@@ -4473,7 +4474,7 @@ static void scx_bypass(bool bypass)
                WARN_ON_ONCE(scx_bypass_depth < 0);
                if (scx_bypass_depth != 0)
                        goto unlock;
-               WRITE_ONCE(scx_slice_dfl, SCX_SLICE_DFL);
+               WRITE_ONCE(sch->slice_dfl, SCX_SLICE_DFL);
                if (sch)
                        scx_add_event(sch, SCX_EV_BYPASS_DURATION,
                                      ktime_get_ns() - bypass_timestamp);
@@ -5317,6 +5318,7 @@ static struct scx_sched *scx_alloc_and_add_sched(struct sched_ext_ops *ops,
        sch->ancestors[level] = sch;
        sch->level = level;
 
+       sch->slice_dfl = SCX_SLICE_DFL;
        atomic_set(&sch->exit_kind, SCX_EXIT_NONE);
        init_irq_work(&sch->error_irq_work, scx_error_irq_workfn);
        kthread_init_work(&sch->disable_work, scx_disable_workfn);
@@ -5662,7 +5664,7 @@ static void scx_root_enable_workfn(struct kthread_work *work)
                        queue_flags |= DEQUEUE_CLASS;
 
                scoped_guard (sched_change, p, queue_flags) {
-                       p->scx.slice = READ_ONCE(scx_slice_dfl);
+                       p->scx.slice = READ_ONCE(sch->slice_dfl);
                        p->sched_class = new_class;
                }
        }
index 026bfdd0e11da6e42027a961e7936d3084b4f7f5..6c1eeaaa41db27444987254c685348c668b80344 100644 (file)
@@ -950,6 +950,7 @@ struct scx_sched {
        struct scx_dispatch_q   **global_dsqs;
        struct scx_sched_pcpu __percpu *pcpu;
 
+       u64                     slice_dfl;
        s32                     level;
 
        /*