sched_ext: Start schedulers with consistent p->scx.slice values
author     Tejun Heo <tj@kernel.org>
           Thu, 10 Oct 2024 21:41:44 +0000 (11:41 -1000)
committer  Tejun Heo <tj@kernel.org>
           Thu, 10 Oct 2024 21:41:44 +0000 (11:41 -1000)
The disable path caps p->scx.slice to SCX_SLICE_DFL. As the field is already
ignored at that point in the disable path, the only effect of the cap is that
the next BPF scheduler to be loaded won't see unreasonably large left-over
slices. Ultimately this shouldn't matter, but it's better to start in a known
state. Drop the p->scx.slice capping from the disable path and instead reset
it to SCX_SLICE_DFL in the enable path.
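
For illustration only (not part of this patch), a minimal sketch of a
hypothetical BPF scheduler enqueue callback, example_enqueue, that dispatches
tasks with whatever slice they already carry. Previously, the first
p->scx.slice values such a scheduler saw after load were left-overs from
before the load (merely capped at SCX_SLICE_DFL by the previous disable);
with this change, every task starts out at exactly SCX_SLICE_DFL.

  #include <scx/common.bpf.h>

  /*
   * Hypothetical sketch, not from this patch: an enqueue callback that
   * reuses whatever slice the task already carries.  What it sees on the
   * first enqueue after a scheduler load is whatever the enable path set;
   * with this change that is always SCX_SLICE_DFL.
   */
  void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
  {
          /* Dispatch to the global DSQ, keeping the task's current slice. */
          scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, p->scx.slice, enq_flags);
  }

The callback name and the pass-through of p->scx.slice are assumptions made
for the example; scx_bpf_dispatch(), SCX_DSQ_GLOBAL and SCX_SLICE_DFL are the
interfaces the in-tree example schedulers use.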

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
kernel/sched/ext.c

index 2cb304b370141586e796d17f4fc0d610462ba4c8..4e56230e6e4a4c0fcc3785c93ac5f7e1c4092a69 100644
@@ -4473,7 +4473,6 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 
                sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 
-               p->scx.slice = min_t(u64, p->scx.slice, SCX_SLICE_DFL);
                __setscheduler_prio(p, p->prio);
                check_class_changing(task_rq(p), p, old_class);
 
@@ -5190,6 +5189,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 
                sched_deq_and_put_task(p, DEQUEUE_SAVE | DEQUEUE_MOVE, &ctx);
 
+               p->scx.slice = SCX_SLICE_DFL;
                __setscheduler_prio(p, p->prio);
                check_class_changing(task_rq(p), p, old_class);