git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Update demo schedulers and selftests to use scx_bpf_task_set_dsq_vtime()
author: Cheng-Yang Chou <yphbchou0911@gmail.com>
Sun, 15 Mar 2026 08:24:40 +0000 (16:24 +0800)
committer: Tejun Heo <tj@kernel.org>
Sun, 15 Mar 2026 08:53:59 +0000 (22:53 -1000)
Direct writes to p->scx.dsq_vtime are deprecated in favor of
scx_bpf_task_set_dsq_vtime(). Update scx_simple, scx_flatcg, and
select_cpu_vtime selftest to use the new kfunc with
scale_by_task_weight_inverse().

Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
tools/sched_ext/scx_flatcg.bpf.c
tools/sched_ext/scx_simple.bpf.c
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c

index 1351377f64d5b1f753c98d1a61c8d83624866dc5..6d3a028d7b5924f800737d0da2dd455ca4db8249 100644 (file)
@@ -551,9 +551,11 @@ void BPF_STRUCT_OPS(fcg_stopping, struct task_struct *p, bool runnable)
         * too much, determine the execution time by taking explicit timestamps
         * instead of depending on @p->scx.slice.
         */
-       if (!fifo_sched)
-               p->scx.dsq_vtime +=
-                       (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+       if (!fifo_sched) {
+               u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
+
+               scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
+       }
 
        taskc = bpf_task_storage_get(&task_ctx, p, 0, 0);
        if (!taskc) {
@@ -822,7 +824,7 @@ s32 BPF_STRUCT_OPS(fcg_init_task, struct task_struct *p,
        if (!(cgc = find_cgrp_ctx(args->cgroup)))
                return -ENOENT;
 
-       p->scx.dsq_vtime = cgc->tvtime_now;
+       scx_bpf_task_set_dsq_vtime(p, cgc->tvtime_now);
 
        return 0;
 }
@@ -924,7 +926,7 @@ void BPF_STRUCT_OPS(fcg_cgroup_move, struct task_struct *p,
                return;
 
        delta = time_delta(p->scx.dsq_vtime, from_cgc->tvtime_now);
-       p->scx.dsq_vtime = to_cgc->tvtime_now + delta;
+       scx_bpf_task_set_dsq_vtime(p, to_cgc->tvtime_now + delta);
 }
 
 s32 BPF_STRUCT_OPS_SLEEPABLE(fcg_init)
index 9ad6f094998744c174323aace6715a5b99e86a51..cc40552b2b5f01b2f763a7e7de3faf71f025813a 100644 (file)
@@ -121,12 +121,14 @@ void BPF_STRUCT_OPS(simple_stopping, struct task_struct *p, bool runnable)
         * too much, determine the execution time by taking explicit timestamps
         * instead of depending on @p->scx.slice.
         */
-       p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+       u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
+
+       scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
 }
 
 void BPF_STRUCT_OPS(simple_enable, struct task_struct *p)
 {
-       p->scx.dsq_vtime = vtime_now;
+       scx_bpf_task_set_dsq_vtime(p, vtime_now);
 }
 
 s32 BPF_STRUCT_OPS_SLEEPABLE(simple_init)
index 7aa5dc6bfb9366332dacefabda99f585f32f10c6..eec70d388cbf3334ec18841c99ac2c909b9158cd 100644 (file)
@@ -66,12 +66,14 @@ void BPF_STRUCT_OPS(select_cpu_vtime_running, struct task_struct *p)
 void BPF_STRUCT_OPS(select_cpu_vtime_stopping, struct task_struct *p,
                    bool runnable)
 {
-       p->scx.dsq_vtime += (SCX_SLICE_DFL - p->scx.slice) * 100 / p->scx.weight;
+       u64 delta = scale_by_task_weight_inverse(p, SCX_SLICE_DFL - p->scx.slice);
+
+       scx_bpf_task_set_dsq_vtime(p, p->scx.dsq_vtime + delta);
 }
 
 void BPF_STRUCT_OPS(select_cpu_vtime_enable, struct task_struct *p)
 {
-       p->scx.dsq_vtime = vtime_now;
+       scx_bpf_task_set_dsq_vtime(p, vtime_now);
 }
 
 s32 BPF_STRUCT_OPS_SLEEPABLE(select_cpu_vtime_init)