selftests/sched_ext: fix build after renames in sched_ext API
Author:     Ihor Solodrai <ihor.solodrai@pm.me>
AuthorDate: Thu, 21 Nov 2024 21:40:17 +0000
Commit:     Tejun Heo <tj@kernel.org>
CommitDate: Wed, 4 Dec 2024 19:46:39 +0000 (09:46 -1000)
The selftests are failing to build on the current tip of bpf-next and
sched_ext [1]. This has broken BPF CI [2] after merge from upstream.

Use appropriate function names in the selftests according to the
recent changes in the sched_ext API [3].

[1] https://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next.git/commit/?id=fc39fb56917bb3cb53e99560ca3612a84456ada2
[2] https://github.com/kernel-patches/bpf/actions/runs/11959327258/job/33340923745
[3] https://lore.kernel.org/all/20241109194853.580310-1-tj@kernel.org/
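
For reference, the kfunc renames from [3] applied throughout the
selftests are as follows (argument lists are unchanged; only the
names differ):

    scx_bpf_dispatch()       -> scx_bpf_dsq_insert()
    scx_bpf_dispatch_vtime() -> scx_bpf_dsq_insert_vtime()
    scx_bpf_consume()        -> scx_bpf_dsq_move_to_local()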

Signed-off-by: Ihor Solodrai <ihor.solodrai@pm.me>
Acked-by: Andrea Righi <arighi@nvidia.com>
Acked-by: David Vernet <void@manifault.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
12 files changed:
tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
tools/testing/selftests/sched_ext/exit.bpf.c
tools/testing/selftests/sched_ext/maximal.bpf.c
tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c

diff --git a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
index 37d9bf6fb7458d3ddbfd3de74b3d9f1775574664..6f4c3f5a1c5d99a3f14debc30810e5fe220f91e6 100644
--- a/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_bogus_dsq_fail.bpf.c
@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(ddsp_bogus_dsq_fail_select_cpu, struct task_struct *p,
                 * If we dispatch to a bogus DSQ that will fall back to the
                 * builtin global DSQ, we fail gracefully.
                 */
-               scx_bpf_dispatch_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
+               scx_bpf_dsq_insert_vtime(p, 0xcafef00d, SCX_SLICE_DFL,
                                       p->scx.dsq_vtime, 0);
                return cpu;
        }
diff --git a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
index dffc97d9cdf141beffdeecce79f9e8751b0a3a52..e4a55027778fd08b5e769c37291810462481f952 100644
--- a/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
+++ b/tools/testing/selftests/sched_ext/ddsp_vtimelocal_fail.bpf.c
@@ -17,8 +17,8 @@ s32 BPF_STRUCT_OPS(ddsp_vtimelocal_fail_select_cpu, struct task_struct *p,
 
        if (cpu >= 0) {
                /* Shouldn't be allowed to vtime dispatch to a builtin DSQ. */
-               scx_bpf_dispatch_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
-                                      p->scx.dsq_vtime, 0);
+               scx_bpf_dsq_insert_vtime(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL,
+                                        p->scx.dsq_vtime, 0);
                return cpu;
        }
 
diff --git a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
index 6a7db1502c29e1d8a1bca1f224c10c25540e5732..6325bf76f47ee439b062dfcbaae4e72edf0bbfa1 100644
--- a/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
+++ b/tools/testing/selftests/sched_ext/dsp_local_on.bpf.c
@@ -45,7 +45,7 @@ void BPF_STRUCT_OPS(dsp_local_on_dispatch, s32 cpu, struct task_struct *prev)
 
        target = bpf_get_prandom_u32() % nr_cpus;
 
-       scx_bpf_dispatch(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL_ON | target, SCX_SLICE_DFL, 0);
        bpf_task_release(p);
 }
 
diff --git a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
index 1efb50d61040ad3832d5cea89c11200593691069..a7cf868d5e311d8f9e7649657a53e63237b0e516 100644
--- a/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
+++ b/tools/testing/selftests/sched_ext/enq_select_cpu_fails.bpf.c
@@ -31,7 +31,7 @@ void BPF_STRUCT_OPS(enq_select_cpu_fails_enqueue, struct task_struct *p,
        /* Can only call from ops.select_cpu() */
        scx_bpf_select_cpu_dfl(p, 0, 0, &found);
 
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 SEC(".struct_ops.link")
diff --git a/tools/testing/selftests/sched_ext/exit.bpf.c b/tools/testing/selftests/sched_ext/exit.bpf.c
index d75d4faf07f6d5690801695288306c358e3a5e84..4bc36182d3ffc2fb10e4e2c4eb92fccbfd7fec41 100644
--- a/tools/testing/selftests/sched_ext/exit.bpf.c
+++ b/tools/testing/selftests/sched_ext/exit.bpf.c
@@ -33,7 +33,7 @@ void BPF_STRUCT_OPS(exit_enqueue, struct task_struct *p, u64 enq_flags)
        if (exit_point == EXIT_ENQUEUE)
                EXIT_CLEANLY();
 
-       scx_bpf_dispatch(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, DSQ_ID, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
@@ -41,7 +41,7 @@ void BPF_STRUCT_OPS(exit_dispatch, s32 cpu, struct task_struct *p)
        if (exit_point == EXIT_DISPATCH)
                EXIT_CLEANLY();
 
-       scx_bpf_consume(DSQ_ID);
+       scx_bpf_dsq_move_to_local(DSQ_ID);
 }
 
 void BPF_STRUCT_OPS(exit_enable, struct task_struct *p)
diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index 4d4cd8d966dba646f2718f8ab126726b7c3ad375..4c005fa718103bf31326e8ca7c6ccf43d7f8a97e 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -20,7 +20,7 @@ s32 BPF_STRUCT_OPS(maximal_select_cpu, struct task_struct *p, s32 prev_cpu,
 
 void BPF_STRUCT_OPS(maximal_enqueue, struct task_struct *p, u64 enq_flags)
 {
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
@@ -28,7 +28,7 @@ void BPF_STRUCT_OPS(maximal_dequeue, struct task_struct *p, u64 deq_flags)
 
 void BPF_STRUCT_OPS(maximal_dispatch, s32 cpu, struct task_struct *prev)
 {
-       scx_bpf_consume(SCX_DSQ_GLOBAL);
+       scx_bpf_dsq_move_to_local(SCX_DSQ_GLOBAL);
 }
 
 void BPF_STRUCT_OPS(maximal_runnable, struct task_struct *p, u64 enq_flags)
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
index f171ac47097060be0ff3a72ef13385cc00502126..13d0f5be788d1206f28a15c8415679c52fb74989 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl.bpf.c
@@ -30,7 +30,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_enqueue, struct task_struct *p,
        }
        scx_bpf_put_idle_cpumask(idle_mask);
 
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
 }
 
 SEC(".struct_ops.link")
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
index 9efdbb7da92887d0eec403e130783bd7b106e88e..815f1d5d61ac4352c8952332f7ce9cadfcd75345 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dfl_nodispatch.bpf.c
@@ -67,7 +67,7 @@ void BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_enqueue, struct task_struct *p,
                saw_local = true;
        }
 
-       scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, enq_flags);
+       scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, enq_flags);
 }
 
 s32 BPF_STRUCT_OPS(select_cpu_dfl_nodispatch_init_task,
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
index 59bfc4f36167a704c8980304dc09fc7a51ff1456..4bb99699e9209c722f361a53fbe1953803236d4f 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch.bpf.c
@@ -29,7 +29,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_select_cpu, struct task_struct *p,
        cpu = prev_cpu;
 
 dispatch:
-       scx_bpf_dispatch(p, dsq_id, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, dsq_id, SCX_SLICE_DFL, 0);
        return cpu;
 }
 
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
index 3bbd5fcdfb18e0aa64a3a68b79d6db4573218d03..2a75de11b2cfd59b86ce6ed36ee676b4655708ba 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_bad_dsq.bpf.c
@@ -18,7 +18,7 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_bad_dsq_select_cpu, struct task_struct *p
                   s32 prev_cpu, u64 wake_flags)
 {
        /* Dispatching to a random DSQ should fail. */
-       scx_bpf_dispatch(p, 0xcafef00d, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, 0xcafef00d, SCX_SLICE_DFL, 0);
 
        return prev_cpu;
 }
diff --git a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
index 0fda57fe0ecfaecc5a5cfaebe2589f563f7ad579..99d075695c9743a4b817e48263f4467b8024ff9f 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_dispatch_dbl_dsp.bpf.c
@@ -18,8 +18,8 @@ s32 BPF_STRUCT_OPS(select_cpu_dispatch_dbl_dsp_select_cpu, struct task_struct *p
                   s32 prev_cpu, u64 wake_flags)
 {
        /* Dispatching twice in a row is disallowed. */
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
-       scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
+       scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, 0);
 
        return prev_cpu;
 }
diff --git a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
index e6c67bcf5e6e35f82e47ce139476a375f509c2cb..bfcb96cd4954bdd65b3c5f8217eb145aafd9010e 100644
--- a/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
+++ b/tools/testing/selftests/sched_ext/select_cpu_vtime.bpf.c
@@ -2,8 +2,8 @@
 /*
  * A scheduler that validates that enqueue flags are properly stored and
  * applied at dispatch time when a task is directly dispatched from
- * ops.select_cpu(). We validate this by using scx_bpf_dispatch_vtime(), and
- * making the test a very basic vtime scheduler.
+ * ops.select_cpu(). We validate this by using scx_bpf_dsq_insert_vtime(),
+ * and making the test a very basic vtime scheduler.
  *
  * Copyright (c) 2024 Meta Platforms, Inc. and affiliates.
  * Copyright (c) 2024 David Vernet <dvernet@meta.com>
@@ -47,13 +47,13 @@ s32 BPF_STRUCT_OPS(select_cpu_vtime_select_cpu, struct task_struct *p,
        cpu = prev_cpu;
        scx_bpf_test_and_clear_cpu_idle(cpu);
 ddsp:
-       scx_bpf_dispatch_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
+       scx_bpf_dsq_insert_vtime(p, VTIME_DSQ, SCX_SLICE_DFL, task_vtime(p), 0);
        return cpu;
 }
 
 void BPF_STRUCT_OPS(select_cpu_vtime_dispatch, s32 cpu, struct task_struct *p)
 {
-       if (scx_bpf_consume(VTIME_DSQ))
+       if (scx_bpf_dsq_move_to_local(VTIME_DSQ))
                consumed = true;
 }
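
For readers unfamiliar with the sched_ext API, a minimal sketch of the
enqueue/dispatch pattern with the renamed kfuncs, mirroring
maximal.bpf.c above (the example_* names are placeholders, not part of
this commit):

	void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
	{
		/* Queue the task on the builtin global DSQ with the default slice. */
		scx_bpf_dsq_insert(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
	}

	void BPF_STRUCT_OPS(example_dispatch, s32 cpu, struct task_struct *prev)
	{
		/* Pull the first queued task into this CPU's local DSQ to run it. */
		scx_bpf_dsq_move_to_local(SCX_DSQ_GLOBAL);
	}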