git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
sched_ext: Update selftests to drop ops.cpu_acquire/release()
author: Cheng-Yang Chou <yphbchou0911@gmail.com>
Sun, 15 Mar 2026 08:24:41 +0000 (16:24 +0800)
committer: Tejun Heo <tj@kernel.org>
Sun, 15 Mar 2026 08:54:05 +0000 (22:54 -1000)
ops.cpu_acquire/release() are deprecated by commit a3f5d4822253
("sched_ext: Allow scx_bpf_reenqueue_local() to be called from
anywhere") in favor of handling CPU preemption via the sched_switch
tracepoint.

In the maximal selftest, replace the cpu_acquire/release stubs with a
minimal sched_switch TP program. Attach all non-struct_ops programs
(including the new TP) via maximal__attach() after disabling auto-attach
for the maximal_ops struct_ops map, which is managed manually in run().

Apply the same fix to reload_loop, which also uses the maximal skeleton.

Signed-off-by: Cheng-Yang Chou <yphbchou0911@gmail.com>
Reviewed-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
tools/testing/selftests/sched_ext/maximal.bpf.c
tools/testing/selftests/sched_ext/maximal.c
tools/testing/selftests/sched_ext/reload_loop.c

index a3aabeb82e6be2d2216d2f733ea0fa7985e630ce..04a369078aac4c0269bdef8eb769d95df21d02d9 100644 (file)
@@ -67,13 +67,12 @@ void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,
 void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
 {}
 
-void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
-                   struct scx_cpu_acquire_args *args)
-{}
-
-void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
-                   struct scx_cpu_release_args *args)
-{}
+SEC("tp_btf/sched_switch")
+int BPF_PROG(maximal_sched_switch, bool preempt, struct task_struct *prev,
+            struct task_struct *next, unsigned int prev_state)
+{
+       return 0;
+}
 
 void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
 {}
@@ -150,8 +149,6 @@ struct sched_ext_ops maximal_ops = {
        .set_weight             = (void *) maximal_set_weight,
        .set_cpumask            = (void *) maximal_set_cpumask,
        .update_idle            = (void *) maximal_update_idle,
-       .cpu_acquire            = (void *) maximal_cpu_acquire,
-       .cpu_release            = (void *) maximal_cpu_release,
        .cpu_online             = (void *) maximal_cpu_online,
        .cpu_offline            = (void *) maximal_cpu_offline,
        .init_task              = (void *) maximal_init_task,
index c6be50a9941d55aa8994c69456e03a087e5b75fc..1dc3692246705009c7b850a0b32812a57fa9c550 100644 (file)
@@ -19,6 +19,9 @@ static enum scx_test_status setup(void **ctx)
        SCX_ENUM_INIT(skel);
        SCX_FAIL_IF(maximal__load(skel), "Failed to load skel");
 
+       bpf_map__set_autoattach(skel->maps.maximal_ops, false);
+       SCX_FAIL_IF(maximal__attach(skel), "Failed to attach skel");
+
        *ctx = skel;
 
        return SCX_TEST_PASS;
index 308211d804364aae76331638a25171ac764a97d2..49297b83d748d875485ae1d5e6eea0e84e60ae13 100644 (file)
@@ -23,6 +23,9 @@ static enum scx_test_status setup(void **ctx)
        SCX_ENUM_INIT(skel);
        SCX_FAIL_IF(maximal__load(skel), "Failed to load skel");
 
+       bpf_map__set_autoattach(skel->maps.maximal_ops, false);
+       SCX_FAIL_IF(maximal__attach(skel), "Failed to attach skel");
+
        return SCX_TEST_PASS;
 }