From: Cheng-Yang Chou
Date: Sun, 15 Mar 2026 08:24:41 +0000 (+0800)
Subject: sched_ext: Update selftests to drop ops.cpu_acquire/release()
X-Git-Url: http://git.ipfire.org/gitweb.cgi?a=commitdiff_plain;h=f96bc0fa92be8dc0ec97bbe5bec6d5df26f9585b;p=thirdparty%2Flinux.git

sched_ext: Update selftests to drop ops.cpu_acquire/release()

ops.cpu_acquire/release() are deprecated by commit a3f5d4822253
("sched_ext: Allow scx_bpf_reenqueue_local() to be called from anywhere")
in favor of handling CPU preemption via the sched_switch tracepoint.

In the maximal selftest, replace the cpu_acquire/release stubs with a
minimal sched_switch TP program. Attach all non-struct_ops programs
(including the new TP) via maximal__attach() after disabling auto-attach
for the maximal_ops struct_ops map, which is managed manually in run().

Apply the same fix to reload_loop, which also uses the maximal skeleton.

Signed-off-by: Cheng-Yang Chou
Reviewed-by: Andrea Righi
Signed-off-by: Tejun Heo
---

diff --git a/tools/testing/selftests/sched_ext/maximal.bpf.c b/tools/testing/selftests/sched_ext/maximal.bpf.c
index a3aabeb82e6be..04a369078aac4 100644
--- a/tools/testing/selftests/sched_ext/maximal.bpf.c
+++ b/tools/testing/selftests/sched_ext/maximal.bpf.c
@@ -67,13 +67,12 @@ void BPF_STRUCT_OPS(maximal_set_cpumask, struct task_struct *p,
 void BPF_STRUCT_OPS(maximal_update_idle, s32 cpu, bool idle)
 {}
 
-void BPF_STRUCT_OPS(maximal_cpu_acquire, s32 cpu,
-		    struct scx_cpu_acquire_args *args)
-{}
-
-void BPF_STRUCT_OPS(maximal_cpu_release, s32 cpu,
-		    struct scx_cpu_release_args *args)
-{}
+SEC("tp_btf/sched_switch")
+int BPF_PROG(maximal_sched_switch, bool preempt, struct task_struct *prev,
+	     struct task_struct *next, unsigned int prev_state)
+{
+	return 0;
+}
 
 void BPF_STRUCT_OPS(maximal_cpu_online, s32 cpu)
 {}
@@ -150,8 +149,6 @@ struct sched_ext_ops maximal_ops = {
 	.set_weight = (void *) maximal_set_weight,
 	.set_cpumask = (void *) maximal_set_cpumask,
 	.update_idle = (void *) maximal_update_idle,
-	.cpu_acquire = (void *) maximal_cpu_acquire,
-	.cpu_release = (void *) maximal_cpu_release,
 	.cpu_online = (void *) maximal_cpu_online,
 	.cpu_offline = (void *) maximal_cpu_offline,
 	.init_task = (void *) maximal_init_task,
diff --git a/tools/testing/selftests/sched_ext/maximal.c b/tools/testing/selftests/sched_ext/maximal.c
index c6be50a9941d5..1dc3692246705 100644
--- a/tools/testing/selftests/sched_ext/maximal.c
+++ b/tools/testing/selftests/sched_ext/maximal.c
@@ -19,6 +19,9 @@ static enum scx_test_status setup(void **ctx)
 	SCX_ENUM_INIT(skel);
 	SCX_FAIL_IF(maximal__load(skel), "Failed to load skel");
 
+	bpf_map__set_autoattach(skel->maps.maximal_ops, false);
+	SCX_FAIL_IF(maximal__attach(skel), "Failed to attach skel");
+
 	*ctx = skel;
 
 	return SCX_TEST_PASS;
diff --git a/tools/testing/selftests/sched_ext/reload_loop.c b/tools/testing/selftests/sched_ext/reload_loop.c
index 308211d804364..49297b83d748d 100644
--- a/tools/testing/selftests/sched_ext/reload_loop.c
+++ b/tools/testing/selftests/sched_ext/reload_loop.c
@@ -23,6 +23,9 @@ static enum scx_test_status setup(void **ctx)
 	SCX_ENUM_INIT(skel);
 	SCX_FAIL_IF(maximal__load(skel), "Failed to load skel");
 
+	bpf_map__set_autoattach(skel->maps.maximal_ops, false);
+	SCX_FAIL_IF(maximal__attach(skel), "Failed to attach skel");
+
 	return SCX_TEST_PASS;
 }