selftests/sched_ext: Add test for scx_bpf_select_cpu_and() via test_run
author    Andrea Righi <arighi@nvidia.com>
          Thu, 15 May 2025 19:11:45 +0000 (21:11 +0200)
committer Tejun Heo <tj@kernel.org>
          Tue, 20 May 2025 20:24:17 +0000 (10:24 -1000)
Update the allowed_cpus selftest to include a check to validate the
behavior of scx_bpf_select_cpu_and() when invoked via a BPF test_run
call.

Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
tools/testing/selftests/sched_ext/allowed_cpus.bpf.c
tools/testing/selftests/sched_ext/allowed_cpus.c
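
For context, scx_bpf_select_cpu_and() takes the task, the previously used CPU, the wakeup flags, a mask of allowed CPUs, and idle-pick flags. Paraphrased from the sched_ext idle-selection code (treat this as a sketch, not the authoritative declaration), its prototype looks roughly like:

    s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
                               const struct cpumask *cpus_allowed, u64 flags);

The new SEC("syscall") program in the first hunk passes the CPU it happens to run on as prev_cpu, the task's own cpus_ptr as the allowed mask, and zero for both flag arguments, so a non-negative return value is the chosen idle CPU while a negative value signals that no suitable CPU was found (or another error). The user-space side then drives that program through bpf_prog_test_run_opts() with the pid packed into ctx_in. Running the updated test should follow the usual sched_ext selftest flow, e.g. make -C tools/testing/selftests/sched_ext and then sudo ./runner -t allowed_cpus on a kernel built with CONFIG_SCHED_CLASS_EXT, though the exact runner invocation may vary by tree.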

index 39d57f7f74099b0d3fcc2c941f6ffd093a781854..35923e74a2ec3d8e1ef417afca993aa2d3cabca5 100644 (file)
@@ -111,6 +111,29 @@ void BPF_STRUCT_OPS(allowed_cpus_exit, struct scx_exit_info *ei)
        UEI_RECORD(uei, ei);
 }
 
+struct task_cpu_arg {
+       pid_t pid;
+};
+
+SEC("syscall")
+int select_cpu_from_user(struct task_cpu_arg *input)
+{
+       struct task_struct *p;
+       int cpu;
+
+       p = bpf_task_from_pid(input->pid);
+       if (!p)
+               return -EINVAL;
+
+       bpf_rcu_read_lock();
+       cpu = scx_bpf_select_cpu_and(p, bpf_get_smp_processor_id(), 0, p->cpus_ptr, 0);
+       bpf_rcu_read_unlock();
+
+       bpf_task_release(p);
+
+       return cpu;
+}
+
 SEC(".struct_ops.link")
 struct sched_ext_ops allowed_cpus_ops = {
        .select_cpu             = (void *)allowed_cpus_select_cpu,
index a001a3a0e9f1f54f3df053f8709ee83def94a9ba..093f285ab4baef43118db847f4f6268620e99ac0 100644 (file)
@@ -23,6 +23,30 @@ static enum scx_test_status setup(void **ctx)
        return SCX_TEST_PASS;
 }
 
+static int test_select_cpu_from_user(const struct allowed_cpus *skel)
+{
+       int fd, ret;
+       __u64 args[1];
+
+       LIBBPF_OPTS(bpf_test_run_opts, attr,
+               .ctx_in = args,
+               .ctx_size_in = sizeof(args),
+       );
+
+       args[0] = getpid();
+       fd = bpf_program__fd(skel->progs.select_cpu_from_user);
+       if (fd < 0)
+               return fd;
+
+       ret = bpf_prog_test_run_opts(fd, &attr);
+       if (ret < 0)
+               return ret;
+
+       fprintf(stderr, "%s: CPU %d\n", __func__, attr.retval);
+
+       return 0;
+}
+
 static enum scx_test_status run(void *ctx)
 {
        struct allowed_cpus *skel = ctx;
@@ -31,6 +55,9 @@ static enum scx_test_status run(void *ctx)
        link = bpf_map__attach_struct_ops(skel->maps.allowed_cpus_ops);
        SCX_FAIL_IF(!link, "Failed to attach scheduler");
 
+       /* Pick an idle CPU from user-space */
+       SCX_FAIL_IF(test_select_cpu_from_user(skel), "Failed to pick idle CPU");
+
        /* Just sleeping is fine, plenty of scheduling events happening */
        sleep(1);