git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
selftests/bpf: Add tests for execution context helpers
author: Changwoo Min <changwoo@igalia.com>
Sun, 25 Jan 2026 11:54:13 +0000 (20:54 +0900)
committer: Alexei Starovoitov <ast@kernel.org>
Sun, 25 Jan 2026 16:20:50 +0000 (08:20 -0800)
Add a new selftest suite `exe_ctx` to verify the accuracy of the
bpf_in_task(), bpf_in_hardirq(), and bpf_in_serving_softirq() helpers
introduced in bpf_experimental.h.

Testing these execution contexts deterministically requires crossing
context boundaries within a single CPU. To achieve this, the test
implements a "Trigger-Observer" pattern using bpf_testmod:

1. Trigger: A BPF syscall program calls a new bpf_testmod kfunc
   bpf_kfunc_trigger_ctx_check().
2. Task to HardIRQ: The kfunc uses irq_work_queue() to trigger a
   self-IPI on the local CPU.
3. HardIRQ to SoftIRQ: The irq_work handler calls a dummy function
   (observed by BPF fentry) and then schedules a tasklet to
   transition into SoftIRQ context.

The user-space runner ensures determinism by pinning itself to CPU 0
before execution, forcing the entire interrupt chain to remain on a
single core. Dummy noinline functions with compiler barriers are
added to bpf_testmod.c to serve as stable attachment points for
fentry programs. A retry loop is used in user-space to wait for the
asynchronous SoftIRQ to complete.

Note that testing on s390x is avoided because supporting those helpers
purely in BPF on s390x is not possible at this point.

Reviewed-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Changwoo Min <changwoo@igalia.com>
Link: https://lore.kernel.org/r/20260125115413.117502-3-changwoo@igalia.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/DENYLIST.s390x
tools/testing/selftests/bpf/prog_tests/exe_ctx.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/test_ctx.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
tools/testing/selftests/bpf/test_kmods/bpf_testmod_kfunc.h

index a17baf8c6fd75ac044b6dd942d66b4c612bdd161..f7e1e5f5511cc59cab206dd9649492382252fffd 100644 (file)
@@ -1,4 +1,5 @@
 # TEMPORARY
 # Alphabetical order
+exe_ctx                                  # execution context check (e.g., hardirq, softirq, etc)
 get_stack_raw_tp                         # user_stack corrupted user stack                                             (no backchain userspace)
 stacktrace_build_id                      # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2                   (?)
diff --git a/tools/testing/selftests/bpf/prog_tests/exe_ctx.c b/tools/testing/selftests/bpf/prog_tests/exe_ctx.c
new file mode 100644 (file)
index 0000000..aed6a6e
--- /dev/null
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2026 Valve Corporation.
+ * Author: Changwoo Min <changwoo@igalia.com>
+ */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "test_ctx.skel.h"
+
+void test_exe_ctx(void)
+{
+       LIBBPF_OPTS(bpf_test_run_opts, opts);
+       cpu_set_t old_cpuset, target_cpuset;
+       struct test_ctx *skel;
+       int err, prog_fd;
+
+       /* 1. Pin the current process to CPU 0. */
+       if (sched_getaffinity(0, sizeof(old_cpuset), &old_cpuset) == 0) {
+               CPU_ZERO(&target_cpuset);
+               CPU_SET(0, &target_cpuset);
+               ASSERT_OK(sched_setaffinity(0, sizeof(target_cpuset),
+                                           &target_cpuset), "setaffinity");
+       }
+
+       skel = test_ctx__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "skel_load"))
+               goto restore_affinity;
+
+       err = test_ctx__attach(skel);
+       if (!ASSERT_OK(err, "skel_attach"))
+               goto cleanup;
+
+       /* 2. When we run this, the kernel will execute the BPF prog on CPU 0. */
+       prog_fd = bpf_program__fd(skel->progs.trigger_all_contexts);
+       err = bpf_prog_test_run_opts(prog_fd, &opts);
+       ASSERT_OK(err, "test_run_trigger");
+
+       /* 3. Wait for the local CPU's softirq/tasklet to finish. */
+       for (int i = 0; i < 1000; i++) {
+               if (skel->bss->count_task > 0 &&
+                   skel->bss->count_hardirq > 0 &&
+                   skel->bss->count_softirq > 0)
+                       break;
+               usleep(1000); /* Wait 1ms per iteration, up to 1 sec total */
+       }
+
+       /* On CPU 0, these should now all be non-zero. */
+       ASSERT_GT(skel->bss->count_task, 0, "task_ok");
+       ASSERT_GT(skel->bss->count_hardirq, 0, "hardirq_ok");
+       ASSERT_GT(skel->bss->count_softirq, 0, "softirq_ok");
+
+cleanup:
+       test_ctx__destroy(skel);
+
+restore_affinity:
+       ASSERT_OK(sched_setaffinity(0, sizeof(old_cpuset), &old_cpuset),
+                 "restore_affinity");
+}
diff --git a/tools/testing/selftests/bpf/progs/test_ctx.c b/tools/testing/selftests/bpf/progs/test_ctx.c
new file mode 100644 (file)
index 0000000..7d49955
--- /dev/null
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2026 Valve Corporation.
+ * Author: Changwoo Min <changwoo@igalia.com>
+ */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern void bpf_kfunc_trigger_ctx_check(void) __ksym;
+
+int count_hardirq;
+int count_softirq;
+int count_task;
+
+/* Triggered via bpf_prog_test_run from user-space */
+SEC("syscall")
+int trigger_all_contexts(void *ctx)
+{
+       if (bpf_in_task())
+               __sync_fetch_and_add(&count_task, 1);
+
+       /* Trigger the firing of a hardirq and softirq for test. */
+       bpf_kfunc_trigger_ctx_check();
+       return 0;
+}
+
+/* Observer for HardIRQ */
+SEC("fentry/bpf_testmod_test_hardirq_fn")
+int BPF_PROG(on_hardirq)
+{
+       if (bpf_in_hardirq())
+               __sync_fetch_and_add(&count_hardirq, 1);
+       return 0;
+}
+
+/* Observer for SoftIRQ */
+SEC("fentry/bpf_testmod_test_softirq_fn")
+int BPF_PROG(on_softirq)
+{
+       if (bpf_in_serving_softirq())
+               __sync_fetch_and_add(&count_softirq, 1);
+       return 0;
+}
index 77a81fa8ec6a3165cfec927de8ed062fa3118ea6..186a25ab429a68f6772a6bf062599201c137ffad 100644 (file)
@@ -1168,6 +1168,33 @@ __bpf_kfunc int bpf_kfunc_implicit_arg(int a, struct bpf_prog_aux *aux);
 __bpf_kfunc int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux);
 __bpf_kfunc int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux);
 
+/* hook targets */
+noinline void bpf_testmod_test_hardirq_fn(void) { barrier(); }
+noinline void bpf_testmod_test_softirq_fn(void) { barrier(); }
+
+/* Tasklet for SoftIRQ context */
+static void ctx_check_tasklet_fn(struct tasklet_struct *t)
+{
+       bpf_testmod_test_softirq_fn();
+}
+
+DECLARE_TASKLET(ctx_check_tasklet, ctx_check_tasklet_fn);
+
+/* IRQ Work for HardIRQ context */
+static void ctx_check_irq_fn(struct irq_work *work)
+{
+       bpf_testmod_test_hardirq_fn();
+       tasklet_schedule(&ctx_check_tasklet);
+}
+
+static struct irq_work ctx_check_irq = IRQ_WORK_INIT_HARD(ctx_check_irq_fn);
+
+/* The kfunc trigger */
+__bpf_kfunc void bpf_kfunc_trigger_ctx_check(void)
+{
+       irq_work_queue(&ctx_check_irq);
+}
+
 BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
 BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
 BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
@@ -1213,6 +1240,7 @@ BTF_ID_FLAGS(func, bpf_kfunc_multi_st_ops_test_1_assoc, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy_impl)
+BTF_ID_FLAGS(func, bpf_kfunc_trigger_ctx_check)
 BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
 
 static int bpf_testmod_ops_init(struct btf *btf)
@@ -1844,6 +1872,10 @@ static void bpf_testmod_exit(void)
        while (refcount_read(&prog_test_struct.cnt) > 1)
                msleep(20);
 
+       /* Clean up irqwork and tasklet */
+       irq_work_sync(&ctx_check_irq);
+       tasklet_kill(&ctx_check_tasklet);
+
        bpf_kfunc_close_sock();
        sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
        unregister_bpf_testmod_uprobe();
index 10f89f06245f16c542236802b75ecd6805b70f13..d5c5454e257ecb296211fcb1b315e45b1171a17e 100644 (file)
@@ -169,4 +169,8 @@ extern int bpf_kfunc_multi_st_ops_test_1_assoc(struct st_ops_args *args) __weak
 struct prog_test_member *bpf_kfunc_get_default_trusted_ptr_test(void) __ksym;
 void bpf_kfunc_put_default_trusted_ptr_test(struct prog_test_member *trusted_ptr) __ksym;
 
+void bpf_testmod_test_hardirq_fn(void);
+void bpf_testmod_test_softirq_fn(void);
+void bpf_kfunc_trigger_ctx_check(void) __ksym;
+
 #endif /* _BPF_TESTMOD_KFUNC_H */