Add a new selftest suite `exe_ctx` to verify the accuracy of the
bpf_in_task(), bpf_in_hardirq(), and bpf_in_serving_softirq() helpers
introduced in bpf_experimental.h.
Testing these execution contexts deterministically requires crossing
context boundaries within a single CPU. To achieve this, the test
implements a "Trigger-Observer" pattern using bpf_testmod:
1. Trigger: A BPF syscall program calls a new bpf_testmod kfunc
bpf_kfunc_trigger_ctx_check().
2. Task to HardIRQ: The kfunc uses irq_work_queue() to trigger a
self-IPI on the local CPU.
3. HardIRQ to SoftIRQ: The irq_work handler calls a dummy function
(observed by BPF fentry) and then schedules a tasklet to
transition into SoftIRQ context.
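In short, the chain of contexts on a single CPU looks like:

  syscall prog (task)              -- bpf_in_task()
    -> irq_work handler (hardirq)  -- fentry: bpf_in_hardirq()
      -> tasklet (softirq)         -- fentry: bpf_in_serving_softirq()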
The user-space runner ensures determinism by pinning itself to CPU 0
before execution, forcing the entire interrupt chain to remain on a
single core. Dummy noinline functions with compiler barriers are
added to bpf_testmod.c to serve as stable attachment points for
fentry programs. A retry loop in user space waits for the
asynchronous SoftIRQ to complete.
Note that testing on s390x is avoided because supporting those helpers
purely in BPF on s390x is not possible at this point.
Reviewed-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Changwoo Min <changwoo@igalia.com>
Link: https://lore.kernel.org/r/20260125115413.117502-3-changwoo@igalia.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
# TEMPORARY
# Alphabetical order
+exe_ctx # execution context check (e.g., hardirq, softirq)
get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace)
stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?)
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2026 Valve Corporation.
+ * Author: Changwoo Min <changwoo@igalia.com>
+ */
+
+#include <test_progs.h>
+#include <sys/syscall.h>
+#include "test_ctx.skel.h"
+
+void test_exe_ctx(void)
+{
+	LIBBPF_OPTS(bpf_test_run_opts, opts);
+	cpu_set_t old_cpuset, target_cpuset;
+	bool affinity_saved = false;
+	struct test_ctx *skel;
+	int err, prog_fd;
+
+	/* 1. Pin the current process to CPU 0. */
+	if (sched_getaffinity(0, sizeof(old_cpuset), &old_cpuset) == 0) {
+		affinity_saved = true;
+		CPU_ZERO(&target_cpuset);
+		CPU_SET(0, &target_cpuset);
+		ASSERT_OK(sched_setaffinity(0, sizeof(target_cpuset),
+					    &target_cpuset), "setaffinity");
+	}
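+	/*
+	 * irq_work and tasklets run on the CPU that queued them, so the
+	 * pinning above keeps the whole trigger chain on one core.
+	 */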
+
+	skel = test_ctx__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "skel_load"))
+		goto restore_affinity;
+
+	err = test_ctx__attach(skel);
+	if (!ASSERT_OK(err, "skel_attach"))
+		goto cleanup;
+
+	/* 2. When we run this, the kernel will execute the BPF prog on CPU 0. */
+	prog_fd = bpf_program__fd(skel->progs.trigger_all_contexts);
+	err = bpf_prog_test_run_opts(prog_fd, &opts);
+	ASSERT_OK(err, "test_run_trigger");
+
+	/* 3. Wait for the local CPU's softirq/tasklet to finish. */
+	for (int i = 0; i < 1000; i++) {
+		if (skel->bss->count_task > 0 &&
+		    skel->bss->count_hardirq > 0 &&
+		    skel->bss->count_softirq > 0)
+			break;
+		usleep(1000); /* Wait 1ms per iteration, up to 1 sec total */
+	}
+
+	/* On CPU 0, these should now all be non-zero. */
+	ASSERT_GT(skel->bss->count_task, 0, "task_ok");
+	ASSERT_GT(skel->bss->count_hardirq, 0, "hardirq_ok");
+	ASSERT_GT(skel->bss->count_softirq, 0, "softirq_ok");
+
+cleanup:
+	test_ctx__destroy(skel);
+
+restore_affinity:
+	if (affinity_saved)
+		ASSERT_OK(sched_setaffinity(0, sizeof(old_cpuset), &old_cpuset),
+			  "restore_affinity");
+}
--- /dev/null
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2026 Valve Corporation.
+ * Author: Changwoo Min <changwoo@igalia.com>
+ */
+
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_experimental.h"
+
+char _license[] SEC("license") = "GPL";
+
+extern void bpf_kfunc_trigger_ctx_check(void) __ksym;
+
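+/*
+ * Per-context hit counters; each is incremented only when the
+ * corresponding context-check helper returns true.
+ */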
+int count_hardirq;
+int count_softirq;
+int count_task;
+
+/* Triggered via bpf_prog_test_run() from user space */
+SEC("syscall")
+int trigger_all_contexts(void *ctx)
+{
+	if (bpf_in_task())
+		__sync_fetch_and_add(&count_task, 1);
+
+	/* Trigger a hardirq (which then raises a softirq) for the test. */
+	bpf_kfunc_trigger_ctx_check();
+	return 0;
+}
+
+/* Observer for HardIRQ */
+SEC("fentry/bpf_testmod_test_hardirq_fn")
+int BPF_PROG(on_hardirq)
+{
+	if (bpf_in_hardirq())
+		__sync_fetch_and_add(&count_hardirq, 1);
+	return 0;
+}
+
+/* Observer for SoftIRQ */
+SEC("fentry/bpf_testmod_test_softirq_fn")
+int BPF_PROG(on_softirq)
+{
+	if (bpf_in_serving_softirq())
+		__sync_fetch_and_add(&count_softirq, 1);
+	return 0;
+}
__bpf_kfunc int bpf_kfunc_implicit_arg_legacy(int a, int b, struct bpf_prog_aux *aux);
__bpf_kfunc int bpf_kfunc_implicit_arg_legacy_impl(int a, int b, struct bpf_prog_aux *aux);
+/* Dummy hook targets: stable attachment points for fentry programs */
+noinline void bpf_testmod_test_hardirq_fn(void) { barrier(); }
+noinline void bpf_testmod_test_softirq_fn(void) { barrier(); }
+
+/*
+ * Tasklet for SoftIRQ context: tasklets run from softirq on the same
+ * CPU that scheduled them.
+ */
+static void ctx_check_tasklet_fn(struct tasklet_struct *t)
+{
+	bpf_testmod_test_softirq_fn();
+}
+
+DECLARE_TASKLET(ctx_check_tasklet, ctx_check_tasklet_fn);
+
+/*
+ * IRQ work for HardIRQ context: irq_work_queue() raises a self-IPI, so
+ * the handler runs in hardirq on the queueing CPU before kicking the
+ * tasklet.
+ */
+static void ctx_check_irq_fn(struct irq_work *work)
+{
+	bpf_testmod_test_hardirq_fn();
+	tasklet_schedule(&ctx_check_tasklet);
+}
+
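+/*
+ * IRQ_WORK_INIT_HARD keeps the callback in hard interrupt context even
+ * on PREEMPT_RT, where plain irq_work can run from a kernel thread.
+ */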
+static struct irq_work ctx_check_irq = IRQ_WORK_INIT_HARD(ctx_check_irq_fn);
+
+/* The kfunc trigger */
+__bpf_kfunc void bpf_kfunc_trigger_ctx_check(void)
+{
+	irq_work_queue(&ctx_check_irq);
+}
+
BTF_KFUNCS_START(bpf_testmod_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_testmod_test_mod_kfunc)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy, KF_IMPLICIT_ARGS)
BTF_ID_FLAGS(func, bpf_kfunc_implicit_arg_legacy_impl)
+BTF_ID_FLAGS(func, bpf_kfunc_trigger_ctx_check)
BTF_KFUNCS_END(bpf_testmod_check_kfunc_ids)
static int bpf_testmod_ops_init(struct btf *btf)
	while (refcount_read(&prog_test_struct.cnt) > 1)
		msleep(20);
+	/* Clean up the irq_work and tasklet */
+	irq_work_sync(&ctx_check_irq);
+	tasklet_kill(&ctx_check_tasklet);
+
	bpf_kfunc_close_sock();
	sysfs_remove_bin_file(kernel_kobj, &bin_attr_bpf_testmod_file);
	unregister_bpf_testmod_uprobe();
struct prog_test_member *bpf_kfunc_get_default_trusted_ptr_test(void) __ksym;
void bpf_kfunc_put_default_trusted_ptr_test(struct prog_test_member *trusted_ptr) __ksym;
+void bpf_testmod_test_hardirq_fn(void);
+void bpf_testmod_test_softirq_fn(void);
+void bpf_kfunc_trigger_ctx_check(void) __ksym;
+
#endif /* _BPF_TESTMOD_KFUNC_H */