selftests/bpf: Add stacktrace ips test for kprobe_multi/kretprobe_multi
Author:     Jiri Olsa <jolsa@kernel.org>
AuthorDate: Tue, 4 Nov 2025 21:54:04 +0000 (22:54 +0100)
Commit:     Alexei Starovoitov <ast@kernel.org>
CommitDate: Thu, 6 Nov 2025 01:05:19 +0000 (17:05 -0800)
Adding a test that attaches kprobe_multi/kretprobe_multi and verifies that
the ORC stacktrace matches the expected functions.

Adding a bpf_testmod_stacktrace_test function to the bpf_testmod kernel
module; it is called through several nested functions so we get a reliable
call path for the stacktrace.
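
For reference, the resulting call chain in bpf_testmod (see the diff below) is:

  bpf_testmod_test_read()
    -> bpf_testmod_stacktrace_test_1()
      -> bpf_testmod_stacktrace_test_2()
        -> bpf_testmod_stacktrace_test_3()
          -> bpf_testmod_stacktrace_test()   <- kprobe_multi/kretprobe_multi attach point

The test attaches to bpf_testmod_stacktrace_test() and expects the captured
stacktrace ips to resolve, innermost caller first, to bpf_testmod_stacktrace_test_3,
bpf_testmod_stacktrace_test_2, bpf_testmod_stacktrace_test_1 and bpf_testmod_test_read.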

The test covers only the ORC unwinder to keep it simple.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20251104215405.168643-4-jolsa@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/stacktrace_ips.c [new file with mode: 0644]
tools/testing/selftests/bpf/test_kmods/bpf_testmod.c

diff --git a/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
new file mode 100644
index 0000000..6fca459
--- /dev/null
+++ b/tools/testing/selftests/bpf/prog_tests/stacktrace_ips.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <test_progs.h>
+#include "stacktrace_ips.skel.h"
+
+#ifdef __x86_64__
+static int check_stacktrace_ips(int fd, __u32 key, int cnt, ...)
+{
+       __u64 ips[PERF_MAX_STACK_DEPTH];
+       struct ksyms *ksyms = NULL;
+       int i, err = 0;
+       va_list args;
+
+       /* sorted by addr */
+       ksyms = load_kallsyms_local();
+       if (!ASSERT_OK_PTR(ksyms, "load_kallsyms_local"))
+               return -1;
+
+       /* unlikely, but... */
+       if (!ASSERT_LT(cnt, PERF_MAX_STACK_DEPTH, "check_max"))
+               return -1;
+
+       err = bpf_map_lookup_elem(fd, &key, ips);
+       if (err)
+               goto out;
+
+       /*
+        * Compare all symbols provided via arguments with the stacktrace ips
+        * and their related symbol addresses.
+        */
+       va_start(args, cnt);
+
+       for (i = 0; i < cnt; i++) {
+               unsigned long val;
+               struct ksym *ksym;
+
+               val = va_arg(args, unsigned long);
+               ksym = ksym_search_local(ksyms, ips[i]);
+               if (!ASSERT_OK_PTR(ksym, "ksym_search_local"))
+                       break;
+               ASSERT_EQ(ksym->addr, val, "stack_cmp");
+       }
+
+       va_end(args);
+
+out:
+       free_kallsyms_local(ksyms);
+       return err;
+}
+
+static void test_stacktrace_ips_kprobe_multi(bool retprobe)
+{
+       LIBBPF_OPTS(bpf_kprobe_multi_opts, opts,
+               .retprobe = retprobe
+       );
+       LIBBPF_OPTS(bpf_test_run_opts, topts);
+       struct stacktrace_ips *skel;
+
+       skel = stacktrace_ips__open_and_load();
+       if (!ASSERT_OK_PTR(skel, "stacktrace_ips__open_and_load"))
+               return;
+
+       if (!skel->kconfig->CONFIG_UNWINDER_ORC) {
+               test__skip();
+               goto cleanup;
+       }
+
+       skel->links.kprobe_multi_test = bpf_program__attach_kprobe_multi_opts(
+                                                       skel->progs.kprobe_multi_test,
+                                                       "bpf_testmod_stacktrace_test", &opts);
+       if (!ASSERT_OK_PTR(skel->links.kprobe_multi_test, "bpf_program__attach_kprobe_multi_opts"))
+               goto cleanup;
+
+       trigger_module_test_read(1);
+
+       load_kallsyms();
+
+       check_stacktrace_ips(bpf_map__fd(skel->maps.stackmap), skel->bss->stack_key, 4,
+                            ksym_get_addr("bpf_testmod_stacktrace_test_3"),
+                            ksym_get_addr("bpf_testmod_stacktrace_test_2"),
+                            ksym_get_addr("bpf_testmod_stacktrace_test_1"),
+                            ksym_get_addr("bpf_testmod_test_read"));
+
+cleanup:
+       stacktrace_ips__destroy(skel);
+}
+
+static void __test_stacktrace_ips(void)
+{
+       if (test__start_subtest("kprobe_multi"))
+               test_stacktrace_ips_kprobe_multi(false);
+       if (test__start_subtest("kretprobe_multi"))
+               test_stacktrace_ips_kprobe_multi(true);
+}
+#else
+static void __test_stacktrace_ips(void)
+{
+       test__skip();
+}
+#endif
+
+void test_stacktrace_ips(void)
+{
+       __test_stacktrace_ips();
+}
diff --git a/tools/testing/selftests/bpf/progs/stacktrace_ips.c b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
new file mode 100644
index 0000000..e2eb309
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/stacktrace_ips.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018 Facebook
+
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+
+#ifndef PERF_MAX_STACK_DEPTH
+#define PERF_MAX_STACK_DEPTH         127
+#endif
+
+typedef __u64 stack_trace_t[PERF_MAX_STACK_DEPTH];
+
+struct {
+       __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+       __uint(max_entries, 16384);
+       __type(key, __u32);
+       __type(value, stack_trace_t);
+} stackmap SEC(".maps");
+
+extern bool CONFIG_UNWINDER_ORC __kconfig __weak;
+
+/*
+ * This function is here to have CONFIG_UNWINDER_ORC
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+       return CONFIG_UNWINDER_ORC ? 0 : 1;
+}
+
+__u32 stack_key;
+
+SEC("kprobe.multi")
+int kprobe_multi_test(struct pt_regs *ctx)
+{
+       stack_key = bpf_get_stackid(ctx, &stackmap, 0);
+       return 0;
+}
+
+char _license[] SEC("license") = "GPL";
diff --git a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
index 8074bc5f6f2004ab69781938da087292f3c8e267..ed0a4721d8fd59ba286ea5dbf49a6fe493c6272b 100644
--- a/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
+++ b/tools/testing/selftests/bpf/test_kmods/bpf_testmod.c
@@ -417,6 +417,30 @@ noinline int bpf_testmod_fentry_test11(u64 a, void *b, short c, int d,
        return a + (long)b + c + d + (long)e + f + g + h + i + j + k;
 }
 
+noinline void bpf_testmod_stacktrace_test(void)
+{
+       /* used for stacktrace test as attach function */
+       asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_3(void)
+{
+       bpf_testmod_stacktrace_test();
+       asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_2(void)
+{
+       bpf_testmod_stacktrace_test_3();
+       asm volatile ("");
+}
+
+noinline void bpf_testmod_stacktrace_test_1(void)
+{
+       bpf_testmod_stacktrace_test_2();
+       asm volatile ("");
+}
+
 int bpf_testmod_fentry_ok;
 
 noinline ssize_t
@@ -497,6 +521,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj,
                        21, 22, 23, 24, 25, 26) != 231)
                goto out;
 
+       bpf_testmod_stacktrace_test_1();
+
        bpf_testmod_fentry_ok = 1;
 out:
        return -EIO; /* always fail */
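
On an x86_64 kernel built with CONFIG_UNWINDER_ORC=y, the test can be run via the
usual BPF selftests runner, e.g. (assuming the standard test_progs filter syntax):

  ./test_progs -t stacktrace_ips

On other architectures, or when the ORC unwinder is not configured, the test skips
itself, matching the checks above.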