selftests/bpf: Clobber a lot of registers in tailcall_bpf2bpf_hierarchy tests
author    Ilya Leoshkevich <iii@linux.ibm.com>
          Wed, 13 Aug 2025 12:06:31 +0000 (14:06 +0200)
committer Daniel Borkmann <daniel@iogearbox.net>
          Mon, 18 Aug 2025 13:08:30 +0000 (15:08 +0200)
Clobbering a lot of registers and stack slots helps expose tail call
counter overwrite bugs in JITs.

Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20250813121016.163375-5-iii@linux.ibm.com
tools/testing/selftests/bpf/progs/bpf_test_utils.h [new file with mode: 0644]
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c

diff --git a/tools/testing/selftests/bpf/progs/bpf_test_utils.h b/tools/testing/selftests/bpf/progs/bpf_test_utils.h
new file mode 100644
index 0000000..f4e67b4
--- /dev/null
+++ b/tools/testing/selftests/bpf/progs/bpf_test_utils.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __BPF_TEST_UTILS_H__
+#define __BPF_TEST_UTILS_H__
+
+#include <bpf/bpf_helpers.h>
+#include "bpf_misc.h"
+
+/* Clobber as many native registers and stack slots as possible. */
+static __always_inline void clobber_regs_stack(void)
+{
+       char tmp_str[] = "123456789";
+       unsigned long tmp;
+
+       bpf_strtoul(tmp_str, sizeof(tmp_str), 0, &tmp);
+       __sink(tmp);
+}
+
+#endif
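
Why this helper works: a BPF helper call clobbers the scratch registers R0-R5 (and whatever native registers the JIT maps them to), the on-stack string buffer passed to bpf_strtoul forces the compiler to touch stack slots as well, and __sink() marks the result as used. The resulting register and stack pressure makes the JIT spill and reuse more native registers and stack slots, which exposes cases where the tail call counter's location gets overwritten. For orientation, here is a minimal, self-contained sketch of the usage pattern, assuming the usual selftest conventions; the map and program names are illustrative only, and the real call sites are in the hunks that follow.

/* Hypothetical sketch, not part of this patch. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_test_utils.h"

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 1);
	__uint(key_size, sizeof(__u32));
	__uint(value_size, sizeof(__u32));
} jmp_table SEC(".maps");

static __noinline int subprog_tail(struct __sk_buff *skb)
{
	/* Slot 0 is assumed to be populated by the userspace side of the test. */
	bpf_tail_call_static(skb, &jmp_table, 0);
	return 0;
}

SEC("tc")
int entry(struct __sk_buff *skb)
{
	/* Dirty as many registers and stack slots as possible before the
	 * bpf2bpf call and the tail call, so that a JIT keeping the tail
	 * call counter in a clobbered location misbehaves visibly.
	 */
	clobber_regs_stack();

	subprog_tail(skb);
	return 0;
}

char __license[] SEC("license") = "GPL";
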
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
index 327ca395e8601a75899d3aed5767ce9c86c260ec..d556b19413d7b7690bb4fe92cf3d3938056a2f55 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy1.c
@@ -2,6 +2,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 #include "bpf_legacy.h"
+#include "bpf_test_utils.h"
 
 struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -24,6 +25,8 @@ int entry(struct __sk_buff *skb)
 {
        int ret = 1;
 
+       clobber_regs_stack();
+
        count++;
        subprog_tail(skb);
        subprog_tail(skb);
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
index 72fd0d577506a21f41be92f083a3fed4711f73af..ae94c9c70ab7d5e54a90fafb16283c988f441f52 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy2.c
@@ -2,6 +2,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 #include "bpf_misc.h"
+#include "bpf_test_utils.h"
 
 int classifier_0(struct __sk_buff *skb);
 int classifier_1(struct __sk_buff *skb);
@@ -60,6 +61,8 @@ int tailcall_bpf2bpf_hierarchy_2(struct __sk_buff *skb)
 {
        int ret = 0;
 
+       clobber_regs_stack();
+
        subprog_tail0(skb);
        subprog_tail1(skb);
 
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
index a7fb91cb05b736d863fd57aeaf1275c3447578b4..56b6b009984072291fdbcc9df1b95baf90f454ce 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy3.c
@@ -2,6 +2,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 #include "bpf_misc.h"
+#include "bpf_test_utils.h"
 
 int classifier_0(struct __sk_buff *skb);
 
@@ -53,6 +54,8 @@ int tailcall_bpf2bpf_hierarchy_3(struct __sk_buff *skb)
 {
        int ret = 0;
 
+       clobber_regs_stack();
+
        bpf_tail_call_static(skb, &jmp_table0, 0);
 
        __sink(ret);
diff --git a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
index c87f9ca982d3ee8d539f0556084e9fea674adf04..5261395713cd5856e6a690963e0aa386bb3386b8 100644
--- a/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
+++ b/tools/testing/selftests/bpf/progs/tailcall_bpf2bpf_hierarchy_fentry.c
@@ -4,6 +4,7 @@
 #include "vmlinux.h"
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
+#include "bpf_test_utils.h"
 
 struct {
        __uint(type, BPF_MAP_TYPE_PROG_ARRAY);
@@ -24,6 +25,8 @@ int subprog_tail(void *ctx)
 SEC("fentry/dummy")
 int BPF_PROG(fentry, struct sk_buff *skb)
 {
+       clobber_regs_stack();
+
        count++;
        subprog_tail(ctx);
        subprog_tail(ctx);