selftests/bpf: Test epilogue patching when the main prog has multiple BPF_EXIT
author    Martin KaFai Lau <martin.lau@kernel.org>
Thu, 29 Aug 2024 21:08:31 +0000 (14:08 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Fri, 30 Aug 2024 01:15:45 +0000 (18:15 -0700)
This patch tests epilogue patching when the main prog has
multiple BPF_EXIT instructions. The verifier should have patched
the 2nd (and later) BPF_EXIT with a BPF_JA that jumps back to the
earlier patched epilogue instructions.

Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Link: https://lore.kernel.org/r/20240829210833.388152-10-martin.lau@linux.dev
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
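
Conceptually, the patching being tested looks roughly like the sketch
below. This is an illustration only, not the verifier's actual code:
once the epilogue has been generated for the first BPF_EXIT, every later
BPF_EXIT in the main prog is rewritten into a long jump (gotol,
BPF_JMP32 | BPF_JA) whose target is the first instruction of that
epilogue.

#include <stdbool.h>
#include <linux/bpf.h>

/* Illustration only -- not the verifier's implementation. */
static void patch_later_exits(struct bpf_insn *insn, int insn_cnt,
			      int epilogue_start)
{
	bool first_exit_seen = false;
	int i;

	for (i = 0; i < insn_cnt; i++) {
		if (insn[i].code != (BPF_JMP | BPF_EXIT))
			continue;
		if (!first_exit_seen) {
			/* the epilogue handles the 1st exit */
			first_exit_seen = true;
			continue;
		}
		/* later exits become a long jump back to the epilogue */
		insn[i].code = BPF_JMP32 | BPF_JA;      /* gotol */
		insn[i].imm = epilogue_start - i - 1;   /* pc-relative target */
		insn[i].off = 0;
	}
}

In the __xlated dump below, this shows up as insn 19, "gotol pc-12",
which lands on insn 8 (19 + 1 - 12 == 8), the first instruction of the
patched epilogue.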
tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
tools/testing/selftests/bpf/progs/epilogue_exit.c [new file with mode: 0644]

diff --git a/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c b/tools/testing/selftests/bpf/prog_tests/pro_epilogue.c
index f974ae9ac610c60c6eb7b04f44a5b0d7f48031be..509883e6823a98413c306154395e7e4f9e0d27ad 100644
@@ -5,6 +5,7 @@
 #include "pro_epilogue.skel.h"
 #include "epilogue_tailcall.skel.h"
 #include "pro_epilogue_goto_start.skel.h"
+#include "epilogue_exit.skel.h"
 
 struct st_ops_args {
        __u64 a;
@@ -53,6 +54,7 @@ void test_pro_epilogue(void)
 {
        RUN_TESTS(pro_epilogue);
        RUN_TESTS(pro_epilogue_goto_start);
+       RUN_TESTS(epilogue_exit);
        if (test__start_subtest("tailcall"))
                test_tailcall();
 }
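
For context, below is a hand-rolled sketch of roughly what
RUN_TESTS(epilogue_exit) automates for the __retval() programs, using
only standard libbpf skeleton and test-run APIs. It is an approximation:
the real test_loader additionally verifies the __success/__xlated
annotations, its exact attach sequence may differ, and the sketch
assumes bpf_testmod is already loaded so that
bpf_kfunc_st_ops_test_epilogue() can reach the registered struct_ops.

#include <errno.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "epilogue_exit.skel.h"

/* Sketch only; error handling is minimal. */
static int run_epilogue_exit_by_hand(void)
{
	LIBBPF_OPTS(bpf_test_run_opts, topts);
	struct bpf_link *link = NULL;
	struct epilogue_exit *skel;
	int err;

	skel = epilogue_exit__open_and_load();
	if (!skel)
		return -1;

	/* Attach the struct_ops map so the bpf_testmod kfunc can call
	 * into test_epilogue_exit (assumed to mirror what the harness
	 * does for .struct_ops.link maps).
	 */
	link = bpf_map__attach_struct_ops(skel->maps.epilogue_exit);
	if (!link) {
		err = -errno;
		goto out;
	}

	/* Run the SEC("syscall") prog and compare against __retval(20000) */
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.syscall_epilogue_exit0),
				     &topts);
	if (!err && topts.retval != 20000)
		err = -1;
out:
	bpf_link__destroy(link);
	epilogue_exit__destroy(skel);
	return err;
}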
diff --git a/tools/testing/selftests/bpf/progs/epilogue_exit.c b/tools/testing/selftests/bpf/progs/epilogue_exit.c
new file mode 100644
index 0000000..33d3a57
--- /dev/null
@@ -0,0 +1,82 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+#include "../bpf_testmod/bpf_testmod.h"
+#include "../bpf_testmod/bpf_testmod_kfunc.h"
+
+char _license[] SEC("license") = "GPL";
+
+__success
+/* save __u64 *ctx to stack */
+__xlated("0: *(u64 *)(r10 -8) = r1")
+/* main prog */
+__xlated("1: r1 = *(u64 *)(r1 +0)")
+__xlated("2: r2 = *(u64 *)(r1 +0)")
+__xlated("3: r3 = 0")
+__xlated("4: r4 = 1")
+__xlated("5: if r2 == 0x0 goto pc+10")
+__xlated("6: r0 = 0")
+__xlated("7: *(u64 *)(r1 +0) = r3")
+/* epilogue */
+__xlated("8: r1 = *(u64 *)(r10 -8)")
+__xlated("9: r1 = *(u64 *)(r1 +0)")
+__xlated("10: r6 = *(u64 *)(r1 +0)")
+__xlated("11: r6 += 10000")
+__xlated("12: *(u64 *)(r1 +0) = r6")
+__xlated("13: r0 = r6")
+__xlated("14: r0 *= 2")
+__xlated("15: exit")
+/* 2nd part of the main prog after the first exit */
+__xlated("16: *(u64 *)(r1 +0) = r4")
+__xlated("17: r0 = 1")
+/* Clear r1 so that an off-by-1 error in the patched
+ * jump would be caught, and to ensure it jumps back to
+ * the beginning of the epilogue, which re-initializes
+ * r1 with the ctx ptr.
+ */
+__xlated("18: r1 = 0")
+__xlated("19: gotol pc-12")
+SEC("struct_ops/test_epilogue_exit")
+__naked int test_epilogue_exit(void)
+{
+       asm volatile (
+       "r1 = *(u64 *)(r1 +0);"
+       "r2 = *(u64 *)(r1 +0);"
+       "r3 = 0;"
+       "r4 = 1;"
+       "if r2 == 0 goto +3;"
+       "r0 = 0;"
+       "*(u64 *)(r1 + 0) = r3;"
+       "exit;"
+       "*(u64 *)(r1 + 0) = r4;"
+       "r0 = 1;"
+       "r1 = 0;"
+       "exit;"
+       ::: __clobber_all);
+}
+
+SEC(".struct_ops.link")
+struct bpf_testmod_st_ops epilogue_exit = {
+       .test_epilogue = (void *)test_epilogue_exit,
+};
+
+SEC("syscall")
+__retval(20000)
+int syscall_epilogue_exit0(void *ctx)
+{
+       struct st_ops_args args = { .a = 1 };
+
+       return bpf_kfunc_st_ops_test_epilogue(&args);
+}
+
+SEC("syscall")
+__retval(20002)
+int syscall_epilogue_exit1(void *ctx)
+{
+       struct st_ops_args args = {};
+
+       return bpf_kfunc_st_ops_test_epilogue(&args);
+}
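
As a cross-check of the two __retval() values, here is a minimal C model
(illustration only, not part of the patch) of the main prog's two exit
paths plus the patched epilogue:

/* Models test_epilogue_exit plus the epilogue (a += 10000; ret = a * 2). */
static unsigned long long model_test_epilogue(unsigned long long a)
{
	if (a != 0)
		a = 0;          /* 1st exit path: args->a is cleared  */
	else
		a = 1;          /* 2nd exit path: args->a is set to 1 */
	a += 10000;             /* epilogue: a += 10000               */
	return a * 2;           /* epilogue: r0 = a * 2               */
}

/* model_test_epilogue(1) == 20000 -> syscall_epilogue_exit0 (.a = 1)
 * model_test_epilogue(0) == 20002 -> syscall_epilogue_exit1 (.a = 0)
 */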