git.ipfire.org Git - thirdparty/linux.git/commitdiff
selftests/bpf: Add tests for bpf_object__prepare
authorMykyta Yatsenko <yatsenko@meta.com>
Mon, 3 Mar 2025 13:57:52 +0000 (13:57 +0000)
committerAlexei Starovoitov <ast@kernel.org>
Sat, 15 Mar 2025 18:48:28 +0000 (11:48 -0700)
Add selftests, checking that running bpf_object__prepare successfully
creates maps before load step.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20250303135752.158343-5-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/prepare.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/prepare.c [new file with mode: 0644]

diff --git a/tools/testing/selftests/bpf/prog_tests/prepare.c b/tools/testing/selftests/bpf/prog_tests/prepare.c
new file mode 100644 (file)
index 0000000..fb5cdad
--- /dev/null
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+
+#include <test_progs.h>
+#include <network_helpers.h>
+#include "prepare.skel.h"
+
+/* Return true iff every map in @obj already has a valid FD,
+ * i.e. the object has been through the prepare (map-creation) step.
+ */
+static bool check_prepared(struct bpf_object *obj)
+{
+       const struct bpf_map *map;
+
+       bpf_object__for_each_map(map, obj) {
+               if (bpf_map__fd(map) < 0)
+                       return false;
+       }
+
+       return true;
+}
+
+/* Verify that bpf_object__prepare() alone (no load) creates all maps:
+ * FDs are invalid after open, valid after prepare.
+ */
+static void test_prepare_no_load(void)
+{
+       struct prepare *skel;
+       int err;
+
+       skel = prepare__open();
+       if (!ASSERT_OK_PTR(skel, "prepare__open"))
+               return;
+
+       /* Before prepare, no map should have an FD yet. */
+       if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared"))
+               goto cleanup;
+
+       /* Check prepare's result first so a failure is reported as such,
+        * not as a confusing check_prepared mismatch.
+        */
+       err = bpf_object__prepare(skel->obj);
+       if (!ASSERT_OK(err, "bpf_object__prepare"))
+               goto cleanup;
+
+       /* After a successful prepare, every map must have a valid FD. */
+       if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared"))
+               goto cleanup;
+
+cleanup:
+       prepare__destroy(skel);
+}
+
+/* Verify the full open -> prepare -> load -> run flow: maps are created
+ * by the explicit prepare step, and a subsequent skeleton load and
+ * test run still succeed.
+ */
+static void test_prepare_load(void)
+{
+       struct prepare *skel;
+       int err, prog_fd;
+       LIBBPF_OPTS(bpf_test_run_opts, topts,
+                   .data_in = &pkt_v4,
+                   .data_size_in = sizeof(pkt_v4),
+       );
+
+       skel = prepare__open();
+       if (!ASSERT_OK_PTR(skel, "prepare__open"))
+               return;
+
+       /* Before prepare, no map should have an FD yet. */
+       if (!ASSERT_FALSE(check_prepared(skel->obj), "not check_prepared"))
+               goto cleanup;
+
+       err = bpf_object__prepare(skel->obj);
+       if (!ASSERT_OK(err, "bpf_object__prepare"))
+               goto cleanup;
+
+       /* Loading an already-prepared object must succeed. */
+       err = prepare__load(skel);
+       if (!ASSERT_OK(err, "prepare__load"))
+               goto cleanup;
+
+       /* All maps should have valid FDs after prepare + load. */
+       if (!ASSERT_TRUE(check_prepared(skel->obj), "check_prepared"))
+               goto cleanup;
+
+       prog_fd = bpf_program__fd(skel->progs.program);
+       if (!ASSERT_GE(prog_fd, 0, "prog_fd"))
+               goto cleanup;
+
+       /* Run the program once on a dummy IPv4 packet; the BPF side
+        * returns 0 and clears its global err.
+        */
+       err = bpf_prog_test_run_opts(prog_fd, &topts);
+       if (!ASSERT_OK(err, "test_run_opts err"))
+               goto cleanup;
+
+       if (!ASSERT_OK(topts.retval, "test_run_opts retval"))
+               goto cleanup;
+
+       ASSERT_EQ(skel->bss->err, 0, "err");
+
+cleanup:
+       prepare__destroy(skel);
+}
+
+/* Test entry point: register both prepare subtests. */
+void test_prepare(void)
+{
+       if (test__start_subtest("prepare_load"))
+               test_prepare_load();
+       if (test__start_subtest("prepare_no_load"))
+               test_prepare_no_load();
+}
diff --git a/tools/testing/selftests/bpf/progs/prepare.c b/tools/testing/selftests/bpf/progs/prepare.c
new file mode 100644 (file)
index 0000000..1f1dd54
--- /dev/null
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta */
+#include <vmlinux.h>
+#include <bpf/bpf_helpers.h>
+//#include <bpf/bpf_tracing.h>
+
+char _license[] SEC("license") = "GPL";
+
+/* Written by the program below; userspace reads it via skel->bss->err. */
+int err;
+
+/* These maps exist only so the prepare step has maps to create;
+ * the program never accesses them.
+ */
+struct {
+       __uint(type, BPF_MAP_TYPE_RINGBUF);
+       __uint(max_entries, 4096);
+} ringbuf SEC(".maps");
+
+struct {
+       __uint(type, BPF_MAP_TYPE_ARRAY);
+       __uint(max_entries, 1);
+       __type(key, __u32);
+       __type(value, __u32);
+} array_map SEC(".maps");
+
+/* Minimal cgroup egress program: clears the global err and returns 0.
+ * The selftest asserts test-run retval == 0 and err == 0, so the return
+ * value here is intentional — do not change it to 1.
+ */
+SEC("cgroup_skb/egress")
+int program(struct __sk_buff *skb)
+{
+       err = 0;
+       return 0;
+}