selftests/bpf: add bpf task work stress tests
author Mykyta Yatsenko <yatsenko@meta.com>
Tue, 23 Sep 2025 11:24:04 +0000 (12:24 +0100)
committer Alexei Starovoitov <ast@kernel.org>
Tue, 23 Sep 2025 14:34:39 +0000 (07:34 -0700)
Add stress tests for the BPF task-work scheduling kfuncs. The tests spawn
multiple threads that concurrently schedule task_work callbacks against
the same and different map values to exercise the kfuncs under high
contention, and verify that every successfully scheduled callback is
executed with no drops. The stress duration defaults to one second and
can be raised via the BPF_TASK_WORK_TEST_TIME environment variable.

Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Link: https://lore.kernel.org/r/20250923112404.668720-10-mykyta.yatsenko5@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/prog_tests/task_work_stress.c [new file with mode: 0644]
tools/testing/selftests/bpf/progs/task_work_stress.c [new file with mode: 0644]

diff --git a/tools/testing/selftests/bpf/prog_tests/task_work_stress.c b/tools/testing/selftests/bpf/prog_tests/task_work_stress.c
new file mode 100644 (file)
index 0000000..450d17d
--- /dev/null
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+#include <test_progs.h>
+#include <string.h>
+#include <stdio.h>
+#include "task_work_stress.skel.h"
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
+#include <time.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdatomic.h>
+
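+/* Per-thread state: the BPF program to trigger and a shared exit flag */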
+struct test_data {
+       int prog_fd;
+       atomic_int exit;
+};
+
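+/* Worker thread: repeatedly trigger the BPF program until the exit flag is set or a run fails */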
+static void *runner(void *test_data)
+{
+       struct test_data *td = test_data;
+       int err = 0;
+       LIBBPF_OPTS(bpf_test_run_opts, opts);
+
+       while (!err && !atomic_load(&td->exit))
+               err = bpf_prog_test_run_opts(td->prog_fd, &opts);
+
+       return NULL;
+}
+
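+/* Read a non-negative integer from environment variable str, falling back to def */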
+static int get_env_int(const char *str, int def)
+{
+       const char *s = getenv(str);
+       char *end;
+       long retval;
+
+       if (!s || !*s)
+               return def;
+       errno = 0;
+       retval = strtol(s, &end, 10);
+       if (errno || *end || retval < 0 || retval > INT_MAX)
+               return def;
+       return retval;
+}
+
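+/*
+ * Spawn nthreads scheduler threads, plus an optional deleter thread, that
+ * hammer the task_work kfuncs through bpf_prog_test_run() for test_time_s
+ * seconds, then validate the counters published by the BPF program.
+ */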
+static void task_work_run(bool enable_delete)
+{
+       struct task_work_stress *skel;
+       struct bpf_program *scheduler, *deleter;
+       int nthreads = 16;
+       int test_time_s = get_env_int("BPF_TASK_WORK_TEST_TIME", 1);
+       pthread_t tid[nthreads], tid_del;
+       bool started[nthreads], started_del = false;
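+       /* td_del.exit starts at 1 so the deleter thread exits immediately unless enabled below */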
+       struct test_data td_sched = { .exit = 0 }, td_del = { .exit = 1 };
+       int i, err;
+
+       skel = task_work_stress__open();
+       if (!ASSERT_OK_PTR(skel, "task_work_stress__open"))
+               return;
+
+       scheduler = bpf_object__find_program_by_name(skel->obj, "schedule_task_work");
+       if (!ASSERT_OK_PTR(scheduler, "find schedule_task_work"))
+               goto cleanup;
+       bpf_program__set_autoload(scheduler, true);
+
+       deleter = bpf_object__find_program_by_name(skel->obj, "delete_task_work");
+       if (!ASSERT_OK_PTR(deleter, "find delete_task_work"))
+               goto cleanup;
+       bpf_program__set_autoload(deleter, true);
+
+       err = task_work_stress__load(skel);
+       if (!ASSERT_OK(err, "skel_load"))
+               goto cleanup;
+
+       for (i = 0; i < nthreads; ++i)
+               started[i] = false;
+
+       td_sched.prog_fd = bpf_program__fd(scheduler);
+       for (i = 0; i < nthreads; ++i) {
+               if (pthread_create(&tid[i], NULL, runner, &td_sched) != 0) {
+                       fprintf(stderr, "could not start thread\n");
+                       goto cancel;
+               }
+               started[i] = true;
+       }
+
+       if (enable_delete)
+               atomic_store(&td_del.exit, 0);
+
+       td_del.prog_fd = bpf_program__fd(deleter);
+       if (pthread_create(&tid_del, NULL, runner, &td_del) != 0) {
+               fprintf(stderr, "could not start thread\n");
+               goto cancel;
+       }
+       started_del = true;
+
+       /* Run stress test for some time */
+       sleep(test_time_s);
+
+cancel:
+       atomic_store(&td_sched.exit, 1);
+       atomic_store(&td_del.exit, 1);
+       for (i = 0; i < nthreads; ++i) {
+               if (started[i])
+                       pthread_join(tid[i], NULL);
+       }
+
+       if (started_del)
+               pthread_join(tid_del, NULL);
+
+       ASSERT_GT(skel->bss->callback_scheduled, 0, "work scheduled");
+       /* Some scheduling attempts should have failed due to contention */
+       ASSERT_GT(skel->bss->schedule_error, 0, "schedule error");
+
+       if (enable_delete) {
+               /* If the delete thread is enabled, it should have cancelled some callbacks */
+               ASSERT_GT(skel->bss->delete_success, 0, "delete success");
+               ASSERT_LT(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
+       } else {
+               /* Without the delete thread, every scheduled callback should have fired */
+               ASSERT_EQ(skel->bss->callback_success, skel->bss->callback_scheduled, "callbacks");
+       }
+
+cleanup:
+       task_work_stress__destroy(skel);
+}
+
+void test_task_work_stress(void)
+{
+       if (test__start_subtest("no_delete"))
+               task_work_run(false);
+       if (test__start_subtest("with_delete"))
+               task_work_run(true);
+}
diff --git a/tools/testing/selftests/bpf/progs/task_work_stress.c b/tools/testing/selftests/bpf/progs/task_work_stress.c
new file mode 100644 (file)
index 0000000..90fca06
--- /dev/null
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2025 Meta Platforms, Inc. and affiliates. */
+
+#include <vmlinux.h>
+#include <string.h>
+#include <stdbool.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bpf_misc.h"
+
+#define ENTRIES 128
+
+char _license[] SEC("license") = "GPL";
+
+__u64 callback_scheduled = 0;
+__u64 callback_success = 0;
+__u64 schedule_error = 0;
+__u64 delete_success = 0;
+
+struct elem {
+       __u32 count;
+       struct bpf_task_work tw;
+};
+
+struct {
+       __uint(type, BPF_MAP_TYPE_HASH);
+       __uint(map_flags, BPF_F_NO_PREALLOC);
+       __uint(max_entries, ENTRIES);
+       __type(key, int);
+       __type(value, struct elem);
+} hmap SEC(".maps");
+
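+/* Task work callback: counts each successful callback execution */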
+static int process_work(struct bpf_map *map, void *key, void *value)
+{
+       __sync_fetch_and_add(&callback_success, 1);
+       return 0;
+}
+
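+/*
+ * Pick a pseudo-random element, creating it on demand, and schedule task
+ * work against it, counting successes and failures separately.
+ */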
+SEC("syscall")
+int schedule_task_work(void *ctx)
+{
+       struct elem empty_work = {.count = 0};
+       struct elem *work;
+       int key = 0, err;
+
+       key = bpf_ktime_get_ns() % ENTRIES;
+       work = bpf_map_lookup_elem(&hmap, &key);
+       if (!work) {
+               bpf_map_update_elem(&hmap, &key, &empty_work, BPF_NOEXIST);
+               work = bpf_map_lookup_elem(&hmap, &key);
+               if (!work)
+                       return 0;
+       }
+       err = bpf_task_work_schedule_signal(bpf_get_current_task_btf(), &work->tw, &hmap,
+                                           process_work, NULL);
+       if (err)
+               __sync_fetch_and_add(&schedule_error, 1);
+       else
+               __sync_fetch_and_add(&callback_scheduled, 1);
+       return 0;
+}
+
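+/* Delete a random element, cancelling any task work still attached to it */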
+SEC("syscall")
+int delete_task_work(void *ctx)
+{
+       int key = 0, err;
+
+       key = bpf_get_prandom_u32() % ENTRIES;
+       err = bpf_map_delete_elem(&hmap, &key);
+       if (!err)
+               __sync_fetch_and_add(&delete_success, 1);
+       return 0;
+}