// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
+#include <sched.h>
+#include <signal.h>
#include <test_progs.h>
+#include <linux/perf_event.h>
+#include <sys/syscall.h>
#include "timer.skel.h"
#include "timer_failure.skel.h"
#include "timer_interrupt.skel.h"
#define NUM_THR 8
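+
+/* Open a sampling perf event; progs attached to it run from the
+ * sampling interrupt (NMI on x86 hardware events).
+ */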
+static int perf_event_open(__u32 type, __u64 config, int pid, int cpu)
+{
+ struct perf_event_attr attr = {
+ .type = type,
+ .config = config,
+ .size = sizeof(struct perf_event_attr),
+ .sample_period = 10000,
+ };
+
+ return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
+}
+
static void *spin_lock_thread(void *arg)
{
int i, err, prog_fd = *(int *)arg;
return timer_stress_runner(timer_skel, true);
}
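+
+/* Keep a CPU busy so the cycles event overflows and samples fire */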
+static void *nmi_cpu_worker(void *arg)
+{
+ volatile __u64 num = 1;
+ int i;
+
+ for (i = 0; i < 500000000; ++i)
+ num *= (i % 7) + 1;
+ (void)num;
+
+ return NULL;
+}
+
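+/*
+ * Fork a child that saturates the CPUs, attach @prog to a sampling
+ * perf event on that child, and check that the prog actually ran.
+ * Returns EOPNOTSUPP (after marking the test skipped) if the perf
+ * event is unavailable.
+ */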
+static int run_nmi_test(struct timer *timer_skel, struct bpf_program *prog)
+{
+ struct bpf_link *link = NULL;
+ int pe_fd = -1, pipefd[2] = {-1, -1}, pid = 0, status;
+ char buf = 0;
+ int ret = -1;
+
+ if (!ASSERT_OK(pipe(pipefd), "pipe"))
+ goto cleanup;
+
+ pid = fork();
+ if (pid == 0) {
+ /* Child: spawn multiple threads to consume multiple CPUs */
+ pthread_t threads[NUM_THR];
+ int i;
+
+ close(pipefd[1]);
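+		/* Block until the parent has attached the perf event */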
+ read(pipefd[0], &buf, 1);
+ close(pipefd[0]);
+
+ for (i = 0; i < NUM_THR; i++)
+ pthread_create(&threads[i], NULL, nmi_cpu_worker, NULL);
+ for (i = 0; i < NUM_THR; i++)
+ pthread_join(threads[i], NULL);
+ exit(0);
+ }
+
+ if (!ASSERT_GE(pid, 0, "fork"))
+ goto cleanup;
+
+ /* Open perf event for child process across all CPUs */
+ pe_fd = perf_event_open(PERF_TYPE_HARDWARE,
+ PERF_COUNT_HW_CPU_CYCLES,
+ pid, /* measure child process */
+ -1); /* on any CPU */
+ if (pe_fd < 0) {
+ if (errno == ENOENT || errno == EOPNOTSUPP) {
+			printf("SKIP: no PERF_COUNT_HW_CPU_CYCLES support\n");
+ test__skip();
+ ret = EOPNOTSUPP;
+ goto cleanup;
+ }
+ ASSERT_GE(pe_fd, 0, "perf_event_open");
+ goto cleanup;
+ }
+
+ link = bpf_program__attach_perf_event(prog, pe_fd);
+ if (!ASSERT_OK_PTR(link, "attach_perf_event"))
+ goto cleanup;
+ pe_fd = -1; /* Ownership transferred to link */
+
+	/* Signal child to start CPU work */
+	close(pipefd[0]);
+	pipefd[0] = -1;
+	if (!ASSERT_EQ(write(pipefd[1], &buf, 1), 1, "write"))
+		goto cleanup;
+	close(pipefd[1]);
+	pipefd[1] = -1;
+
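+	/* Child burns CPU; samples on it invoke the BPF prog until it exits */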
+ waitpid(pid, &status, 0);
+ pid = 0;
+
+ /* Verify NMI context was hit */
+ ASSERT_GT(timer_skel->bss->test_hits, 0, "test_hits");
+ ret = 0;
+
+cleanup:
+ bpf_link__destroy(link);
+ if (pe_fd >= 0)
+ close(pe_fd);
+	if (pid > 0) {
+		/* Failure path: don't let the child burn CPU, just reap it */
+		kill(pid, SIGKILL);
+		waitpid(pid, &status, 0);
+	}
+ if (pipefd[0] >= 0)
+ close(pipefd[0]);
+ if (pipefd[1] >= 0)
+ close(pipefd[1]);
+ return ret;
+}
+
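+/* run_nmi_test() returning EOPNOTSUPP means the test was skipped */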
+static int timer_stress_nmi_race(struct timer *timer_skel)
+{
+ int err;
+
+ err = run_nmi_test(timer_skel, timer_skel->progs.nmi_race);
+ if (err == EOPNOTSUPP)
+ return 0;
+ return err;
+}
+
+static int timer_stress_nmi_update(struct timer *timer_skel)
+{
+ int err;
+
+ err = run_nmi_test(timer_skel, timer_skel->progs.nmi_update);
+ if (err == EOPNOTSUPP)
+ return 0;
+ if (err)
+ return err;
+ ASSERT_GT(timer_skel->bss->update_hits, 0, "update_hits");
+ return 0;
+}
+
+static int timer_stress_nmi_cancel(struct timer *timer_skel)
+{
+ int err;
+
+ err = run_nmi_test(timer_skel, timer_skel->progs.nmi_cancel);
+ if (err == EOPNOTSUPP)
+ return 0;
+ if (err)
+ return err;
+ ASSERT_GT(timer_skel->bss->cancel_hits, 0, "cancel_hits");
+ return 0;
+}
+
static int timer(struct timer *timer_skel)
{
int err, prog_fd;
test_timer(timer_cancel_async);
}
+void serial_test_timer_stress_nmi_race(void)
+{
+ test_timer(timer_stress_nmi_race);
+}
+
+void serial_test_timer_stress_nmi_update(void)
+{
+ test_timer(timer_stress_nmi_update);
+}
+
+void serial_test_timer_stress_nmi_cancel(void)
+{
+ test_timer(timer_stress_nmi_cancel);
+}
+
void test_timer_interrupt(void)
{
struct timer_interrupt *skel = NULL;
__u64 abs_data;
__u64 err;
__u64 ok;
+__u64 test_hits;
+__u64 update_hits;
+__u64 cancel_hits;
__u64 callback_check = 52;
__u64 callback2_check = 52;
__u64 pinned_callback_check;
return 0;
}
-SEC("syscall")
-int race(void *ctx)
+/* Callback that updates its own map element */
+static int update_self_callback(void *map, int *key, struct bpf_timer *timer)
+{
+ struct elem init = {};
+
+ bpf_map_update_elem(map, key, &init, BPF_ANY);
+ __sync_fetch_and_add(&update_hits, 1);
+ return 0;
+}
+
+/* Callback that cancels itself using async cancel */
+static int cancel_self_callback(void *map, int *key, struct bpf_timer *timer)
+{
+ bpf_timer_cancel_async(timer);
+ __sync_fetch_and_add(&cancel_hits, 1);
+ return 0;
+}
+
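+/* Which timer operation test_common() exercises */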
+enum test_mode {
+ TEST_RACE_SYNC,
+ TEST_RACE_ASYNC,
+ TEST_UPDATE,
+ TEST_CANCEL,
+};
+
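+/*
+ * Shared body for the syscall and perf_event (NMI) entry points:
+ * (re)create the map element, (re)init and arm its timer, then run
+ * the per-mode cancel/update operation against it.
+ */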
+static __always_inline int test_common(enum test_mode mode)
{
struct bpf_timer *timer;
- int err, race_key = 0;
struct elem init;
+ int ret, key = 0;
__builtin_memset(&init, 0, sizeof(struct elem));
- bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);
- timer = bpf_map_lookup_elem(&race_array, &race_key);
+ bpf_map_update_elem(&race_array, &key, &init, BPF_ANY);
+ timer = bpf_map_lookup_elem(&race_array, &key);
if (!timer)
- return 1;
+ return 0;
+
-	err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
-	if (err && err != -EBUSY)
-		return 1;
+	ret = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
+	if (ret && ret != -EBUSY)
+		return 0;
-	bpf_timer_set_callback(timer, race_timer_callback);
+	if (mode == TEST_RACE_SYNC || mode == TEST_RACE_ASYNC)
+		bpf_timer_set_callback(timer, race_timer_callback);
+	else if (mode == TEST_UPDATE)
+		bpf_timer_set_callback(timer, update_self_callback);
+	else
+		bpf_timer_set_callback(timer, cancel_self_callback);
bpf_timer_start(timer, 0, 0);
- if (async_cancel)
+
+ if (mode == TEST_RACE_ASYNC)
bpf_timer_cancel_async(timer);
- else
+ else if (mode == TEST_RACE_SYNC)
bpf_timer_cancel(timer);
return 0;
}
+
+SEC("syscall")
+int race(void *ctx)
+{
+ return test_common(async_cancel ? TEST_RACE_ASYNC : TEST_RACE_SYNC);
+}
+
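+/* The perf_event progs below run from the sampling interrupt (NMI) */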
+SEC("perf_event")
+int nmi_race(void *ctx)
+{
+ __sync_fetch_and_add(&test_hits, 1);
+ return test_common(TEST_RACE_ASYNC);
+}
+
+SEC("perf_event")
+int nmi_update(void *ctx)
+{
+ __sync_fetch_and_add(&test_hits, 1);
+ return test_common(TEST_UPDATE);
+}
+
+SEC("perf_event")
+int nmi_cancel(void *ctx)
+{
+ __sync_fetch_and_add(&test_hits, 1);
+ return test_common(TEST_CANCEL);
+}