return 0;
}
+static int timer_cancel_async(struct timer *timer_skel)
+{
+ int err, prog_fd;
+ LIBBPF_OPTS(bpf_test_run_opts, topts);
+
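+ /* run the syscall program that arms a timer and cancels it asynchronously */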
+ prog_fd = bpf_program__fd(timer_skel->progs.test_async_cancel_succeed);
+ err = bpf_prog_test_run_opts(prog_fd, &topts);
+ ASSERT_OK(err, "test_run");
+ ASSERT_EQ(topts.retval, 0, "test_run");
+
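+ /* give the 100us timer a chance to fire in case the async cancel failed */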
+ usleep(500);
+ /* check that there were no errors in timer execution */
+ ASSERT_EQ(timer_skel->bss->err, 0, "err");
+
+ /* check that code paths completed */
+ ASSERT_EQ(timer_skel->bss->ok, 1 | 2 | 4, "ok");
+
+ return 0;
+}
+
static void test_timer(int (*timer_test_fn)(struct timer *timer_skel))
{
struct timer *timer_skel = NULL;
test_timer(timer_stress_async_cancel);
}
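+/* serial_ prefix: the test harness runs this test outside the parallel set */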
+void serial_test_timer_async_cancel(void)
+{
+ test_timer(timer_cancel_async);
+}
+
void test_timer_interrupt(void)
{
struct timer_interrupt *skel = NULL;
return 0;
}
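+/* timer callback for the async-cancel test: it must never run, so it
+ * records an error if it does
+ */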
+static int timer_error(void *map, int *key, struct bpf_timer *timer)
+{
+ err = 42;
+ return 0;
+}
+
+SEC("syscall")
+int test_async_cancel_succeed(void *ctx)
+{
+ struct bpf_timer *arr_timer;
+ int array_key = ARRAY;
+
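+ /* the struct bpf_timer is embedded in the array map value */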
+ arr_timer = bpf_map_lookup_elem(&array, &array_key);
+ if (!arr_timer)
+ return 0;
+ bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
+ ok |= 1;
+ bpf_timer_set_callback(arr_timer, timer_error);
+ ok |= 2;
+ bpf_timer_start(arr_timer, 100000 /* 100us */, 0);
+ /* request asynchronous cancellation before the timer fires */
+ bpf_timer_cancel_async(arr_timer);
+ ok |= 4;
+ return 0;
+}
+
/* callback for prealloc and non-prealloc hashtab timers */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{