bpf: Introduce bpf_timer_cancel_async() kfunc
Author:     Alexei Starovoitov <ast@kernel.org>
AuthorDate: Sun, 1 Feb 2026 02:53:57 +0000 (18:53 -0800)
Commit:     Andrii Nakryiko <andrii@kernel.org>
CommitDate: Wed, 4 Feb 2026 00:58:46 +0000 (16:58 -0800)

Introduce bpf_timer_cancel_async(), which wraps hrtimer_try_to_cancel()
and either executes it synchronously or defers it to irq_work.

Co-developed-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Mykyta Yatsenko <yatsenko@meta.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260201025403.66625-4-alexei.starovoitov@gmail.com
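
Below is a minimal sketch of how a BPF program might call the new kfunc.
The map layout, section name, and availability of the kfunc in this
program type are illustrative assumptions, not part of this patch:

	#include <vmlinux.h>
	#include <bpf/bpf_helpers.h>

	/* Resolved against kernel BTF at program load time. */
	extern int bpf_timer_cancel_async(struct bpf_timer *timer) __ksym;

	struct elem {
		struct bpf_timer t;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct elem);
	} timer_map SEC(".maps");

	SEC("perf_event")
	int cancel_timer(void *ctx)
	{
		int key = 0;
		struct elem *e = bpf_map_lookup_elem(&timer_map, &key);

		if (!e)
			return 0;
		/*
		 * Outside hardirq this calls hrtimer_try_to_cancel()
		 * synchronously and returns its result; from hardirq the
		 * cancel is deferred to irq_work and -ECANCELED is returned.
		 */
		bpf_timer_cancel_async(&e->t);
		return 0;
	}

	/* kfuncs require a GPL-compatible license. */
	char _license[] SEC("license") = "GPL";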
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 354aa607df3e71de16bd39ae8ca620689d7164a8..d4aedac14a6035a392d1135e342e857629b3c065 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -4426,6 +4426,53 @@ __bpf_kfunc int bpf_dynptr_file_discard(struct bpf_dynptr *dynptr)
        return 0;
 }
 
+/**
+ * bpf_timer_cancel_async - try to deactivate a timer
+ * @timer:     bpf_timer to stop
+ *
+ * Returns:
+ *
+ *  *  0 when the timer was not active
+ *  *  1 when the timer was active
+ *  * -1 when the timer is currently executing the callback function and
+ *       cannot be stopped
+ *  * -ECANCELED when the timer will be cancelled asynchronously
+ *  * -ENOMEM when out of memory
+ *  * -EINVAL when the timer was not initialized
+ *  * -ENOENT when this kfunc is racing with timer deletion
+ */
+__bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
+{
+       struct bpf_async_kern *async = (void *)timer;
+       struct bpf_async_cb *cb;
+       int ret;
+
+       cb = READ_ONCE(async->cb);
+       if (!cb)
+               return -EINVAL;
+
+       /*
+        * Unlike hrtimer_start(), it's ok to call hrtimer_try_to_cancel()
+        * synchronously when refcnt has reached zero, but deferring to
+        * irq_work is not, since the irq callback may run after an RCU
+        * grace period, when cb could already have been freed. Check for
+        * refcnt zero for consistency.
+        */
+       if (!refcount_inc_not_zero(&cb->refcnt))
+               return -ENOENT;
+
+       if (!in_hardirq()) {
+               struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
+
+               ret = hrtimer_try_to_cancel(&t->timer);
+               bpf_async_refcount_put(cb);
+               return ret;
+       } else {
+               ret = bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
+               return ret ? ret : -ECANCELED;
+       }
+}
+
 __bpf_kfunc_end_defs();
 
 static void bpf_task_work_cancel_scheduled(struct irq_work *irq_work)
@@ -4609,6 +4656,7 @@ BTF_ID_FLAGS(func, bpf_task_work_schedule_signal, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_task_work_schedule_resume, KF_IMPLICIT_ARGS)
 BTF_ID_FLAGS(func, bpf_dynptr_from_file)
 BTF_ID_FLAGS(func, bpf_dynptr_file_discard)
+BTF_ID_FLAGS(func, bpf_timer_cancel_async)
 BTF_KFUNCS_END(common_btf_ids)
 
 static const struct btf_kfunc_id_set common_kfunc_set = {
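
For reference, a hedged sketch of caller-side handling of the return
contract documented in the kernel-doc above, building on the program
sketch earlier; handle_cancel() is a hypothetical helper and the errno
constants come from <errno.h>:

	#include <errno.h>

	static __always_inline int handle_cancel(struct bpf_timer *t)
	{
		int ret = bpf_timer_cancel_async(t);

		switch (ret) {
		case 1:			/* timer was active and got removed */
		case 0:			/* timer was not active; nothing to do */
			return 0;
		case -1:		/* callback is running and cannot be stopped */
			return 0;
		case -ECANCELED:	/* cancellation deferred to irq_work */
			return 0;
		default:		/* -EINVAL, -ENOENT or -ENOMEM */
			return ret;
		}
	}

Treating -ECANCELED as cancellation-in-progress rather than failure
mirrors the deferred irq_work path in the kfunc itself.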