git.ipfire.org Git - thirdparty/linux.git/commitdiff
bpf: Tighten conditions when timer/wq can be called synchronously
author	Alexei Starovoitov <ast@kernel.org>
Wed, 4 Feb 2026 05:51:44 +0000 (21:51 -0800)
committer	Andrii Nakryiko <andrii@kernel.org>
Wed, 4 Feb 2026 21:12:50 +0000 (13:12 -0800)
Though hrtimer_start/cancel() inline all of the smaller helpers in
hrtimer.c and only call timerqueue_add/del() from lib/timerqueue.c,
where nothing is traceable or kprobe-able (all files in lib/ are not
traceable), there are tracepoints within hrtimer that are invoked with
locks held. A BPF program attached at such a point could call back into
the timer code and deadlock on those locks. Therefore prevent the
deadlock by tightening the conditions under which timer/wq operations
may be performed synchronously.
hrtimer/wq take their locks with raw_spin_lock_irqsave(), so whenever
such a lock is held IRQs are disabled; checking irqs_disabled() is
therefore enough.

Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260204055147.54960-2-alexei.starovoitov@gmail.com
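
For orientation (an editorial sketch, not part of the commit text): the gate
added below and the call-site pattern the hunks converge on.
defer_timer_wq_op(), bpf_async_refcount_put() and bpf_async_schedule_op() are
the helpers from kernel/bpf/helpers.c touched by this patch; all surrounding
code is elided.

	/*
	 * Defer if the caller might already hold an hrtimer/wq lock: those
	 * locks are taken with raw_spin_lock_irqsave(), so a holder always
	 * has IRQs disabled.
	 */
	static bool defer_timer_wq_op(void)
	{
		return in_hardirq() || irqs_disabled();
	}

	/* Call-site pattern (cf. bpf_timer_start() below): */
	if (!defer_timer_wq_op()) {
		/* Safe to take the hrtimer locks right here. */
		hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
		bpf_async_refcount_put(&t->cb);
		return 0;
	}
	/*
	 * Otherwise queue the operation via bpf_async_schedule_op(), which
	 * hands it to irq_work so it runs later, outside the locked region.
	 */

The check deliberately over-approximates: IRQs being disabled does not always
mean a timer/wq lock is held, but deferring in that case is always safe.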
kernel/bpf/helpers.c

index d4aedac14a6035a392d1135e342e857629b3c065..0517e9a8fc7c9befa9fbf25591adf3a323296c3d 100644
@@ -1430,8 +1430,6 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
 static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
                                 u64 nsec, u32 timer_mode)
 {
-       WARN_ON_ONCE(!in_hardirq());
-
        struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);
 
        if (!cmd) {
@@ -1473,6 +1471,11 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = {
        .arg2_type      = ARG_PTR_TO_FUNC,
 };
 
+static bool defer_timer_wq_op(void)
+{
+       return in_hardirq() || irqs_disabled();
+}
+
 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, flags)
 {
        struct bpf_hrtimer *t;
@@ -1500,7 +1503,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, fla
        if (!refcount_inc_not_zero(&t->cb.refcnt))
                return -ENOENT;
 
-       if (!in_hardirq()) {
+       if (!defer_timer_wq_op()) {
                hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
                bpf_async_refcount_put(&t->cb);
                return 0;
@@ -1524,7 +1527,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async)
        bool inc = false;
        int ret = 0;
 
-       if (in_hardirq())
+       if (defer_timer_wq_op())
                return -EOPNOTSUPP;
 
        t = READ_ONCE(async->timer);
@@ -1650,7 +1653,7 @@ static void bpf_async_cancel_and_free(struct bpf_async_kern *async)
         * refcnt. Either synchronously or asynchronously in irq_work.
         */
 
-       if (!in_hardirq()) {
+       if (!defer_timer_wq_op()) {
                bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0);
        } else {
                (void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
@@ -3161,7 +3164,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
        if (!refcount_inc_not_zero(&w->cb.refcnt))
                return -ENOENT;
 
-       if (!in_hardirq()) {
+       if (!defer_timer_wq_op()) {
                schedule_work(&w->work);
                bpf_async_refcount_put(&w->cb);
                return 0;
@@ -4461,7 +4464,7 @@ __bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
        if (!refcount_inc_not_zero(&cb->refcnt))
                return -ENOENT;
 
-       if (!in_hardirq()) {
+       if (!defer_timer_wq_op()) {
                struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
 
                ret = hrtimer_try_to_cancel(&t->timer);
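
As a usage illustration (hypothetical, not from this patch or its series): a
minimal BPF program that arms a bpf_timer from a tracepoint handler. With the
change above, when such a handler runs with IRQs disabled the kernel defers
the actual hrtimer_start() instead of taking the hrtimer locks synchronously.
The map layout, section, names, and values below are assumptions made for the
sketch.

	/* timer_from_tp.bpf.c - hypothetical illustration, not part of this commit */
	#include "vmlinux.h"
	#include <bpf/bpf_helpers.h>

	struct elem {
		struct bpf_timer t;
	};

	struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, int);
		__type(value, struct elem);
	} timers SEC(".maps");

	static int timer_cb(void *map, int *key, struct bpf_timer *timer)
	{
		/* Runs later from the timer expiry path. */
		return 0;
	}

	SEC("tracepoint/timer/hrtimer_start")
	int handle_hrtimer_start(void *ctx)
	{
		int key = 0;
		struct elem *e = bpf_map_lookup_elem(&timers, &key);

		if (!e)
			return 0;

		bpf_timer_init(&e->t, &timers, 1 /* CLOCK_MONOTONIC */);
		bpf_timer_set_callback(&e->t, timer_cb);
		/*
		 * This tracepoint can fire with hrtimer locks held and IRQs
		 * disabled; with the patch above the kernel queues the start
		 * through irq_work instead of calling hrtimer_start() here.
		 */
		bpf_timer_start(&e->t, 1000000 /* 1 ms */, 0);
		return 0;
	}

	char LICENSE[] SEC("license") = "GPL";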