From 7d49635e3775da946e536bc81ab55b2bca6b791d Mon Sep 17 00:00:00 2001
From: Alexei Starovoitov
Date: Tue, 3 Feb 2026 21:51:44 -0800
Subject: [PATCH] bpf: Tighten conditions when timer/wq can be called
 synchronously

Though hrtimer_start/cancel() inline all of the smaller helpers in
hrtimer.c and only call timerqueue_add/del() from lib/timerqueue.c,
where nothing is traceable or kprobe-able (all files in lib/ are not
traceable), there are tracepoints within the hrtimer code that are
called with locks held. A BPF program attached to such a tracepoint can
re-enter these timer/wq helpers and deadlock on the already-held lock.
Therefore, prevent the deadlock by tightening the conditions under
which timer/wq operations can be called synchronously.

hrtimer/wq use raw_spin_lock_irqsave(), so checking irqs_disabled()
is sufficient.

Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context")
Signed-off-by: Alexei Starovoitov
Signed-off-by: Andrii Nakryiko
Link: https://lore.kernel.org/bpf/20260204055147.54960-2-alexei.starovoitov@gmail.com
---
 kernel/bpf/helpers.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index d4aedac14a603..0517e9a8fc7c9 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -1430,8 +1430,6 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
 static int bpf_async_schedule_op(struct bpf_async_cb *cb,
 				 enum bpf_async_op op, u64 nsec, u32 timer_mode)
 {
-	WARN_ON_ONCE(!in_hardirq());
-
 	struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);
 
 	if (!cmd) {
@@ -1473,6 +1471,11 @@ static const struct bpf_func_proto bpf_timer_set_callback_proto = {
 	.arg2_type	= ARG_PTR_TO_FUNC,
 };
 
+static bool defer_timer_wq_op(void)
+{
+	return in_hardirq() || irqs_disabled();
+}
+
 BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, flags)
 {
 	struct bpf_hrtimer *t;
@@ -1500,7 +1503,7 @@ BPF_CALL_3(bpf_timer_start, struct bpf_async_kern *, async, u64, nsecs, u64, fla
 	if (!refcount_inc_not_zero(&t->cb.refcnt))
 		return -ENOENT;
 
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
 		hrtimer_start(&t->timer, ns_to_ktime(nsecs), mode);
 		bpf_async_refcount_put(&t->cb);
 		return 0;
@@ -1524,7 +1527,7 @@ BPF_CALL_1(bpf_timer_cancel, struct bpf_async_kern *, async)
 	bool inc = false;
 	int ret = 0;
 
-	if (in_hardirq())
+	if (defer_timer_wq_op())
 		return -EOPNOTSUPP;
 
 	t = READ_ONCE(async->timer);
@@ -1650,7 +1653,7 @@ static void bpf_async_cancel_and_free(struct bpf_async_kern *async)
 	 * refcnt. Either synchronously or asynchronously in irq_work.
 	 */
 
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
 		bpf_async_process_op(cb, BPF_ASYNC_CANCEL, 0, 0);
 	} else {
 		(void)bpf_async_schedule_op(cb, BPF_ASYNC_CANCEL, 0, 0);
@@ -3161,7 +3164,7 @@ __bpf_kfunc int bpf_wq_start(struct bpf_wq *wq, unsigned int flags)
 	if (!refcount_inc_not_zero(&w->cb.refcnt))
 		return -ENOENT;
 
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
 		schedule_work(&w->work);
 		bpf_async_refcount_put(&w->cb);
 		return 0;
@@ -4461,7 +4464,7 @@ __bpf_kfunc int bpf_timer_cancel_async(struct bpf_timer *timer)
 	if (!refcount_inc_not_zero(&cb->refcnt))
 		return -ENOENT;
 
-	if (!in_hardirq()) {
+	if (!defer_timer_wq_op()) {
 		struct bpf_hrtimer *t = container_of(cb, struct bpf_hrtimer, cb);
 
 		ret = hrtimer_try_to_cancel(&t->timer);
-- 
2.47.3
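
Note (not part of the patch): a minimal user-space sketch of the deferral
decision the change introduces, for readers skimming the diff. The struct,
the two booleans, and the helper names below are made up for illustration;
only the boolean condition mirrors the new defer_timer_wq_op() in the diff,
and the third test case is the situation the commit message describes
(a tracepoint fired while hrtimer code holds a raw_spin_lock_irqsave() lock).

/* Standalone model of the old vs. new "defer to irq_work" rule. */
#include <stdbool.h>
#include <stdio.h>

struct ctx {
	bool hardirq;        /* models in_hardirq() */
	bool irqs_disabled;  /* models irqs_disabled() */
};

/* Old rule: defer only when running in hardirq context. */
static bool defer_old(struct ctx c)
{
	return c.hardirq;
}

/* New rule: also defer whenever IRQs are disabled, which covers code
 * running under a raw_spin_lock_irqsave() section inside hrtimer/wq. */
static bool defer_new(struct ctx c)
{
	return c.hardirq || c.irqs_disabled;
}

int main(void)
{
	struct ctx cases[] = {
		{ false, false },  /* normal process/softirq context */
		{ true,  true  },  /* hardirq handler */
		{ false, true  },  /* lock held with IRQs off: the deadlock case */
	};

	for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++)
		printf("hardirq=%d irqs_off=%d -> old:%s new:%s\n",
		       cases[i].hardirq, cases[i].irqs_disabled,
		       defer_old(cases[i]) ? "defer" : "sync",
		       defer_new(cases[i]) ? "defer" : "sync");
	return 0;
}

The third case is the one the patch changes: the old rule would have run
hrtimer_start()/schedule_work() synchronously there, risking a deadlock on
the held lock, while the new rule pushes the operation to irq_work.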