From: Alexei Starovoitov Date: Thu, 5 Feb 2026 19:02:33 +0000 (-0800) Subject: bpf: Prevent reentrance into call_rcu_tasks_trace() X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=1ace9bac1ad2bc6a0a70baaa16d22b7e783e88c5;p=thirdparty%2Flinux.git bpf: Prevent reentrance into call_rcu_tasks_trace() call_rcu_tasks_trace() is not safe from in_nmi() and not reentrant. To prevent deadlock on raw_spin_lock_rcu_node(rtpcp) or memory corruption, defer to irq_work when IRQs are disabled. call_rcu_tasks_generic() protects itself with local_irq_save(). Note: when bpf_async_cb->refcnt drops to zero, it's safe to reuse bpf_async_cb->worker for a different irq_work callback, since bpf_async_schedule_op() -> irq_work_queue(&cb->worker); is only called when refcnt >= 1. Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context") Signed-off-by: Alexei Starovoitov Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20260205190233.912-1-alexei.starovoitov@gmail.com --- diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index a4f039cee88b6..0458597134da8 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -1276,12 +1276,24 @@ static void bpf_async_cb_rcu_tasks_trace_free(struct rcu_head *rcu) bpf_async_cb_rcu_free(rcu); } +static void worker_for_call_rcu(struct irq_work *work) +{ + struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker); + + call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free); +} + static void bpf_async_refcount_put(struct bpf_async_cb *cb) { if (!refcount_dec_and_test(&cb->refcnt)) return; - call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free); + if (irqs_disabled()) { + cb->worker = IRQ_WORK_INIT(worker_for_call_rcu); + irq_work_queue(&cb->worker); + } else { + call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free); + } } static void bpf_async_cancel_and_free(struct bpf_async_kern *async);