bpf: Prevent reentrance into call_rcu_tasks_trace()
author     Alexei Starovoitov <ast@kernel.org>
           Thu, 5 Feb 2026 19:02:33 +0000 (11:02 -0800)
committer  Andrii Nakryiko <andrii@kernel.org>
           Thu, 5 Feb 2026 19:47:08 +0000 (11:47 -0800)
call_rcu_tasks_trace() is not safe to call from in_nmi() context and is not
reentrant. To prevent a deadlock on raw_spin_lock_rcu_node(rtpcp) or memory
corruption, defer the call to irq_work when IRQs are disabled;
call_rcu_tasks_generic() protects itself with local_irq_save(), so the direct
call remains fine when IRQs are enabled.
Note that once bpf_async_cb->refcnt drops to zero it is safe to reuse
bpf_async_cb->worker for a different irq_work callback, since
bpf_async_schedule_op() -> irq_work_queue(&cb->worker)
is only called while refcnt >= 1.
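
For illustration, a minimal, self-contained sketch of the same defer-via-irq_work
pattern in kernel C. All names here (demo_cb, demo_put, demo_free_rcu,
demo_free_irq_work) are hypothetical and not part of the bpf helpers; the actual
change is in the diff below.

/*
 * Hypothetical sketch of the deferral pattern; the demo_* names are made up
 * for illustration and do not exist in the kernel tree.
 */
#include <linux/container_of.h>
#include <linux/irq_work.h>
#include <linux/irqflags.h>
#include <linux/rcupdate.h>
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_cb {
        struct irq_work worker;     /* reused for the free path once refcnt == 0 */
        struct rcu_head rcu;
        refcount_t refcnt;
};

static void demo_free_rcu(struct rcu_head *rcu)
{
        kfree(container_of(rcu, struct demo_cb, rcu));
}

static void demo_free_irq_work(struct irq_work *work)
{
        struct demo_cb *cb = container_of(work, struct demo_cb, worker);

        /* irq_work callbacks never run in NMI context, so this call is safe here. */
        call_rcu_tasks_trace(&cb->rcu, demo_free_rcu);
}

static void demo_put(struct demo_cb *cb)
{
        if (!refcount_dec_and_test(&cb->refcnt))
                return;

        /*
         * refcnt just hit zero, so nothing else can queue cb->worker for its
         * normal purpose anymore; reinitializing it for the free path cannot race.
         */
        if (irqs_disabled()) {
                cb->worker = IRQ_WORK_INIT(demo_free_irq_work);
                irq_work_queue(&cb->worker);
        } else {
                call_rcu_tasks_trace(&cb->rcu, demo_free_rcu);
        }
}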

Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260205190233.912-1-alexei.starovoitov@gmail.com
kernel/bpf/helpers.c

index a4f039cee88b62af7857f704caa1973d7152a4a9..0458597134da8c87e3b0e69ad2e7173c2207baaf 100644
@@ -1276,12 +1276,24 @@ static void bpf_async_cb_rcu_tasks_trace_free(struct rcu_head *rcu)
        bpf_async_cb_rcu_free(rcu);
 }
 
+static void worker_for_call_rcu(struct irq_work *work)
+{
+       struct bpf_async_cb *cb = container_of(work, struct bpf_async_cb, worker);
+
+       call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
+}
+
 static void bpf_async_refcount_put(struct bpf_async_cb *cb)
 {
        if (!refcount_dec_and_test(&cb->refcnt))
                return;
 
-       call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
+       if (irqs_disabled()) {
+               cb->worker = IRQ_WORK_INIT(worker_for_call_rcu);
+               irq_work_queue(&cb->worker);
+       } else {
+               call_rcu_tasks_trace(&cb->rcu, bpf_async_cb_rcu_tasks_trace_free);
+       }
 }
 
 static void bpf_async_cancel_and_free(struct bpf_async_kern *async);