bpf: Add a recursion check to prevent loops in bpf_timer
author	Alexei Starovoitov <ast@kernel.org>
	Wed, 4 Feb 2026 05:51:46 +0000 (21:51 -0800)
committer	Andrii Nakryiko <andrii@kernel.org>
	Wed, 4 Feb 2026 21:12:50 +0000 (13:12 -0800)
Do not schedule a timer/wq operation on a cpu that is inside an irq_work
callback processing the async_cmds queue.
Otherwise the following loop is possible:
bpf_timer_start() -> bpf_async_schedule_op() -> irq_work_queue().
irqrestore -> bpf_async_irq_worker() -> tracepoint -> bpf_timer_start().

Fixes: 1bfbc267ec91 ("bpf: Enable bpf_timer and bpf_wq in any context")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20260204055147.54960-4-alexei.starovoitov@gmail.com
kernel/bpf/helpers.c

index 0517e9a8fc7c9befa9fbf25591adf3a323296c3d..01052f8664eb92d1f20e6f6bd0155a3a70e73b34 100644
@@ -1427,9 +1427,23 @@ static int bpf_async_update_prog_callback(struct bpf_async_cb *cb,
        return 0;
 }
 
+static DEFINE_PER_CPU(struct bpf_async_cb *, async_cb_running);
+
 static int bpf_async_schedule_op(struct bpf_async_cb *cb, enum bpf_async_op op,
                                 u64 nsec, u32 timer_mode)
 {
+       /*
+        * Do not schedule another operation on this cpu if it's in irq_work
+        * callback that is processing async_cmds queue. Otherwise the following
+        * loop is possible:
+        * bpf_timer_start() -> bpf_async_schedule_op() -> irq_work_queue().
+        * irqrestore -> bpf_async_irq_worker() -> tracepoint -> bpf_timer_start().
+        */
+       if (this_cpu_read(async_cb_running) == cb) {
+               bpf_async_refcount_put(cb);
+               return -EDEADLK;
+       }
+
        struct bpf_async_cmd *cmd = kmalloc_nolock(sizeof(*cmd), 0, NUMA_NO_NODE);
 
        if (!cmd) {
@@ -1628,6 +1642,7 @@ static void bpf_async_irq_worker(struct irq_work *work)
                return;
 
        list = llist_reverse_order(list);
+       this_cpu_write(async_cb_running, cb);
        llist_for_each_safe(pos, n, list) {
                struct bpf_async_cmd *cmd;
 
@@ -1635,6 +1650,7 @@ static void bpf_async_irq_worker(struct irq_work *work)
                bpf_async_process_op(cb, cmd->op, cmd->nsec, cmd->mode);
                kfree_nolock(cmd);
        }
+       this_cpu_write(async_cb_running, NULL);
 }
 
 static void bpf_async_cancel_and_free(struct bpf_async_kern *async)
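
The guard added by this patch boils down to a small pattern: remember which callback the cpu is currently draining in a per-cpu variable and refuse to queue more work for that callback until the worker is done. The stand-alone user-space sketch below mimics that pattern; the names (async_cb, schedule_op, irq_worker, tracepoint_hook) and the single global standing in for the per-CPU async_cb_running variable are made up for illustration and are not the kernel code.

/* recursion_guard_demo.c - hypothetical user-space analogue of the per-cpu guard */
#include <errno.h>
#include <stdio.h>

struct async_cb { const char *name; };

/* stands in for the per-CPU async_cb_running pointer added by the patch */
static struct async_cb *cb_running;

static void irq_worker(struct async_cb *cb);

/* refuse to queue new work while the worker is already draining
 * commands for the same callback on this "cpu" */
static int schedule_op(struct async_cb *cb)
{
        if (cb_running == cb) {
                printf("schedule_op(%s): re-entered from worker, returning -EDEADLK\n",
                       cb->name);
                return -EDEADLK;
        }
        printf("schedule_op(%s): queued, running worker\n", cb->name);
        irq_worker(cb);         /* queueing and worker collapsed into one call for the demo */
        return 0;
}

/* stands in for a tracepoint handler that calls bpf_timer_start() again */
static void tracepoint_hook(struct async_cb *cb)
{
        schedule_op(cb);        /* would recurse forever without the guard */
}

static void irq_worker(struct async_cb *cb)
{
        cb_running = cb;        /* mark this cb as being processed */
        tracepoint_hook(cb);    /* fires while the queue is being drained */
        cb_running = NULL;      /* clear the marker once the queue is empty */
}

int main(void)
{
        struct async_cb timer = { .name = "timer" };

        return schedule_op(&timer) ? 1 : 0;
}

Compiled and run, the sketch prints the "queued" line once and the -EDEADLK line once, which is how the this_cpu_read(async_cb_running) check in bpf_async_schedule_op() cuts the bpf_timer_start() loop short.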