bpf: Remove migrate_disable in kprobe_multi_link_prog_run
author Tao Chen <chen.dylane@linux.dev>
Thu, 14 Aug 2025 12:14:29 +0000 (20:14 +0800)
committer Andrii Nakryiko <andrii@kernel.org>
Fri, 15 Aug 2025 23:49:31 +0000 (16:49 -0700)
The graph tracer framework ensures we won't migrate: kprobe_multi_link_prog_run
is called all the way from the graph tracer, which disables preemption in
function_graph_enter_regs. As Jiri and Yonghong suggested, there is therefore
no need to use migrate_disable, and some overhead can be reduced. Also add a
cant_sleep() check for __this_cpu_inc_return.

Fixes: 0dcac2725406 ("bpf: Add multi kprobe link")
Signed-off-by: Tao Chen <chen.dylane@linux.dev>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20250814121430.2347454-1-chen.dylane@linux.dev
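
For readers outside the tracing code: bpf_prog_active is a per-CPU recursion
guard, and the non-atomic __this_cpu_inc_return/__this_cpu_dec pair is only
safe while the task cannot change CPUs between the increment and the
decrement. Below is a minimal kernel-style sketch of that pattern; the
prog_active_sketch and guard_*_sketch names are made up for illustration,
while cant_sleep(), DEFINE_PER_CPU and the __this_cpu_* helpers are the real
kernel primitives.

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(int, prog_active_sketch);

/*
 * Illustrative per-CPU recursion guard. The non-atomic __this_cpu_*
 * ops are only correct if the task cannot migrate between the inc and
 * the dec; cant_sleep() documents (and, with debug checks enabled,
 * verifies) that the caller already runs in such a context, which is
 * why an extra migrate_disable() around the guard is redundant here.
 */
static int guard_enter_sketch(void)
{
	cant_sleep();

	if (__this_cpu_inc_return(prog_active_sketch) != 1) {
		/*
		 * Recursion on this CPU. Unlike the real function, which
		 * reaches the decrement via its out: label, this sketch
		 * undoes its own increment before bailing out.
		 */
		__this_cpu_dec(prog_active_sketch);
		return -EBUSY;
	}
	return 0;
}

static void guard_exit_sketch(void)
{
	__this_cpu_dec(prog_active_sketch);
}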
kernel/trace/bpf_trace.c

index 3ae52978cae61a5d60b43c764d3e267bd32e1085..606007c387c52f3ca8e412f84c09388dbdb09c76 100644 (file)
@@ -2728,20 +2728,25 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
        struct pt_regs *regs;
        int err;
 
+       /*
+        * The graph tracer framework ensures we won't migrate, so there is no
+        * need to use migrate_disable for bpf_prog_run again. The check here is
+        * just for __this_cpu_inc_return.
+        */
+       cant_sleep();
+
        if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
                bpf_prog_inc_misses_counter(link->link.prog);
                err = 1;
                goto out;
        }
 
-       migrate_disable();
        rcu_read_lock();
        regs = ftrace_partial_regs(fregs, bpf_kprobe_multi_pt_regs_ptr());
        old_run_ctx = bpf_set_run_ctx(&run_ctx.session_ctx.run_ctx);
        err = bpf_prog_run(link->link.prog, regs);
        bpf_reset_run_ctx(old_run_ctx);
        rcu_read_unlock();
-       migrate_enable();
 
  out:
        __this_cpu_dec(bpf_prog_active);
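
The safety argument rests on the caller: function_graph_enter_regs disables
preemption before the fgraph entry handlers run, and disabled preemption
implies disabled migration. A hedged sketch of that shape, with a made-up
handler type standing in for the real fgraph machinery:

#include <linux/preempt.h>

/* Stand-in for the fgraph entry callback type; not the real API. */
typedef void (*entry_handler_sketch_t)(void);

/*
 * Rough shape of the guarantee the commit relies on: the graph tracer
 * entry path runs its handlers with preemption disabled, so a BPF
 * program invoked underneath (e.g. via kprobe_multi_link_prog_run)
 * cannot migrate and its per-CPU state stays consistent.
 */
static void fgraph_enter_sketch(entry_handler_sketch_t handler)
{
	preempt_disable_notrace();	/* also forbids migration */
	handler();
	preempt_enable_notrace();
}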