bpf: Have __bpf_trace_run() use rcu_read_lock_dont_migrate()
author Steven Rostedt <rostedt@goodmis.org>
Mon, 26 Jan 2026 23:11:47 +0000 (18:11 -0500)
committer Steven Rostedt (Google) <rostedt@goodmis.org>
Fri, 30 Jan 2026 15:43:48 +0000 (10:43 -0500)
In order to switch the protection of tracepoint callbacks from
preempt_disable() to srcu_read_lock_fast(), the BPF callback from
tracepoints needs migration prevention, as BPF programs expect to stay
on the same CPU while they execute. Combine the RCU protection with
migration prevention by using rcu_read_lock_dont_migrate() in
__bpf_trace_run(). This will allow tracepoint callbacks to be
preemptible.
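
As a rough illustration only (not the kernel's actual definitions), the
helper pair used here can be thought of as combining the RCU read-side
markers with migrate_disable()/migrate_enable():

	/* Sketch of the assumed semantics, for illustration only. */
	static inline void rcu_read_lock_dont_migrate(void)
	{
		migrate_disable();	/* keep the task on this CPU */
		rcu_read_lock();	/* enter RCU read-side critical section */
	}

	static inline void rcu_read_unlock_migrate(void)
	{
		rcu_read_unlock();	/* leave RCU read-side critical section */
		migrate_enable();	/* allow migration again */
	}

Keeping migration disabled across the whole function means the
this_cpu_inc_return()/this_cpu_dec() recursion accounting on
prog->active is taken and released on the same CPU even once the
tracepoint callback itself may be preempted.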

Link: https://lore.kernel.org/all/CAADnVQKvY026HSFGOsavJppm3-Ajm-VsLzY-OeFUe+BaKMRnDg@mail.gmail.com/
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: Alexei Starovoitov <ast@kernel.org>
Link: https://patch.msgid.link/20260126231256.335034877@kernel.org
Suggested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index fe28d86f7c357667cf139e60623e9c3d393ad0b3..abbf0177ad20bce9d162629a9bb18574cfb86650 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -2062,7 +2062,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_trace_run_ctx run_ctx;
 
-       cant_sleep();
+       rcu_read_lock_dont_migrate();
        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                bpf_prog_inc_misses_counter(prog);
                goto out;
@@ -2071,13 +2071,12 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
        run_ctx.bpf_cookie = link->cookie;
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
 
-       rcu_read_lock();
        (void) bpf_prog_run(prog, args);
-       rcu_read_unlock();
 
        bpf_reset_run_ctx(old_run_ctx);
 out:
        this_cpu_dec(*(prog->active));
+       rcu_read_unlock_migrate();
 }
 
 #define UNPACK(...)                    __VA_ARGS__