]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
bpf: Fix grace period wait for tracepoint bpf_link
authorKumar Kartikeya Dwivedi <memxor@gmail.com>
Tue, 31 Mar 2026 21:10:20 +0000 (23:10 +0200)
committerAlexei Starovoitov <ast@kernel.org>
Tue, 31 Mar 2026 23:01:13 +0000 (16:01 -0700)
Recently, tracepoints were switched from using disabled preemption
(which acts as RCU read section) to SRCU-fast when they are not
faultable. This means that to do a proper grace period wait for programs
running in such tracepoints, we must use SRCU's grace period wait.
This is only for non-faultable tracepoints, faultable ones continue
using RCU Tasks Trace.

However, bpf_link_free() currently does call_rcu() for all cases when
the link is non-sleepable (hence, for tracepoints, non-faultable). Fix
this by doing a call_srcu() grace period wait.

As far as RCU Tasks Trace gp -> RCU gp chaining is concerned, it is deemed
unnecessary for tracepoint programs. The link and program are either
accessed under RCU Tasks Trace protection, or SRCU-fast protection now.

The earlier logic of chaining both RCU Tasks Trace and RCU gp waits was
meant to generalize the logic, even if it conceded an extra RCU gp wait;
however, that was unnecessary for tracepoints even before this change.
In practice no cost was paid since rcu_trace_implies_rcu_gp() was always
true. Hence we need not chain any RCU gp after the SRCU gp.

For instance, in the non-faultable raw tracepoint, the RCU read section
of the program in __bpf_trace_run() is enclosed in the SRCU gp, likewise
for faultable raw tracepoint, the program is under the RCU Tasks Trace
protection. Hence, the outermost scope can be waited upon to ensure
correctness.

Also, sleepable programs cannot be attached to non-faultable
tracepoints, so whenever program or link is sleepable, only RCU Tasks
Trace protection is being used for the link and prog.

Fixes: a46023d5616e ("tracing: Guard __DECLARE_TRACE() use of __DO_TRACE_CALL() with SRCU-fast")
Reviewed-by: Sun Jian <sun.jian.kdev@gmail.com>
Reviewed-by: Puranjay Mohan <puranjay@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Steven Rostedt (Google) <rostedt@goodmis.org>
Link: https://lore.kernel.org/r/20260331211021.1632902-2-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
include/linux/tracepoint.h
kernel/bpf/syscall.c

index 05b34a6355b0350fd2d52e27e972f7217e6f833a..35b1e25bd1043702ef8a2710eece01055cff6bc7 100644 (file)
@@ -1854,6 +1854,10 @@ struct bpf_link_ops {
         * target hook is sleepable, we'll go through tasks trace RCU GP and
         * then "classic" RCU GP; this need for chaining tasks trace and
         * classic RCU GPs is designated by setting bpf_link->sleepable flag
+        *
+        * For non-sleepable tracepoint links we go through SRCU gp instead,
+        * since RCU is not used in that case. Sleepable tracepoints still
+        * follow the scheme above.
         */
        void (*dealloc_deferred)(struct bpf_link *link);
        int (*detach)(struct bpf_link *link);
index 22ca1c8b54f32b65a34d3324dc72f397cc54ecd6..1d7f29f5e901474424c3c4075df10ed7c996112c 100644 (file)
@@ -122,6 +122,22 @@ static inline bool tracepoint_is_faultable(struct tracepoint *tp)
 {
        return tp->ext && tp->ext->faultable;
 }
+/*
+ * Run RCU callback with the appropriate grace period wait for non-faultable
+ * tracepoints, e.g., those used in atomic context.
+ */
+static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func)
+{
+       call_srcu(&tracepoint_srcu, rcu, func);
+}
+/*
+ * Run RCU callback with the appropriate grace period wait for faultable
+ * tracepoints, e.g., those used in syscall context.
+ */
+static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func)
+{
+       call_rcu_tasks_trace(rcu, func);
+}
 #else
 static inline void tracepoint_synchronize_unregister(void)
 { }
@@ -129,6 +145,10 @@ static inline bool tracepoint_is_faultable(struct tracepoint *tp)
 {
        return false;
 }
+static inline void call_tracepoint_unregister_atomic(struct rcu_head *rcu, rcu_callback_t func)
+{  }
+static inline void call_tracepoint_unregister_syscall(struct rcu_head *rcu, rcu_callback_t func)
+{  }
 #endif
 
 #ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
index 274039e36465dfe4d79f20c3bb39a77a104ad36b..700938782bed2fae4c2ea6c8df3f9b2635a8f3b6 100644 (file)
@@ -3261,6 +3261,18 @@ static void bpf_link_defer_dealloc_rcu_gp(struct rcu_head *rcu)
        bpf_link_dealloc(link);
 }
 
+static bool bpf_link_is_tracepoint(struct bpf_link *link)
+{
+       /*
+        * Only these combinations support a tracepoint bpf_link.
+        * BPF_LINK_TYPE_TRACING raw_tp progs are hardcoded to use
+        * bpf_raw_tp_link_lops and thus dealloc_deferred(), see
+        * bpf_raw_tp_link_attach().
+        */
+       return link->type == BPF_LINK_TYPE_RAW_TRACEPOINT ||
+              (link->type == BPF_LINK_TYPE_TRACING && link->attach_type == BPF_TRACE_RAW_TP);
+}
+
 static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
 {
        if (rcu_trace_implies_rcu_gp())
@@ -3279,16 +3291,25 @@ static void bpf_link_free(struct bpf_link *link)
        if (link->prog)
                ops->release(link);
        if (ops->dealloc_deferred) {
-               /* Schedule BPF link deallocation, which will only then
+               /*
+                * Schedule BPF link deallocation, which will only then
                 * trigger putting BPF program refcount.
                 * If underlying BPF program is sleepable or BPF link's target
                 * attach hookpoint is sleepable or otherwise requires RCU GPs
                 * to ensure link and its underlying BPF program is not
                 * reachable anymore, we need to first wait for RCU tasks
-                * trace sync, and then go through "classic" RCU grace period
+                * trace sync, and then go through "classic" RCU grace period.
+                *
+                * For tracepoint BPF links, we need to go through SRCU grace
+                * period wait instead when non-faultable tracepoint is used. We
+                * don't need to chain SRCU grace period waits, however, for the
+                * faultable case, since it exclusively uses RCU Tasks Trace.
                 */
                if (link->sleepable || (link->prog && link->prog->sleepable))
                        call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+               /* We need to do a SRCU grace period wait for non-faultable tracepoint BPF links. */
+               else if (bpf_link_is_tracepoint(link))
+                       call_tracepoint_unregister_atomic(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
                else
                        call_rcu(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
        } else if (ops->dealloc) {