]> git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
bpf: Retire rcu_trace_implies_rcu_gp()
authorKumar Kartikeya Dwivedi <memxor@gmail.com>
Tue, 7 Apr 2026 16:22:33 +0000 (18:22 +0200)
committerAlexei Starovoitov <ast@kernel.org>
Tue, 7 Apr 2026 19:24:49 +0000 (12:24 -0700)
RCU Tasks Trace grace period implies RCU grace period, and this
guarantee is expected to remain in the future. BPF is the only user of
this predicate, hence retire the API and clean up all in-tree users.

RCU Tasks Trace is now implemented on SRCU-fast and its grace period
mechanism always has at least one call to synchronize_rcu() as it is
required for SRCU-fast's correctness (it replaces the smp_mb() that
SRCU-fast readers skip). So, RCU-tt GP will always imply RCU GP.

Reviewed-by: Puranjay Mohan <puranjay@kernel.org>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Link: https://lore.kernel.org/r/20260407162234.785270-1-memxor@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/rcupdate.h
kernel/bpf/core.c
kernel/bpf/helpers.c
kernel/bpf/memalloc.c
kernel/bpf/syscall.c

index 04f3f86a41450025718120319213eada8a265242..bfa765132de858227563c92e052d3b34c44e87f5 100644 (file)
@@ -205,18 +205,6 @@ static inline void exit_tasks_rcu_start(void) { }
 static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
 
-/**
- * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
- *
- * As an accident of implementation, an RCU Tasks Trace grace period also
- * acts as an RCU grace period.  However, this could change at any time.
- * Code relying on this accident must call this function to verify that
- * this accident is still happening.
- *
- * You have been warned!
- */
-static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
-
 /**
  * cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
  *
index ee632f41bd83a4886ea8d111040235e8e9833096..89b89f55415c397c84849e46dbe89e4cbd1fcd75 100644 (file)
@@ -2644,14 +2644,12 @@ static void __bpf_prog_array_free_sleepable_cb(struct rcu_head *rcu)
 {
        struct bpf_prog_array *progs;
 
-       /* If RCU Tasks Trace grace period implies RCU grace period, there is
-        * no need to call kfree_rcu(), just call kfree() directly.
+       /*
+        * RCU Tasks Trace grace period implies RCU grace period, there is no
+        * need to call kfree_rcu(), just call kfree() directly.
         */
        progs = container_of(rcu, struct bpf_prog_array, rcu);
-       if (rcu_trace_implies_rcu_gp())
-               kfree(progs);
-       else
-               kfree_rcu(progs, rcu);
+       kfree(progs);
 }
 
 void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
index 8352b7ee0f4d13457c804c0721cedc2346bf2df3..bb95e287b0dc81d681953651f18d644ffde84845 100644 (file)
@@ -1272,7 +1272,7 @@ static void bpf_async_cb_rcu_tasks_trace_free(struct rcu_head *rcu)
                return;
        }
 
-       /* rcu_trace_implies_rcu_gp() is true and will remain so */
+       /* RCU Tasks Trace grace period implies RCU grace period. */
        bpf_async_cb_rcu_free(rcu);
 }
 
index 682a9f34214bd31de00cbceaa14dfaa8941ad9ac..e9662db7198fe09ca0809e8bb41c04e48450ec0d 100644 (file)
@@ -284,17 +284,6 @@ static void __free_rcu(struct rcu_head *head)
        atomic_set(&c->call_rcu_ttrace_in_progress, 0);
 }
 
-static void __free_rcu_tasks_trace(struct rcu_head *head)
-{
-       /* If RCU Tasks Trace grace period implies RCU grace period,
-        * there is no need to invoke call_rcu().
-        */
-       if (rcu_trace_implies_rcu_gp())
-               __free_rcu(head);
-       else
-               call_rcu(head, __free_rcu);
-}
-
 static void enque_to_free(struct bpf_mem_cache *c, void *obj)
 {
        struct llist_node *llnode = obj;
@@ -326,12 +315,12 @@ static void do_call_rcu_ttrace(struct bpf_mem_cache *c)
                return;
        }
 
-       /* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
-        * If RCU Tasks Trace grace period implies RCU grace period, free
-        * these elements directly, else use call_rcu() to wait for normal
-        * progs to finish and finally do free_one() on each element.
+       /*
+        * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
+        * RCU Tasks Trace grace period implies RCU grace period, so pass
+        * __free_rcu directly as the callback.
         */
-       call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
+       call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu);
 }
 
 static void free_bulk(struct bpf_mem_cache *c)
@@ -696,20 +685,18 @@ static void free_mem_alloc_no_barrier(struct bpf_mem_alloc *ma)
 
 static void free_mem_alloc(struct bpf_mem_alloc *ma)
 {
-       /* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
+       /*
+        * waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
         * might still execute. Wait for them.
         *
         * rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
         * but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
-        * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
-        * so if call_rcu(head, __free_rcu) is skipped due to
-        * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
-        * using rcu_trace_implies_rcu_gp() as well.
+        * to wait for the pending __free_by_rcu(), and __free_rcu(). RCU Tasks
+        * Trace grace period implies RCU grace period, so all __free_rcu don't
+        * need extra call_rcu() (and thus extra rcu_barrier() here).
         */
        rcu_barrier(); /* wait for __free_by_rcu */
        rcu_barrier_tasks_trace(); /* wait for __free_rcu */
-       if (!rcu_trace_implies_rcu_gp())
-               rcu_barrier();
        free_mem_alloc_no_barrier(ma);
 }
 
index f1044ab9b03be33195c52a7dc76a5ce3de7da22a..b73b25c630734e2699f559f1e6cdbb41f4418341 100644 (file)
@@ -941,14 +941,6 @@ static void bpf_map_free_rcu_gp(struct rcu_head *rcu)
        bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
 }
 
-static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
-{
-       if (rcu_trace_implies_rcu_gp())
-               bpf_map_free_rcu_gp(rcu);
-       else
-               call_rcu(rcu, bpf_map_free_rcu_gp);
-}
-
 /* decrement map refcnt and schedule it for freeing via workqueue
  * (underlying map implementation ops->map_free() might sleep)
  */
@@ -959,8 +951,9 @@ void bpf_map_put(struct bpf_map *map)
                bpf_map_free_id(map);
 
                WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
+               /* RCU tasks trace grace period implies RCU grace period. */
                if (READ_ONCE(map->free_after_mult_rcu_gp))
-                       call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
+                       call_rcu_tasks_trace(&map->rcu, bpf_map_free_rcu_gp);
                else if (READ_ONCE(map->free_after_rcu_gp))
                        call_rcu(&map->rcu, bpf_map_free_rcu_gp);
                else
@@ -3273,14 +3266,6 @@ static bool bpf_link_is_tracepoint(struct bpf_link *link)
               (link->type == BPF_LINK_TYPE_TRACING && link->attach_type == BPF_TRACE_RAW_TP);
 }
 
-static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
-{
-       if (rcu_trace_implies_rcu_gp())
-               bpf_link_defer_dealloc_rcu_gp(rcu);
-       else
-               call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
-}
-
 /* bpf_link_free is guaranteed to be called from process context */
 static void bpf_link_free(struct bpf_link *link)
 {
@@ -3306,7 +3291,8 @@ static void bpf_link_free(struct bpf_link *link)
                 * faultable case, since it exclusively uses RCU Tasks Trace.
                 */
                if (link->sleepable || (link->prog && link->prog->sleepable))
-                       call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+                       /* RCU Tasks Trace grace period implies RCU grace period. */
+                       call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
                /* We need to do a SRCU grace period wait for non-faultable tracepoint BPF links. */
                else if (bpf_link_is_tracepoint(link))
                        call_tracepoint_unregister_atomic(&link->rcu, bpf_link_defer_dealloc_rcu_gp);