static inline void exit_tasks_rcu_finish(void) { }
#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
-/**
- * rcu_trace_implies_rcu_gp - does an RCU Tasks Trace grace period imply an RCU grace period?
- *
- * As an accident of implementation, an RCU Tasks Trace grace period also
- * acts as an RCU grace period. However, this could change at any time.
- * Code relying on this accident must call this function to verify that
- * this accident is still happening.
- *
- * You have been warned!
- */
-static inline bool rcu_trace_implies_rcu_gp(void) { return true; }
-
/**
* cond_resched_tasks_rcu_qs - Report potential quiescent states to RCU
*
{
struct bpf_prog_array *progs;
- /* If RCU Tasks Trace grace period implies RCU grace period, there is
- * no need to call kfree_rcu(), just call kfree() directly.
+ /*
+ * An RCU Tasks Trace grace period implies an RCU grace period, so there
+ * is no need to call kfree_rcu(); just call kfree() directly.
*/
progs = container_of(rcu, struct bpf_prog_array, rcu);
- if (rcu_trace_implies_rcu_gp())
- kfree(progs);
- else
- kfree_rcu(progs, rcu);
+ kfree(progs);
}
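Aside: the reason a bare kfree() in the Tasks Trace callback is safe is that the single grace period covers both reader classes. A minimal sketch of the two reader sides; sleepable_prog_reader() and normal_prog_reader() are hypothetical names for illustration, not from this patch:

#include <linux/bpf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>

/* Illustrative only: sleepable BPF programs read under Tasks Trace RCU. */
static void sleepable_prog_reader(struct bpf_prog_array *progs)
{
	rcu_read_lock_trace();
	/* ... dereference progs ... */
	rcu_read_unlock_trace();
}

/* Illustrative only: non-sleepable programs read under plain RCU. */
static void normal_prog_reader(struct bpf_prog_array *progs)
{
	rcu_read_lock();
	/* ... dereference progs ... */
	rcu_read_unlock();
}

Because an RCU Tasks Trace grace period also implies an RCU grace period, waiting for the former waits out both reader types, so no kfree_rcu() chaining is needed in the callback above.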
void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs)
return;
}
- /* rcu_trace_implies_rcu_gp() is true and will remain so */
+ /* An RCU Tasks Trace grace period implies an RCU grace period. */
bpf_async_cb_rcu_free(rcu);
}
atomic_set(&c->call_rcu_ttrace_in_progress, 0);
}
-static void __free_rcu_tasks_trace(struct rcu_head *head)
-{
- /* If RCU Tasks Trace grace period implies RCU grace period,
- * there is no need to invoke call_rcu().
- */
- if (rcu_trace_implies_rcu_gp())
- __free_rcu(head);
- else
- call_rcu(head, __free_rcu);
-}
-
static void enque_to_free(struct bpf_mem_cache *c, void *obj)
{
struct llist_node *llnode = obj;
return;
}
- /* Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
- * If RCU Tasks Trace grace period implies RCU grace period, free
- * these elements directly, else use call_rcu() to wait for normal
- * progs to finish and finally do free_one() on each element.
+ /*
+ * Use call_rcu_tasks_trace() to wait for sleepable progs to finish.
+ * An RCU Tasks Trace grace period implies an RCU grace period, so
+ * __free_rcu() can be passed directly as the callback.
*/
- call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu_tasks_trace);
+ call_rcu_tasks_trace(&c->rcu_ttrace, __free_rcu);
}
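The shape this hunk converges on can be condensed into a standalone sketch; demo_obj, demo_free_cb() and demo_free() are hypothetical names, not part of the patch. An object that may be read under either RCU flavor is freed with a single call_rcu_tasks_trace():

#include <linux/rcupdate_trace.h>
#include <linux/slab.h>

struct demo_obj {
	struct rcu_head rcu;
	/* payload ... */
};

static void demo_free_cb(struct rcu_head *head)
{
	/*
	 * Runs after a Tasks Trace grace period, which also implies a
	 * plain RCU grace period, so readers of both flavors are done;
	 * chaining a further call_rcu() here would be redundant.
	 */
	kfree(container_of(head, struct demo_obj, rcu));
}

static void demo_free(struct demo_obj *obj)
{
	call_rcu_tasks_trace(&obj->rcu, demo_free_cb);
}

This is exactly why the __free_rcu_tasks_trace() trampoline above can be deleted: rcu_trace_implies_rcu_gp() always returned true, so its else branch never ran.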
static void free_bulk(struct bpf_mem_cache *c)
static void free_mem_alloc(struct bpf_mem_alloc *ma)
{
- /* waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
+ /*
+ * waiting_for_gp[_ttrace] lists were drained, but RCU callbacks
* might still execute. Wait for them.
*
* rcu_barrier_tasks_trace() doesn't imply synchronize_rcu_tasks_trace(),
* but rcu_barrier_tasks_trace() and rcu_barrier() below are only used
- * to wait for the pending __free_rcu_tasks_trace() and __free_rcu(),
- * so if call_rcu(head, __free_rcu) is skipped due to
- * rcu_trace_implies_rcu_gp(), it will be OK to skip rcu_barrier() by
- * using rcu_trace_implies_rcu_gp() as well.
+ * to wait for the pending __free_by_rcu() and __free_rcu() callbacks.
+ * An RCU Tasks Trace grace period implies an RCU grace period, so
+ * __free_rcu() needs no extra call_rcu() (and thus no extra
+ * rcu_barrier() here).
*/
rcu_barrier(); /* wait for __free_by_rcu */
rcu_barrier_tasks_trace(); /* wait for __free_rcu */
- if (!rcu_trace_implies_rcu_gp())
- rcu_barrier();
free_mem_alloc_no_barrier(ma);
}
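A note on the barrier pairing above, since it is easy to misread: each barrier flavor only flushes callbacks queued through the matching call_*() primitive. A schematic sketch, with demo_flush() as a hypothetical name:

#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>

static void demo_flush(void)
{
	/* Flushes callbacks queued with call_rcu(), e.g. __free_by_rcu(). */
	rcu_barrier();
	/* Flushes callbacks queued with call_rcu_tasks_trace(), e.g. __free_rcu(). */
	rcu_barrier_tasks_trace();
}

Before this change, __free_rcu_tasks_trace() could in principle re-queue work via call_rcu() (its else branch), which is why a trailing rcu_barrier() was conditionally required; with __free_rcu() invoked directly from the Tasks Trace callback, nothing is re-queued and the conditional barrier goes away.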
bpf_map_free_in_work(container_of(rcu, struct bpf_map, rcu));
}
-static void bpf_map_free_mult_rcu_gp(struct rcu_head *rcu)
-{
- if (rcu_trace_implies_rcu_gp())
- bpf_map_free_rcu_gp(rcu);
- else
- call_rcu(rcu, bpf_map_free_rcu_gp);
-}
-
/* decrement map refcnt and schedule it for freeing via workqueue
* (underlying map implementation ops->map_free() might sleep)
*/
bpf_map_free_id(map);
WARN_ON_ONCE(atomic64_read(&map->sleepable_refcnt));
+ /* An RCU Tasks Trace grace period implies an RCU grace period. */
if (READ_ONCE(map->free_after_mult_rcu_gp))
- call_rcu_tasks_trace(&map->rcu, bpf_map_free_mult_rcu_gp);
+ call_rcu_tasks_trace(&map->rcu, bpf_map_free_rcu_gp);
else if (READ_ONCE(map->free_after_rcu_gp))
call_rcu(&map->rcu, bpf_map_free_rcu_gp);
else
(link->type == BPF_LINK_TYPE_TRACING && link->attach_type == BPF_TRACE_RAW_TP);
}
-static void bpf_link_defer_dealloc_mult_rcu_gp(struct rcu_head *rcu)
-{
- if (rcu_trace_implies_rcu_gp())
- bpf_link_defer_dealloc_rcu_gp(rcu);
- else
- call_rcu(rcu, bpf_link_defer_dealloc_rcu_gp);
-}
-
/* bpf_link_free is guaranteed to be called from process context */
static void bpf_link_free(struct bpf_link *link)
{
* faultable case, since it exclusively uses RCU Tasks Trace.
*/
if (link->sleepable || (link->prog && link->prog->sleepable))
- call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_mult_rcu_gp);
+ /* An RCU Tasks Trace grace period implies an RCU grace period. */
+ call_rcu_tasks_trace(&link->rcu, bpf_link_defer_dealloc_rcu_gp);
/* We need to do a SRCU grace period wait for non-faultable tracepoint BPF links. */
else if (bpf_link_is_tracepoint(link))
call_tracepoint_unregister_atomic(&link->rcu, bpf_link_defer_dealloc_rcu_gp);