]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
bpf: move recursion detection logic to helpers
authorPuranjay Mohan <puranjay@kernel.org>
Fri, 19 Dec 2025 18:44:17 +0000 (10:44 -0800)
committerAlexei Starovoitov <ast@kernel.org>
Sun, 21 Dec 2025 18:54:37 +0000 (10:54 -0800)
BPF programs detect recursion by doing atomic inc/dec on a per-cpu
active counter from the trampoline. Create two helpers for operations on
this active counter; this makes it easy to change the recursion
detection logic in the future.

This commit makes no functional changes.

Acked-by: Yonghong Song <yonghong.song@linux.dev>
Signed-off-by: Puranjay Mohan <puranjay@kernel.org>
Link: https://lore.kernel.org/r/20251219184422.2899902-2-puranjay@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
include/linux/bpf.h
kernel/bpf/trampoline.c
kernel/trace/bpf_trace.c

index bb3847caeae111a176d06cf52d9da14e7be3ce59..2da986136d262bb42347dc638ed6501bdb3ada7b 100644 (file)
@@ -2004,6 +2004,16 @@ struct bpf_struct_ops_common_value {
        enum bpf_struct_ops_state state;
 };
 
+static inline bool bpf_prog_get_recursion_context(struct bpf_prog *prog)
+{
+       return this_cpu_inc_return(*(prog->active)) == 1;
+}
+
+static inline void bpf_prog_put_recursion_context(struct bpf_prog *prog)
+{
+       this_cpu_dec(*(prog->active));
+}
+
 #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
 /* This macro helps developer to register a struct_ops type and generate
  * type information correctly. Developers should use this macro to register
index 976d89011b15702fc81ce6b8544cd3b1ff3d2e3c..2a125d063e62b534d7413f64448f22512b5608c1 100644 (file)
@@ -949,7 +949,7 @@ static u64 notrace __bpf_prog_enter_recur(struct bpf_prog *prog, struct bpf_tram
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(!bpf_prog_get_recursion_context(prog))) {
                bpf_prog_inc_misses_counter(prog);
                if (prog->aux->recursion_detected)
                        prog->aux->recursion_detected(prog);
@@ -993,7 +993,7 @@ static void notrace __bpf_prog_exit_recur(struct bpf_prog *prog, u64 start,
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       this_cpu_dec(*(prog->active));
+       bpf_prog_put_recursion_context(prog);
        rcu_read_unlock_migrate();
 }
 
@@ -1029,7 +1029,7 @@ u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(!bpf_prog_get_recursion_context(prog))) {
                bpf_prog_inc_misses_counter(prog);
                if (prog->aux->recursion_detected)
                        prog->aux->recursion_detected(prog);
@@ -1044,7 +1044,7 @@ void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       this_cpu_dec(*(prog->active));
+       bpf_prog_put_recursion_context(prog);
        migrate_enable();
        rcu_read_unlock_trace();
 }
index fe28d86f7c357667cf139e60623e9c3d393ad0b3..6e076485bf709511889a26f55709e25f236fd35c 100644 (file)
@@ -2063,7 +2063,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
        struct bpf_trace_run_ctx run_ctx;
 
        cant_sleep();
-       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(!bpf_prog_get_recursion_context(prog))) {
                bpf_prog_inc_misses_counter(prog);
                goto out;
        }
@@ -2077,7 +2077,7 @@ void __bpf_trace_run(struct bpf_raw_tp_link *link, u64 *args)
 
        bpf_reset_run_ctx(old_run_ctx);
 out:
-       this_cpu_dec(*(prog->active));
+       bpf_prog_put_recursion_context(prog);
 }
 
 #define UNPACK(...)                    __VA_ARGS__