bpf: Use this_cpu_{inc_return|dec} for prog->active
authorHou Tao <houtao1@huawei.com>
Thu, 1 Sep 2022 06:19:36 +0000 (14:19 +0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 24 Oct 2022 07:57:07 +0000 (09:57 +0200)
[ Upstream commit c89e843a11f1075d27684f6b42256213e4592383 ]

Both __this_cpu_inc_return() and __this_cpu_dec() are not preemption
safe, and migrate_disable() no longer disables preemption, so the update
of prog->active is not atomic; in theory, on a fully preemptible kernel
the recursion prevention may not work.

Fix this by using the preemption-safe and IRQ-safe variants.
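
For context, a minimal sketch of the per-CPU counter pattern involved
(kernel-style C, assuming <linux/percpu.h>; demo_active, demo_enter and
demo_exit are hypothetical names, not part of this patch):

  #include <linux/percpu.h>

  static DEFINE_PER_CPU(int, demo_active);

  /* Recursion check in the style of __bpf_prog_enter(): bump the per-CPU
   * counter and treat any value other than 1 as re-entry.  The underscored
   * __this_cpu_inc_return() expands to a plain load/add/store, so a task
   * preempted between those steps can lose an update; the non-underscored
   * this_cpu_inc_return() is preemption- and IRQ-safe.
   */
  static bool demo_enter(void)
  {
          return this_cpu_inc_return(demo_active) == 1;
  }

  static void demo_exit(void)
  {
          this_cpu_dec(demo_active);
  }

On architectures with a single-instruction per-CPU add (e.g. x86) the
safe variants cost the same as the underscored ones; the generic
fallback protects the read-modify-write by disabling interrupts.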

Fixes: ca06f55b9002 ("bpf: Add per-program recursion prevention mechanism")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/r/20220901061938.3789460-3-houtao@huaweicloud.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
kernel/bpf/trampoline.c

index 93c7675f0c9e7cef4fe93063b8125d1a4ace73ea..fe4f4d9d043b444eff18509394b017f8dece3aa0 100644
@@ -585,7 +585,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *ru
 
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
-       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                inc_misses_counter(prog);
                return 0;
        }
@@ -620,7 +620,7 @@ void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       __this_cpu_dec(*(prog->active));
+       this_cpu_dec(*(prog->active));
        migrate_enable();
        rcu_read_unlock();
 }
@@ -631,7 +631,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
        migrate_disable();
        might_fault();
 
-       if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
                inc_misses_counter(prog);
                return 0;
        }
@@ -647,7 +647,7 @@ void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
        bpf_reset_run_ctx(run_ctx->saved_run_ctx);
 
        update_prog_stats(prog, start);
-       __this_cpu_dec(*(prog->active));
+       this_cpu_dec(*(prog->active));
        migrate_enable();
        rcu_read_unlock_trace();
 }