]> git.ipfire.org Git - thirdparty/kernel/stable.git/commitdiff
bpf: Prevent bpf program recursion for raw tracepoint probes
authorJiri Olsa <jolsa@kernel.org>
Fri, 16 Sep 2022 07:19:14 +0000 (09:19 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 26 Nov 2022 08:27:56 +0000 (09:27 +0100)
commit 05b24ff9b2cfabfcfd951daaa915a036ab53c9e1 upstream.

We got a report from syzbot [1] about warnings that were caused by
bpf program attached to contention_begin raw tracepoint triggering
the same tracepoint by using bpf_trace_printk helper that takes
trace_printk_lock lock.

 Call Trace:
  <TASK>
  ? trace_event_raw_event_bpf_trace_printk+0x5f/0x90
  bpf_trace_printk+0x2b/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  bpf_trace_printk+0x3f/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  bpf_trace_printk+0x3f/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  bpf_trace_printk+0x3f/0xe0
  bpf_prog_a9aec6167c091eef_prog+0x1f/0x24
  bpf_trace_run2+0x26/0x90
  native_queued_spin_lock_slowpath+0x1c6/0x2b0
  _raw_spin_lock_irqsave+0x44/0x50
  __unfreeze_partials+0x5b/0x160
  ...

This can be reproduced by attaching bpf program as raw tracepoint on
contention_begin tracepoint. The bpf prog calls bpf_trace_printk
helper. Then by running perf bench the spin lock code is forced to
take slow path and call contention_begin tracepoint.

Fixing this by skipping execution of the bpf program if it's
already running, using the bpf prog 'active' field, which is
currently being used by trampoline programs for the same reason.

Moving bpf_prog_inc_misses_counter to syscall.c because
trampoline.c is compiled in just for CONFIG_BPF_JIT option.

Reviewed-by: Stanislav Fomichev <sdf@google.com>
Reported-by: syzbot+2251879aa068ad9c960d@syzkaller.appspotmail.com
[1] https://lore.kernel.org/bpf/YxhFe3EwqchC%2FfYf@krava/T/#t
Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Link: https://lore.kernel.org/r/20220916071914.7156-1-jolsa@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
include/linux/bpf.h
kernel/bpf/syscall.c
kernel/bpf/trampoline.c
kernel/trace/bpf_trace.c

index 80fc8a88c610dadaaa32e1489bf7d2223714f77b..73662fbabd78fa8b2b2f50bef0c64c4140069ed3 100644 (file)
@@ -1967,6 +1967,7 @@ static inline bool unprivileged_ebpf_enabled(void)
        return !sysctl_unprivileged_bpf_disabled;
 }
 
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog);
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
 {
@@ -2176,6 +2177,9 @@ static inline bool unprivileged_ebpf_enabled(void)
        return false;
 }
 
+static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 void __bpf_free_used_btfs(struct bpf_prog_aux *aux,
index 22e7a805c6723905403a32d10a490615d5e78202..0e758911d963fc400922e3e3f5811ed22bcfd62e 100644 (file)
@@ -2094,6 +2094,17 @@ struct bpf_prog_kstats {
        u64 misses;
 };
 
+void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog)
+{
+       struct bpf_prog_stats *stats;
+       unsigned int flags;
+
+       stats = this_cpu_ptr(prog->stats);
+       flags = u64_stats_update_begin_irqsave(&stats->syncp);
+       u64_stats_inc(&stats->misses);
+       u64_stats_update_end_irqrestore(&stats->syncp, flags);
+}
+
 static void bpf_prog_get_stats(const struct bpf_prog *prog,
                               struct bpf_prog_kstats *stats)
 {
index ad76940b02ccf81e970442137e64b655b2ecb04f..41b67eb83ab3f3a78b842d54a2dfa27861e0b964 100644 (file)
@@ -863,17 +863,6 @@ static __always_inline u64 notrace bpf_prog_start_time(void)
        return start;
 }
 
-static void notrace inc_misses_counter(struct bpf_prog *prog)
-{
-       struct bpf_prog_stats *stats;
-       unsigned int flags;
-
-       stats = this_cpu_ptr(prog->stats);
-       flags = u64_stats_update_begin_irqsave(&stats->syncp);
-       u64_stats_inc(&stats->misses);
-       u64_stats_update_end_irqrestore(&stats->syncp, flags);
-}
-
 /* The logic is similar to bpf_prog_run(), but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
@@ -896,7 +885,7 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *ru
        run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
 
        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-               inc_misses_counter(prog);
+               bpf_prog_inc_misses_counter(prog);
                return 0;
        }
        return bpf_prog_start_time();
@@ -967,7 +956,7 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_r
        might_fault();
 
        if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
-               inc_misses_counter(prog);
+               bpf_prog_inc_misses_counter(prog);
                return 0;
        }
 
index b1daf7c9b895ac6511c7cdeb584efaf92ca48ff3..ec4b81007796c238fb7e5ff309571db2cd2215d3 100644 (file)
@@ -2058,9 +2058,15 @@ static __always_inline
 void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
 {
        cant_sleep();
+       if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
+               bpf_prog_inc_misses_counter(prog);
+               goto out;
+       }
        rcu_read_lock();
        (void) bpf_prog_run(prog, args);
        rcu_read_unlock();
+out:
+       this_cpu_dec(*(prog->active));
 }
 
 #define UNPACK(...)                    __VA_ARGS__