bpf: Fix theoretical prog_array UAF in __uprobe_perf_func()
Author:     Jann Horn <jannh@google.com>
AuthorDate: Tue, 10 Dec 2024 19:08:14 +0000 (20:08 +0100)
Commit:     Andrii Nakryiko <andrii@kernel.org>
CommitDate: Tue, 10 Dec 2024 21:06:51 +0000 (13:06 -0800)
Currently, the pointer stored in call->prog_array is loaded in
__uprobe_perf_func(), with no RCU annotation and no immediately visible
RCU protection, so it looks as if the loaded pointer can immediately be
dangling.
Later, bpf_prog_run_array_uprobe() starts an RCU-trace read-side critical
section, but this is too late. It then uses rcu_dereference_check(), but
because the __rcu pointer was already loaded and passed in by value, this
use of rcu_dereference_check() does not actually dereference anything.

Fix it by aligning the semantics to bpf_prog_run_array(): Let the caller
provide rcu_read_lock_trace() protection and then load call->prog_array
with rcu_dereference_check().
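
Condensed from the two hunks below (a summary of the patch itself, not
additional code), the calling convention changes like so:

	/* before: the __rcu pointer is passed in, and the callee locks */
	ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);

	/* after: the caller locks, dereferences, then calls */
	rcu_read_lock_trace();
	array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
	ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
	rcu_read_unlock_trace();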

This issue seems to be theoretical: I don't know of any way to reach this
code without having handle_swbp() further up the stack, which is already
holding an rcu_read_lock_trace() lock, so the rcu_read_lock_trace() taken
in __uprobe_perf_func()/bpf_prog_run_array_uprobe() doesn't actually have
any effect.
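
For illustration, the "caller locks, callee asserts" contract adopted here
looks like this in isolation (a minimal sketch with hypothetical names,
not code from this patch):

	#include <linux/rcupdate.h>
	#include <linux/rcupdate_trace.h>

	struct cfg { int val; };
	static struct cfg __rcu *live_cfg;	/* hypothetical RCU-managed state */

	/* The callee no longer takes the lock; it only asserts that the
	 * caller holds it, as bpf_prog_run_array_uprobe() now does. */
	static int cfg_read(const struct cfg *c)
	{
		RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
		return c ? c->val : -1;
	}

	static int cfg_query(void)
	{
		const struct cfg *c;
		int v;

		rcu_read_lock_trace();
		/* The load must happen inside the critical section; the loaded
		 * pointer stays valid only until rcu_read_unlock_trace(). */
		c = rcu_dereference_check(live_cfg, rcu_read_lock_trace_held());
		v = cfg_read(c);
		rcu_read_unlock_trace();
		return v;
	}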

Fixes: 8c7dcb84e3b7 ("bpf: implement sleepable uprobes by chaining gps")
Suggested-by: Andrii Nakryiko <andrii@kernel.org>
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20241210-bpf-fix-uprobe-uaf-v4-1-5fc8959b2b74@google.com
include/linux/bpf.h
kernel/trace/trace_uprobe.c

diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index fe392d07497302740a6c1c9f4cc30f2471cfd0ed..805040813f5d5d5d7a88ede9cd29f233e9d3c08d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -2194,26 +2194,25 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
  * rcu-protected dynamically sized maps.
  */
 static __always_inline u32
-bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
+bpf_prog_run_array_uprobe(const struct bpf_prog_array *array,
                          const void *ctx, bpf_prog_run_fn run_prog)
 {
        const struct bpf_prog_array_item *item;
        const struct bpf_prog *prog;
-       const struct bpf_prog_array *array;
        struct bpf_run_ctx *old_run_ctx;
        struct bpf_trace_run_ctx run_ctx;
        u32 ret = 1;
 
        might_fault();
+       RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held");
+
+       if (unlikely(!array))
+               return ret;
 
-       rcu_read_lock_trace();
        migrate_disable();
 
        run_ctx.is_uprobe = true;
 
-       array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
-       if (unlikely(!array))
-               goto out;
        old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
        item = &array->items[0];
        while ((prog = READ_ONCE(item->prog))) {
@@ -2228,9 +2227,7 @@ bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
                        rcu_read_unlock();
        }
        bpf_reset_run_ctx(old_run_ctx);
-out:
        migrate_enable();
-       rcu_read_unlock_trace();
        return ret;
 }
 
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index fed382b7881b82ee3c334ea77860cce77581a74d..4875e7f5de3db249af34c539c079fbedd38f4107 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1402,9 +1402,13 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 
 #ifdef CONFIG_BPF_EVENTS
        if (bpf_prog_array_valid(call)) {
+               const struct bpf_prog_array *array;
                u32 ret;
 
-               ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
+               rcu_read_lock_trace();
+               array = rcu_dereference_check(call->prog_array, rcu_read_lock_trace_held());
+               ret = bpf_prog_run_array_uprobe(array, regs, bpf_prog_run);
+               rcu_read_unlock_trace();
                if (!ret)
                        return;
        }