]> git.ipfire.org Git - thirdparty/linux.git/commitdiff
bpf: Fix out-of-bounds read in check_atomic_load/store()
author Kohei Enju <enjuk@amazon.com>
Sat, 22 Mar 2025 04:52:55 +0000 (13:52 +0900)
committer Alexei Starovoitov <ast@kernel.org>
Sat, 22 Mar 2025 13:15:57 +0000 (06:15 -0700)
syzbot reported the following splat [0].

In check_atomic_load/store(), register validity is not checked before
atomic_ptr_type_ok(). This causes the out-of-bounds read in is_ctx_reg()
called from atomic_ptr_type_ok() when the register number is MAX_BPF_REG
or greater.

Call check_load_mem()/check_store_reg() before atomic_ptr_type_ok()
to avoid the OOB read.

However, some tests introduced by commit ff3afe5da998 ("selftests/bpf: Add
selftests for load-acquire and store-release instructions") assume
calling atomic_ptr_type_ok() before checking register validity.
Therefore, swapping the order of the checks unintentionally changes the
verifier messages of these tests.

For example, in the test load_acquire_from_pkt_pointer(), the expected
message is 'BPF_ATOMIC loads from R2 pkt is not allowed', but the actual
message now differs:

  validate_msgs:FAIL:754 expect_msg
  VERIFIER LOG:
  =============
  Global function load_acquire_from_pkt_pointer() doesn't return scalar. Only those are supported.
  0: R1=ctx() R10=fp0
  ; asm volatile ( @ verifier_load_acquire.c:140
  0: (61) r2 = *(u32 *)(r1 +0)          ; R1=ctx() R2_w=pkt(r=0)
  1: (d3) r0 = load_acquire((u8 *)(r2 +0))
  invalid access to packet, off=0 size=1, R2(id=0,off=0,r=0)
  R2 offset is outside of the packet
  processed 2 insns (limit 1000000) max_states_per_insn 0 total_states 0 peak_states 0 mark_read 0
  =============
  EXPECTED   SUBSTR: 'BPF_ATOMIC loads from R2 pkt is not allowed'
  #505/19  verifier_load_acquire/load-acquire from pkt pointer:FAIL

This is because instructions in the test don't pass check_load_mem() and
therefore don't enter the atomic_ptr_type_ok() path.
In this case, we have to modify instructions so that they pass the
check_load_mem() and trigger atomic_ptr_type_ok().
Similarly for store-release tests, we need to modify instructions so that
they pass check_store_reg().

Like load_acquire_from_pkt_pointer(), modify instructions in:
  load_acquire_from_sock_pointer()
  store_release_to_ctx_pointer()
  store_release_to_pkt_pointer()

Also in store_release_to_sock_pointer(), check_store_reg() returns error
early and atomic_ptr_type_ok() is not triggered, since write to sock
pointer is not possible in general.
We might be able to remove the test, but for now let's leave it and just
change the expected message.

[0]
 BUG: KASAN: slab-out-of-bounds in is_ctx_reg kernel/bpf/verifier.c:6185 [inline]
 BUG: KASAN: slab-out-of-bounds in atomic_ptr_type_ok+0x3d7/0x550 kernel/bpf/verifier.c:6223
 Read of size 4 at addr ffff888141b0d690 by task syz-executor143/5842

 CPU: 1 UID: 0 PID: 5842 Comm: syz-executor143 Not tainted 6.14.0-rc3-syzkaller-gf28214603dc6 #0
 Call Trace:
  <TASK>
  __dump_stack lib/dump_stack.c:94 [inline]
  dump_stack_lvl+0x241/0x360 lib/dump_stack.c:120
  print_address_description mm/kasan/report.c:408 [inline]
  print_report+0x16e/0x5b0 mm/kasan/report.c:521
  kasan_report+0x143/0x180 mm/kasan/report.c:634
  is_ctx_reg kernel/bpf/verifier.c:6185 [inline]
  atomic_ptr_type_ok+0x3d7/0x550 kernel/bpf/verifier.c:6223
  check_atomic_store kernel/bpf/verifier.c:7804 [inline]
  check_atomic kernel/bpf/verifier.c:7841 [inline]
  do_check+0x89dd/0xedd0 kernel/bpf/verifier.c:19334
  do_check_common+0x1678/0x2080 kernel/bpf/verifier.c:22600
  do_check_main kernel/bpf/verifier.c:22691 [inline]
  bpf_check+0x165c8/0x1cca0 kernel/bpf/verifier.c:23821

Reported-by: syzbot+a5964227adc0f904549c@syzkaller.appspotmail.com
Closes: https://syzkaller.appspot.com/bug?extid=a5964227adc0f904549c
Tested-by: syzbot+a5964227adc0f904549c@syzkaller.appspotmail.com
Fixes: e24bbad29a8d ("bpf: Introduce load-acquire and store-release instructions")
Fixes: ff3afe5da998 ("selftests/bpf: Add selftests for load-acquire and store-release instructions")
Signed-off-by: Kohei Enju <enjuk@amazon.com>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250322045340.18010-5-enjuk@amazon.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
kernel/bpf/verifier.c
tools/testing/selftests/bpf/progs/verifier_load_acquire.c
tools/testing/selftests/bpf/progs/verifier_store_release.c

index 41fd93db825842d68c926fcf6d8b0217ad744ba2..8ad7338e726be5538f6ce2e73cd853778d2d9155 100644 (file)
@@ -7788,6 +7788,12 @@ static int check_atomic_rmw(struct bpf_verifier_env *env,
 static int check_atomic_load(struct bpf_verifier_env *env,
                             struct bpf_insn *insn)
 {
+       int err;
+
+       err = check_load_mem(env, insn, true, false, false, "atomic_load");
+       if (err)
+               return err;
+
        if (!atomic_ptr_type_ok(env, insn->src_reg, insn)) {
                verbose(env, "BPF_ATOMIC loads from R%d %s is not allowed\n",
                        insn->src_reg,
@@ -7795,12 +7801,18 @@ static int check_atomic_load(struct bpf_verifier_env *env,
                return -EACCES;
        }
 
-       return check_load_mem(env, insn, true, false, false, "atomic_load");
+       return 0;
 }
 
 static int check_atomic_store(struct bpf_verifier_env *env,
                              struct bpf_insn *insn)
 {
+       int err;
+
+       err = check_store_reg(env, insn, true);
+       if (err)
+               return err;
+
        if (!atomic_ptr_type_ok(env, insn->dst_reg, insn)) {
                verbose(env, "BPF_ATOMIC stores into R%d %s is not allowed\n",
                        insn->dst_reg,
@@ -7808,7 +7820,7 @@ static int check_atomic_store(struct bpf_verifier_env *env,
                return -EACCES;
        }
 
-       return check_store_reg(env, insn, true);
+       return 0;
 }
 
 static int check_atomic(struct bpf_verifier_env *env, struct bpf_insn *insn)
index 608097453832709f439d13e73fdfbc0d0674be27..1babe9ad9b4301837a6ab974542c9629ab33031e 100644 (file)
@@ -139,10 +139,16 @@ __naked void load_acquire_from_pkt_pointer(void)
 {
        asm volatile (
        "r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+       "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+       "r1 = r2;"
+       "r1 += 8;"
+       "if r1 >= r3 goto l0_%=;"
        ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+"l0_%=:  r0 = 0;"
        "exit;"
        :
        : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+         __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
          __imm_insn(load_acquire_insn,
                     BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
        : __clobber_all);
@@ -172,12 +178,14 @@ __naked void load_acquire_from_sock_pointer(void)
 {
        asm volatile (
        "r2 = *(u64 *)(r1 + %[sk_reuseport_md_sk]);"
-       ".8byte %[load_acquire_insn];" // w0 = load_acquire((u8 *)(r2 + 0));
+       // w0 = load_acquire((u8 *)(r2 + offsetof(struct bpf_sock, family)));
+       ".8byte %[load_acquire_insn];"
        "exit;"
        :
        : __imm_const(sk_reuseport_md_sk, offsetof(struct sk_reuseport_md, sk)),
          __imm_insn(load_acquire_insn,
-                    BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2, 0))
+                    BPF_ATOMIC_OP(BPF_B, BPF_LOAD_ACQ, BPF_REG_0, BPF_REG_2,
+                                  offsetof(struct bpf_sock, family)))
        : __clobber_all);
 }
 
index f1c64c08f25fbfe93c9052d47223702d5f7c74f7..cd6f1e5f378bb3a3509e9c79b4d56846617dd5d4 100644 (file)
@@ -140,11 +140,13 @@ __naked void store_release_to_ctx_pointer(void)
 {
        asm volatile (
        "w0 = 0;"
-       ".8byte %[store_release_insn];" // store_release((u8 *)(r1 + 0), w0);
+       // store_release((u8 *)(r1 + offsetof(struct __sk_buff, cb[0])), w0);
+       ".8byte %[store_release_insn];"
        "exit;"
        :
        : __imm_insn(store_release_insn,
-                    BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0, 0))
+                    BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_1, BPF_REG_0,
+                                  offsetof(struct __sk_buff, cb[0])))
        : __clobber_all);
 }
 
@@ -156,10 +158,16 @@ __naked void store_release_to_pkt_pointer(void)
        asm volatile (
        "w0 = 0;"
        "r2 = *(u32 *)(r1 + %[xdp_md_data]);"
+       "r3 = *(u32 *)(r1 + %[xdp_md_data_end]);"
+       "r1 = r2;"
+       "r1 += 8;"
+       "if r1 >= r3 goto l0_%=;"
        ".8byte %[store_release_insn];" // store_release((u8 *)(r2 + 0), w0);
+"l0_%=:  r0 = 0;"
        "exit;"
        :
        : __imm_const(xdp_md_data, offsetof(struct xdp_md, data)),
+         __imm_const(xdp_md_data_end, offsetof(struct xdp_md, data_end)),
          __imm_insn(store_release_insn,
                     BPF_ATOMIC_OP(BPF_B, BPF_STORE_REL, BPF_REG_2, BPF_REG_0, 0))
        : __clobber_all);
@@ -185,7 +193,7 @@ __naked void store_release_to_flow_keys_pointer(void)
 
 SEC("sk_reuseport")
 __description("store-release to sock pointer")
-__failure __msg("BPF_ATOMIC stores into R2 sock is not allowed")
+__failure __msg("R2 cannot write into sock")
 __naked void store_release_to_sock_pointer(void)
 {
        asm volatile (