selftests/bpf: tests with a loop state missing read/precision mark
author    Eduard Zingerman <eddyz87@gmail.com>
          Wed, 11 Jun 2025 20:08:36 +0000 (13:08 -0700)
committer Alexei Starovoitov <ast@kernel.org>
          Thu, 12 Jun 2025 23:52:43 +0000 (16:52 -0700)
The test case absent_mark_in_the_middle_state is equivalent to the
following C program:

   1: r8 = bpf_get_prandom_u32();
   2: r6 = -32;
   3: bpf_iter_num_new(&fp[-8], 0, 10);
   4: if (unlikely(bpf_get_prandom_u32()))
   5:   r6 = -31;
   6: for (;;) {
   7:   if (!bpf_iter_num_next(&fp[-8]))
   8:     break;
   9:   if (unlikely(bpf_get_prandom_u32()))
  10:     *(u64 *)(fp + r6) = 7;
  11: }
  12: bpf_iter_num_destroy(&fp[-8]);
  13: return 0;

Without a fix that instructs the verifier to ignore the branch count
for loop entries, verification proceeds as follows:
- 1-4, state is {r6=-32,fp-8=active};
- 6, checkpoint A is created with {r6=-32,fp-8=active};
- 7, checkpoint B is created with {r6=-32,fp-8=active},
     push state {r6=-32,fp-8=active} from 7 to 9;
- 8,12,13, {r6=-32,fp-8=drained}, exit;
- pop state with {r6=-32,fp-8=active} from 7 to 9;
- 9, push state {r6=-32,fp-8=active} from 9 to 10;
- 6, checkpoint C is created with {r6=-32,fp-8=active};
- 7, checkpoint B is hit, no precision propagated for r6 to C;
- pop state {r6=-32,fp-8=active} from 9 to 10;
- 10, state is {r6=-31,fp-8=active}, r6 is marked as read and precise,
      these marks are propagated to checkpoints A and B (but not C, as
      it is not the parent of the current state);
- 6, {r6=-31,fp-8=active}: checkpoint C is hit, because r6 is not
     marked precise in this checkpoint;
- the program is accepted, despite a possibility of unaligned u64
  stack access at offset -31.
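
The reason the bug goes unnoticed is that read/precision marks are
propagated only along the parent chain of verifier states. A minimal
sketch of that propagation (simplified stand-ins for the kernel's
verifier state and its parent link, not the actual verifier code):

    struct state {
        struct state *parent;   /* checkpoint this state was forked from */
        bool r6_precise;        /* precision mark for r6 */
    };

    static void mark_r6_precise(struct state *st)
    {
        /* Marks walk only the parent chain; a sibling checkpoint
         * such as C above is never visited, so it keeps r6 unmarked
         * and later matches the unsafe {r6=-31} state. */
        for (; st; st = st->parent)
            st->r6_precise = true;
    }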

The test case absent_mark_in_the_middle_state2 is similar, except for
the following change:

       r8 = bpf_get_prandom_u32();
       r6 = -32;
       bpf_iter_num_new(&fp[-8], 0, 10);
       if (unlikely(bpf_get_prandom_u32())) {
         r6 = -31;
 + jump_into_loop:
 +       goto +0;
 +       goto loop;
 +     }
 +     if (unlikely(bpf_get_prandom_u32()))
 +       goto jump_into_loop;
 + loop:
       for (;;) {
         if (!bpf_iter_num_next(&fp[-8]))
           break;
         if (unlikely(bpf_get_prandom_u32()))
           *(u64 *)(fp + r6) = 7;
       }
       bpf_iter_num_destroy(&fp[-8]);
       return 0;

The goal is to check that read/precision marks are propagated to the
checkpoint created at 'goto +0', which resides outside of the loop.
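
Note that 'goto +0' is a jump to the immediately following
instruction: a no-op whose only effect is to occupy a separate
instruction slot outside of the loop. As a sketch, assuming the
generic BPF_JA encoding from <linux/bpf.h> (not something introduced
by this patch):

    struct bpf_insn nop_jump = {
        .code = BPF_JMP | BPF_JA,   /* unconditional jump */
        .off  = 0,                  /* +0: target is the next insn */
    };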

The test case absent_mark_in_the_middle_state3 is a bit different and
is equivalent to the C program below:

    int absent_mark_in_the_middle_state3(void)
    {
      bpf_iter_num_new(&fp[-8], 0, 10);
      loop1(-32, &fp[-8]);
      bpf_iter_num_destroy(&fp[-8]);
      bpf_iter_num_new(&fp[-8], 0, 10);
      loop1_wrapper(&fp[-8]);
      bpf_iter_num_destroy(&fp[-8]);
      return 0;
    }

    int loop1(num, iter)
    {
      while (bpf_iter_num_next(iter)) {
        if (unlikely(bpf_get_prandom_u32()))
          *(u64 *)(fp + num) = 7;
      }
      return 0;
    }

    int loop1_wrapper(iter)
    {
      r6 = -32;
      if (unlikely(bpf_get_prandom_u32()))
        r6 = -31;
      loop1(r6, iter);
      return 0;
    }

The unsafe state is reached in a similar manner, but the loop is
located inside the subprogram loop1, which is reached through two
different call chains from the main subprogram (once directly and
once via loop1_wrapper). This detail is important for exercising
bpf_scc_visit->backedges memory management.
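
The resulting call structure is (a sketch based on the assembly
below):

    main:
      bpf_iter_num_new(&fp[-8], 0, 10)
      loop1(-32, &fp[-8])        /* callsite #1: num is always -32 */
      ...
      loop1_wrapper(&fp[-8])     /* callsite #2: num is -32 or -31 */
        -> loop1(r6, iter)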

Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250611200836.4135542-11-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
tools/testing/selftests/bpf/progs/iters.c

index 76adf4a8f2dad75e8900defc204e68b1e316a009..7dd92a303bf6b3f0fc2962f6ce6cc453350561e3 100644
@@ -1649,4 +1649,281 @@ int clean_live_states(const void *ctx)
        return 0;
 }
 
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state(void)
+{
+       /* This is equivalent to C program below.
+        *
+        * r8 = bpf_get_prandom_u32();
+        * r6 = -32;
+        * bpf_iter_num_new(&fp[-8], 0, 10);
+        * if (unlikely(bpf_get_prandom_u32()))
+        *   r6 = -31;
+        * while (bpf_iter_num_next(&fp[-8])) {
+        *   if (unlikely(bpf_get_prandom_u32()))
+        *     *(u64 *)(fp + r6) = 7;
+        * }
+        * bpf_iter_num_destroy(&fp[-8]);
+        * return 0;
+        */
+       asm volatile (
+               "call %[bpf_get_prandom_u32];"
+               "r8 = r0;"
+               "r7 = 0;"
+               "r6 = -32;"
+               "r0 = 0;"
+               "*(u64 *)(r10 - 16) = r0;"
+               "r1 = r10;"
+               "r1 += -8;"
+               "r2 = 0;"
+               "r3 = 10;"
+               "call %[bpf_iter_num_new];"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto change_r6_%=;"
+       "loop_%=:"
+               "call noop;"
+               "r1 = r10;"
+               "r1 += -8;"
+               "call %[bpf_iter_num_next];"
+               "if r0 == 0 goto loop_end_%=;"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto use_r6_%=;"
+               "goto loop_%=;"
+       "loop_end_%=:"
+               "r1 = r10;"
+               "r1 += -8;"
+               "call %[bpf_iter_num_destroy];"
+               "r0 = 0;"
+               "exit;"
+       "use_r6_%=:"
+               "r0 = r10;"
+               "r0 += r6;"
+               "r1 = 7;"
+               "*(u64 *)(r0 + 0) = r1;"
+               "goto loop_%=;"
+       "change_r6_%=:"
+               "r6 = -31;"
+               "goto loop_%=;"
+               :
+               : __imm(bpf_iter_num_new),
+                 __imm(bpf_iter_num_next),
+                 __imm(bpf_iter_num_destroy),
+                 __imm(bpf_get_prandom_u32)
+               : __clobber_all
+       );
+}
+
+__used __naked
+static int noop(void)
+{
+       asm volatile (
+               "r0 = 0;"
+               "exit;"
+       );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state2(void)
+{
+       /* This is equivalent to C program below.
+        *
+        *     r8 = bpf_get_prandom_u32();
+        *     r6 = -32;
+        *     bpf_iter_num_new(&fp[-8], 0, 10);
+        *     if (unlikely(bpf_get_prandom_u32())) {
+        *       r6 = -31;
+        * jump_into_loop:
+        *       goto +0;
+        *       goto loop;
+        *     }
+        *     if (unlikely(bpf_get_prandom_u32()))
+        *       goto jump_into_loop;
+        * loop:
+        *     while (bpf_iter_num_next(&fp[-8])) {
+        *       if (unlikely(bpf_get_prandom_u32()))
+        *         *(u64 *)(fp + r6) = 7;
+        *     }
+        *     bpf_iter_num_destroy(&fp[-8]);
+        *     return 0;
+        */
+       asm volatile (
+               "call %[bpf_get_prandom_u32];"
+               "r8 = r0;"
+               "r7 = 0;"
+               "r6 = -32;"
+               "r0 = 0;"
+               "*(u64 *)(r10 - 16) = r0;"
+               "r1 = r10;"
+               "r1 += -8;"
+               "r2 = 0;"
+               "r3 = 10;"
+               "call %[bpf_iter_num_new];"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto change_r6_%=;"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto jump_into_loop_%=;"
+       "loop_%=:"
+               "r1 = r10;"
+               "r1 += -8;"
+               "call %[bpf_iter_num_next];"
+               "if r0 == 0 goto loop_end_%=;"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto use_r6_%=;"
+               "goto loop_%=;"
+       "loop_end_%=:"
+               "r1 = r10;"
+               "r1 += -8;"
+               "call %[bpf_iter_num_destroy];"
+               "r0 = 0;"
+               "exit;"
+       "use_r6_%=:"
+               "r0 = r10;"
+               "r0 += r6;"
+               "r1 = 7;"
+               "*(u64 *)(r0 + 0) = r1;"
+               "goto loop_%=;"
+       "change_r6_%=:"
+               "r6 = -31;"
+       "jump_into_loop_%=: "
+               "goto +0;"
+               "goto loop_%=;"
+               :
+               : __imm(bpf_iter_num_new),
+                 __imm(bpf_iter_num_next),
+                 __imm(bpf_iter_num_destroy),
+                 __imm(bpf_get_prandom_u32)
+               : __clobber_all
+       );
+}
+
+SEC("?raw_tp")
+__flag(BPF_F_TEST_STATE_FREQ)
+__failure __msg("misaligned stack access off 0+-31+0 size 8")
+__naked int absent_mark_in_the_middle_state3(void)
+{
+       /* This is equivalent to C program below.
+        *
+        * bpf_iter_num_new(&fp[-8], 0, 10);
+        * loop1(-32, &fp[-8]);
+        * bpf_iter_num_destroy(&fp[-8]);
+        * bpf_iter_num_new(&fp[-8], 0, 10);
+        * loop1_wrapper(&fp[-8]);
+        * bpf_iter_num_destroy(&fp[-8]);
+        * return 0;
+        */
+       asm volatile (
+               "r1 = r10;"
+               "r1 += -8;"
+               "r2 = 0;"
+               "r3 = 10;"
+               "call %[bpf_iter_num_new];"
+               /* call #1 */
+               "r1 = -32;"
+               "r2 = r10;"
+               "r2 += -8;"
+               "call loop1;"
+               "r1 = r10;"
+               "r1 += -8;"
+               "call %[bpf_iter_num_destroy];"
+               /* call #2 */
+               "r1 = r10;"
+               "r1 += -8;"
+               "r2 = 0;"
+               "r3 = 10;"
+               "call %[bpf_iter_num_new];"
+               "r1 = r10;"
+               "r1 += -8;"
+               "call loop1_wrapper;"
+               /* return */
+               "r1 = r10;"
+               "r1 += -8;"
+               "call %[bpf_iter_num_destroy];"
+               "r0 = 0;"
+               "exit;"
+               :
+               : __imm(bpf_iter_num_new),
+                 __imm(bpf_iter_num_destroy),
+                 __imm(bpf_get_prandom_u32)
+               : __clobber_all
+       );
+}
+
+__used __naked
+static int loop1(void)
+{
+       /*
+        *  int loop1(num, iter) {
+        *     r6 = num;
+        *     r7 = iter;
+        *     while (bpf_iter_num_next(r7)) {
+        *       if (unlikely(bpf_get_prandom_u32()))
+        *         *(u64 *)(fp + r6) = 7;
+        *     }
+        *     return 0;
+        *  }
+        */
+       asm volatile (
+               "r6 = r1;"
+               "r7 = r2;"
+               "call %[bpf_get_prandom_u32];"
+               "r8 = r0;"
+       "loop_%=:"
+               "r1 = r7;"
+               "call %[bpf_iter_num_next];"
+               "if r0 == 0 goto loop_end_%=;"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto use_r6_%=;"
+               "goto loop_%=;"
+       "loop_end_%=:"
+               "r0 = 0;"
+               "exit;"
+       "use_r6_%=:"
+               "r0 = r10;"
+               "r0 += r6;"
+               "r1 = 7;"
+               "*(u64 *)(r0 + 0) = r1;"
+               "goto loop_%=;"
+               :
+               : __imm(bpf_iter_num_next),
+                 __imm(bpf_get_prandom_u32)
+               : __clobber_all
+       );
+}
+
+__used __naked
+static int loop1_wrapper(void)
+{
+       /*
+        *  int loop1_wrapper(iter) {
+        *    r6 = -32;
+        *    r7 = iter;
+        *    if (unlikely(bpf_get_prandom_u32()))
+        *      r6 = -31;
+        *    loop1(r6, r7);
+        *    return 0;
+        *  }
+        */
+       asm volatile (
+               "r6 = -32;"
+               "r7 = r1;"
+               "call %[bpf_get_prandom_u32];"
+               "r8 = r0;"
+               "call %[bpf_get_prandom_u32];"
+               "if r0 == r8 goto change_r6_%=;"
+       "loop_%=:"
+               "r1 = r6;"
+               "r2 = r7;"
+               "call loop1;"
+               "r0 = 0;"
+               "exit;"
+       "change_r6_%=:"
+               "r6 = -31;"
+               "goto loop_%=;"
+               :
+               : __imm(bpf_iter_num_next),
+                 __imm(bpf_get_prandom_u32)
+               : __clobber_all
+       );
+}
+
 char _license[] SEC("license") = "GPL";