git.ipfire.org Git - thirdparty/kernel/linux.git/commitdiff
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf 7.0-rc5
author Alexei Starovoitov <ast@kernel.org>
Mon, 23 Mar 2026 02:31:37 +0000 (19:31 -0700)
committer Alexei Starovoitov <ast@kernel.org>
Mon, 23 Mar 2026 02:33:29 +0000 (19:33 -0700)
Cross-merge BPF and other fixes after downstream PR.

Minor conflicts in:
  tools/testing/selftests/bpf/progs/exceptions_fail.c
  tools/testing/selftests/bpf/progs/verifier_bounds.c

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1  2 
kernel/bpf/btf.c
kernel/bpf/core.c
kernel/bpf/verifier.c
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/progs/exceptions_fail.c
tools/testing/selftests/bpf/progs/verifier_bounds.c

Simple merge
Simple merge
index 80a9eab79cac8b80051820243c40809d4c80cb4a,f108c01ff6d02ca83ad38fa3031ffd83099e2494..cd008b146ee526d0b81639952e53cae5de7a554d
@@@ -17423,8 -17415,14 +17430,14 @@@ static void sync_linked_regs(struct bpf
                        continue;
                if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST))
                        continue;
+               /*
+                * Skip mixed 32/64-bit links: the delta relationship doesn't
+                * hold across different ALU widths.
+                */
+               if (((reg->id ^ known_reg->id) & BPF_ADD_CONST) == BPF_ADD_CONST)
+                       continue;
                if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) ||
 -                  reg->off == known_reg->off) {
 +                  reg->delta == known_reg->delta) {
                        s32 saved_subreg_def = reg->subreg_def;
  
                        copy_register_state(reg, known_reg);
Simple merge
index 275ad6fe4a049a43583506ce2f1d22a8aa3aec07,9ea1353488d736efdd986fbde6877303b0eca1d0..051e2b6f269476252fb0cfe0bce1063f6579cc16
@@@ -359,20 -353,47 +366,62 @@@ int reject_exception_throw_cb_diff(stru
        return 0;
  }
  
 +__weak
 +void foo(void)
 +{
 +      bpf_throw(1);
 +}
 +
 +SEC("?fentry/bpf_check")
 +__failure __msg("At program exit the register R1 has smin=1 smax=1 should")
 +int reject_out_of_range_global_throw(struct __sk_buff *skb)
 +{
 +      foo();
 +
 +      return 0;
 +}
 +
+ __noinline static int always_throws(void)
+ {
+       bpf_throw(0);
+       return 0;
+ }
+ __noinline static int rcu_lock_then_throw(void)
+ {
+       bpf_rcu_read_lock();
+       bpf_throw(0);
+       return 0;
+ }
+ SEC("?tc")
+ __failure __msg("bpf_throw cannot be used inside bpf_rcu_read_lock-ed region")
+ int reject_subprog_rcu_lock_throw(void *ctx)
+ {
+       rcu_lock_then_throw();
+       return 0;
+ }
+ SEC("?tc")
+ __failure __msg("bpf_throw cannot be used inside bpf_preempt_disable-ed region")
+ int reject_subprog_throw_preempt_lock(void *ctx)
+ {
+       bpf_preempt_disable();
+       always_throws();
+       bpf_preempt_enable();
+       return 0;
+ }
+ SEC("?tc")
+ __failure __msg("bpf_throw cannot be used inside bpf_local_irq_save-ed region")
+ int reject_subprog_throw_irq_lock(void *ctx)
+ {
+       unsigned long flags;
+       bpf_local_irq_save(&flags);
+       always_throws();
+       bpf_local_irq_restore(&flags);
+       return 0;
+ }
  
  char _license[] SEC("license") = "GPL";
index 3724d5e5bcb3623aa54b2eaddb70adc9c7987813,79a328276805de721a112f54eebdc0110fdc3e30..bb20f0f06f05f4423a60fa42ed2a476258137c74
@@@ -2037,37 -2037,98 +2037,132 @@@ __naked void signed_unsigned_intersecti
        : __clobber_all);
  }
  
- /* After instruction 3, the u64 and s64 ranges look as follows:
++/*
++ * After instruction 3, the u64 and s64 ranges look as follows:
 + * 0  umin=2                             umax=0xff..ff00..03   U64_MAX
 + * |  [xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx]      |
 + * |----------------------------|------------------------------|
 + * |xx]                           [xxxxxxxxxxxxxxxxxxxxxxxxxxxx|
 + * 0  smax=2                      smin=0x800..02               -1
 + *
 + * The two ranges can't be refined because they overlap in two places. Once we
 + * add an upper-bound to u64 at instruction 4, the refinement can happen. This
 + * test validates that this refinement does happen and is not overwritten by
 + * the less-precise 32bits ranges.
 + */
 +SEC("socket")
 +__description("bounds refinement: 64bits ranges not overwritten by 32bits ranges")
 +__msg("3: (65) if r0 s> 0x2 {{.*}} R0=scalar(smin=0x8000000000000002,smax=2,umin=smin32=umin32=2,umax=0xffffffff00000003,smax32=umax32=3")
 +__msg("4: (25) if r0 > 0x13 {{.*}} R0=2")
 +__success __log_level(2)
 +__naked void refinement_32bounds_not_overwriting_64bounds(void *ctx)
 +{
 +      asm volatile("                  \
 +      call %[bpf_get_prandom_u32];    \
 +      if w0 < 2 goto +5;              \
 +      if w0 > 3 goto +4;              \
 +      if r0 s> 2 goto +3;             \
 +      if r0 > 19 goto +2;             \
 +      if r0 == 2 goto +1;             \
 +      r10 = 0;                        \
 +      exit;                           \
 +"     :
 +      : __imm(bpf_get_prandom_u32)
 +      : __clobber_all);
 +}
 +
+ SEC("socket")
+ __description("maybe_fork_scalars: OR with constant rejects OOB")
+ __failure __msg("invalid access to map value")
+ __naked void or_scalar_fork_rejects_oob(void)
+ {
+       asm volatile ("                                 \
+       r1 = 0;                                         \
+       *(u64*)(r10 - 8) = r1;                          \
+       r2 = r10;                                       \
+       r2 += -8;                                       \
+       r1 = %[map_hash_8b] ll;                         \
+       call %[bpf_map_lookup_elem];                    \
+       if r0 == 0 goto l0_%=;                          \
+       r9 = r0;                                        \
+       r6 = *(u64*)(r9 + 0);                           \
+       r6 s>>= 63;                                     \
+       r6 |= 8;                                        \
+       /* r6 is -1 (current) or 8 (pushed) */          \
+       if r6 s< 0 goto l0_%=;                          \
+       /* pushed path: r6 = 8, OOB for value_size=8 */ \
+       r9 += r6;                                       \
+       r0 = *(u8*)(r9 + 0);                            \
+ l0_%=:        r0 = 0;                                         \
+       exit;                                           \
+ "     :
+       : __imm(bpf_map_lookup_elem),
+         __imm_addr(map_hash_8b)
+       : __clobber_all);
+ }
+ SEC("socket")
+ __description("maybe_fork_scalars: AND with constant still works")
+ __success __retval(0)
+ __naked void and_scalar_fork_still_works(void)
+ {
+       asm volatile ("                                 \
+       r1 = 0;                                         \
+       *(u64*)(r10 - 8) = r1;                          \
+       r2 = r10;                                       \
+       r2 += -8;                                       \
+       r1 = %[map_hash_8b] ll;                         \
+       call %[bpf_map_lookup_elem];                    \
+       if r0 == 0 goto l0_%=;                          \
+       r9 = r0;                                        \
+       r6 = *(u64*)(r9 + 0);                           \
+       r6 s>>= 63;                                     \
+       r6 &= 4;                                        \
+       /*                                              \
+        * r6 is 0 (pushed, 0&4==0) or 4 (current)      \
+        * both within value_size=8                     \
+        */                                             \
+       if r6 s< 0 goto l0_%=;                          \
+       r9 += r6;                                       \
+       r0 = *(u8*)(r9 + 0);                            \
+ l0_%=:        r0 = 0;                                         \
+       exit;                                           \
+ "     :
+       : __imm(bpf_map_lookup_elem),
+         __imm_addr(map_hash_8b)
+       : __clobber_all);
+ }
+ SEC("socket")
+ __description("maybe_fork_scalars: OR with constant allows in-bounds")
+ __success __retval(0)
+ __naked void or_scalar_fork_allows_inbounds(void)
+ {
+       asm volatile ("                                 \
+       r1 = 0;                                         \
+       *(u64*)(r10 - 8) = r1;                          \
+       r2 = r10;                                       \
+       r2 += -8;                                       \
+       r1 = %[map_hash_8b] ll;                         \
+       call %[bpf_map_lookup_elem];                    \
+       if r0 == 0 goto l0_%=;                          \
+       r9 = r0;                                        \
+       r6 = *(u64*)(r9 + 0);                           \
+       r6 s>>= 63;                                     \
+       r6 |= 4;                                        \
+       /*                                              \
+        * r6 is -1 (current) or 4 (pushed)             \
+        * pushed path: r6 = 4, within value_size=8     \
+        */                                             \
+       if r6 s< 0 goto l0_%=;                          \
+       r9 += r6;                                       \
+       r0 = *(u8*)(r9 + 0);                            \
+ l0_%=:        r0 = 0;                                         \
+       exit;                                           \
+ "     :
+       : __imm(bpf_map_lookup_elem),
+         __imm_addr(map_hash_8b)
+       : __clobber_all);
+ }
  char _license[] SEC("license") = "GPL";