5.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 23 Jun 2025 12:02:02 +0000 (14:02 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 23 Jun 2025 12:02:02 +0000 (14:02 +0200)
added patches:
revert-bpf-aggressively-forget-precise-markings-during-state-checkpointing.patch
revert-bpf-allow-precision-tracking-for-programs-with-subprogs.patch
revert-bpf-stop-setting-precise-in-current-state.patch
revert-selftests-bpf-make-test_align-selftest-more-robust.patch

queue-5.10/revert-bpf-aggressively-forget-precise-markings-during-state-checkpointing.patch [new file with mode: 0644]
queue-5.10/revert-bpf-allow-precision-tracking-for-programs-with-subprogs.patch [new file with mode: 0644]
queue-5.10/revert-bpf-stop-setting-precise-in-current-state.patch [new file with mode: 0644]
queue-5.10/revert-selftests-bpf-make-test_align-selftest-more-robust.patch [new file with mode: 0644]
queue-5.10/series

diff --git a/queue-5.10/revert-bpf-aggressively-forget-precise-markings-during-state-checkpointing.patch b/queue-5.10/revert-bpf-aggressively-forget-precise-markings-during-state-checkpointing.patch
new file mode 100644 (file)
index 0000000..a7b96fe
--- /dev/null
@@ -0,0 +1,84 @@
+From stable+bounces-155353-greg=kroah.com@vger.kernel.org Mon Jun 23 13:54:53 2025
+From: Aaron Lu <ziqianlu@bytedance.com>
+Date: Mon, 23 Jun 2025 19:54:01 +0800
+Subject: Revert "bpf: aggressively forget precise markings during state checkpointing"
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org, Andrii Nakryiko <andrii@kernel.org>, Alexei Starovoitov <ast@kernel.org>, Pu Lehui <pulehui@huawei.com>, Luiz Capitulino <luizcap@amazon.com>, Wei Wei <weiwei.danny@bytedance.com>, Yuchen Zhang <zhangyuchen.lcr@bytedance.com>
+Message-ID: <20250623115403.299-3-ziqianlu@bytedance.com>
+
+From: Aaron Lu <ziqianlu@bytedance.com>
+
+This reverts commit 1952a4d5e4cf610336b9c9ab52b1fc4e42721cf3 which is
+commit 7a830b53c17bbadcf99f778f28aaaa4e6c41df5f upstream.
+
+The backport of bpf precision tracking related changes has caused the
+bpf verifier to panic while loading certain bpf programs, so revert them.
+
+Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/
+Reported-by: Wei Wei <weiwei.danny@bytedance.com>
+Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   37 -------------------------------------
+ 1 file changed, 37 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2053,31 +2053,6 @@ static void mark_all_scalars_precise(str
+       }
+ }
+-static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st)
+-{
+-      struct bpf_func_state *func;
+-      struct bpf_reg_state *reg;
+-      int i, j;
+-
+-      for (i = 0; i <= st->curframe; i++) {
+-              func = st->frame[i];
+-              for (j = 0; j < BPF_REG_FP; j++) {
+-                      reg = &func->regs[j];
+-                      if (reg->type != SCALAR_VALUE)
+-                              continue;
+-                      reg->precise = false;
+-              }
+-              for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) {
+-                      if (!is_spilled_reg(&func->stack[j]))
+-                              continue;
+-                      reg = &func->stack[j].spilled_ptr;
+-                      if (reg->type != SCALAR_VALUE)
+-                              continue;
+-                      reg->precise = false;
+-              }
+-      }
+-}
+-
+ /*
+  * __mark_chain_precision() backtracks BPF program instruction sequence and
+  * chain of verifier states making sure that register *regno* (if regno >= 0)
+@@ -2156,14 +2131,6 @@ static void mark_all_scalars_imprecise(s
+  * be imprecise. If any child state does require this register to be precise,
+  * we'll mark it precise later retroactively during precise markings
+  * propagation from child state to parent states.
+- *
+- * Skipping precise marking setting in current state is a mild version of
+- * relying on the above observation. But we can utilize this property even
+- * more aggressively by proactively forgetting any precise marking in the
+- * current state (which we inherited from the parent state), right before we
+- * checkpoint it and branch off into new child state. This is done by
+- * mark_all_scalars_imprecise() to hopefully get more permissive and generic
+- * finalized states which help in short circuiting more future states.
+  */
+ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
+                                 int spi)
+@@ -9928,10 +9895,6 @@ next:
+       env->prev_jmps_processed = env->jmps_processed;
+       env->prev_insn_processed = env->insn_processed;
+-      /* forget precise markings we inherited, see __mark_chain_precision */
+-      if (env->bpf_capable)
+-              mark_all_scalars_imprecise(env, cur);
+-
+       /* add new state to the head of linked list */
+       new = &new_sl->state;
+       err = copy_verifier_state(new, cur);
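
The function removed above, mark_all_scalars_imprecise(), is the heart of
the "aggressively forget" optimization: just before a verifier state is
checkpointed, every scalar register and every spilled scalar stack slot in
every frame has its precise flag cleared, on the theory that any mark that
is still needed will be re-established later by backtracking from child
states. A minimal standalone sketch of that walk, using pared-down
stand-ins for the kernel's bpf_func_state/bpf_reg_state (forget_precision,
used_slots and friends are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define BPF_REG_FP	10
#define MAX_FRAMES	8

enum reg_type { NOT_INIT, SCALAR_VALUE, PTR_TO_STACK };

/* pared-down stand-ins for bpf_reg_state / bpf_func_state */
struct reg_state { enum reg_type type; bool precise; };
struct func_state {
	struct reg_state regs[BPF_REG_FP];
	struct reg_state spilled[4];	/* one entry per 8-byte stack slot */
	int used_slots;
};
struct verifier_state { struct func_state *frame[MAX_FRAMES]; int curframe; };

/* mirrors the removed mark_all_scalars_imprecise(): forget inherited
 * precision on every scalar reg and spilled scalar slot in all frames */
static void forget_precision(struct verifier_state *st)
{
	for (int i = 0; i <= st->curframe; i++) {
		struct func_state *func = st->frame[i];

		for (int j = 0; j < BPF_REG_FP; j++)
			if (func->regs[j].type == SCALAR_VALUE)
				func->regs[j].precise = false;
		for (int j = 0; j < func->used_slots; j++)
			if (func->spilled[j].type == SCALAR_VALUE)
				func->spilled[j].precise = false;
	}
}

int main(void)
{
	struct func_state f = { .used_slots = 1 };
	struct verifier_state st = { .frame = { &f }, .curframe = 0 };

	f.regs[1] = (struct reg_state){ SCALAR_VALUE, true };
	f.spilled[0] = (struct reg_state){ SCALAR_VALUE, true };

	forget_precision(&st);
	printf("r1 precise=%d slot0 precise=%d\n",
	       f.regs[1].precise, f.spilled[0].precise);	/* 0 0 */
	return 0;
}

With the revert applied, 5.10 once again keeps whatever precise marks the
current state inherited from its parent at checkpoint time, giving up some
pruning opportunities in exchange for avoiding the panic the backport
introduced.
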
diff --git a/queue-5.10/revert-bpf-allow-precision-tracking-for-programs-with-subprogs.patch b/queue-5.10/revert-bpf-allow-precision-tracking-for-programs-with-subprogs.patch
new file mode 100644 (file)
index 0000000..97e656e
--- /dev/null
@@ -0,0 +1,88 @@
+From stable+bounces-155355-greg=kroah.com@vger.kernel.org Mon Jun 23 13:54:41 2025
+From: Aaron Lu <ziqianlu@bytedance.com>
+Date: Mon, 23 Jun 2025 19:54:03 +0800
+Subject: Revert "bpf: allow precision tracking for programs with subprogs"
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org, Andrii Nakryiko <andrii@kernel.org>, Alexei Starovoitov <ast@kernel.org>, Pu Lehui <pulehui@huawei.com>, Luiz Capitulino <luizcap@amazon.com>, Wei Wei <weiwei.danny@bytedance.com>, Yuchen Zhang <zhangyuchen.lcr@bytedance.com>
+Message-ID: <20250623115403.299-5-ziqianlu@bytedance.com>
+
+From: Aaron Lu <ziqianlu@bytedance.com>
+
+This reverts commit 2474ec58b96d8a028b046beabdf49f5475eefcf8 which is
+commit be2ef8161572ec1973124ebc50f56dafc2925e07 upstream.
+
+The backport of bpf precision tracking related changes has caused the
+bpf verifier to panic while loading certain bpf programs, so revert them.
+
+Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/
+Reported-by: Wei Wei <weiwei.danny@bytedance.com>
+Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |   35 +----------------------------------
+ 1 file changed, 1 insertion(+), 34 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1359,7 +1359,7 @@ static void __mark_reg_unknown(const str
+       reg->type = SCALAR_VALUE;
+       reg->var_off = tnum_unknown;
+       reg->frameno = 0;
+-      reg->precise = !env->bpf_capable;
++      reg->precise = env->subprog_cnt > 1 || !env->bpf_capable;
+       __mark_reg_unbounded(reg);
+ }
+@@ -2102,42 +2102,12 @@ static int __mark_chain_precision(struct
+               return 0;
+       if (!reg_mask && !stack_mask)
+               return 0;
+-
+       for (;;) {
+               DECLARE_BITMAP(mask, 64);
+               u32 history = st->jmp_history_cnt;
+               if (env->log.level & BPF_LOG_LEVEL)
+                       verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx);
+-
+-              if (last_idx < 0) {
+-                      /* we are at the entry into subprog, which
+-                       * is expected for global funcs, but only if
+-                       * requested precise registers are R1-R5
+-                       * (which are global func's input arguments)
+-                       */
+-                      if (st->curframe == 0 &&
+-                          st->frame[0]->subprogno > 0 &&
+-                          st->frame[0]->callsite == BPF_MAIN_FUNC &&
+-                          stack_mask == 0 && (reg_mask & ~0x3e) == 0) {
+-                              bitmap_from_u64(mask, reg_mask);
+-                              for_each_set_bit(i, mask, 32) {
+-                                      reg = &st->frame[0]->regs[i];
+-                                      if (reg->type != SCALAR_VALUE) {
+-                                              reg_mask &= ~(1u << i);
+-                                              continue;
+-                                      }
+-                                      reg->precise = true;
+-                              }
+-                              return 0;
+-                      }
+-
+-                      verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n",
+-                              st->frame[0]->subprogno, reg_mask, stack_mask);
+-                      WARN_ONCE(1, "verifier backtracking bug");
+-                      return -EFAULT;
+-              }
+-
+               for (i = last_idx;;) {
+                       if (skip_first) {
+                               err = 0;
+@@ -11896,9 +11866,6 @@ static int do_check_common(struct bpf_ve
+                       0 /* frameno */,
+                       subprog);
+-      state->first_insn_idx = env->subprog_info[subprog].start;
+-      state->last_insn_idx = -1;
+-
+       regs = state->frame[state->curframe]->regs;
+       if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) {
+               ret = btf_prepare_func_args(env, subprog, regs);
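
Two pieces are undone above. The __mark_reg_unknown() hunk restores
marking every unknown scalar as precise whenever a program has subprogs
(env->subprog_cnt > 1), which in effect disables precision-based state
pruning for such programs on 5.10. The removed "last_idx < 0" block
handled backtracking that reaches a global subprog's entry, which is only
legitimate when the registers still awaiting precision are among R1-R5,
the function's input arguments; (reg_mask & ~0x3e) == 0 encodes exactly
that, since bits 1 through 5 give 0x3e. A tiny standalone illustration of
that mask test (the demo values are made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t r1_to_r5 = 0;

	for (int r = 1; r <= 5; r++)	/* bit r tracks register Rr */
		r1_to_r5 |= 1u << r;
	assert(r1_to_r5 == 0x3e);

	/* precision still pending for r1 and r3: allowed at entry */
	uint32_t reg_mask = (1u << 1) | (1u << 3);
	printf("ok=%d\n", (reg_mask & ~r1_to_r5) == 0);	/* ok=1 */

	/* r0 still pending: the "verifier backtracking bug" path */
	reg_mask |= 1u << 0;
	printf("ok=%d\n", (reg_mask & ~r1_to_r5) == 0);	/* ok=0 */
	return 0;
}
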
diff --git a/queue-5.10/revert-bpf-stop-setting-precise-in-current-state.patch b/queue-5.10/revert-bpf-stop-setting-precise-in-current-state.patch
new file mode 100644 (file)
index 0000000..ee18aa5
--- /dev/null
@@ -0,0 +1,174 @@
+From stable+bounces-155354-greg=kroah.com@vger.kernel.org Mon Jun 23 13:55:13 2025
+From: Aaron Lu <ziqianlu@bytedance.com>
+Date: Mon, 23 Jun 2025 19:54:02 +0800
+Subject: Revert "bpf: stop setting precise in current state"
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org, Andrii Nakryiko <andrii@kernel.org>, Alexei Starovoitov <ast@kernel.org>, Pu Lehui <pulehui@huawei.com>, Luiz Capitulino <luizcap@amazon.com>, Wei Wei <weiwei.danny@bytedance.com>, Yuchen Zhang <zhangyuchen.lcr@bytedance.com>
+Message-ID: <20250623115403.299-4-ziqianlu@bytedance.com>
+
+From: Aaron Lu <ziqianlu@bytedance.com>
+
+This reverts commit 7ca3e7459f4a5795e78b14390635879f534d9741 which is
+commit f63181b6ae79fd3b034cde641db774268c2c3acf upstream.
+
+The backport of bpf precision tracking related changes has caused the
+bpf verifier to panic while loading certain bpf programs, so revert them.
+
+Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/
+Reported-by: Wei Wei <weiwei.danny@bytedance.com>
+Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/bpf/verifier.c |  103 +++++---------------------------------------------
+ 1 file changed, 12 insertions(+), 91 deletions(-)
+
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -2028,11 +2028,8 @@ static void mark_all_scalars_precise(str
+       /* big hammer: mark all scalars precise in this path.
+        * pop_stack may still get !precise scalars.
+-       * We also skip current state and go straight to first parent state,
+-       * because precision markings in current non-checkpointed state are
+-       * not needed. See why in the comment in __mark_chain_precision below.
+        */
+-      for (st = st->parent; st; st = st->parent) {
++      for (; st; st = st->parent)
+               for (i = 0; i <= st->curframe; i++) {
+                       func = st->frame[i];
+                       for (j = 0; j < BPF_REG_FP; j++) {
+@@ -2050,88 +2047,8 @@ static void mark_all_scalars_precise(str
+                               reg->precise = true;
+                       }
+               }
+-      }
+ }
+-/*
+- * __mark_chain_precision() backtracks BPF program instruction sequence and
+- * chain of verifier states making sure that register *regno* (if regno >= 0)
+- * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked
+- * SCALARS, as well as any other registers and slots that contribute to
+- * a tracked state of given registers/stack slots, depending on specific BPF
+- * assembly instructions (see backtrack_insns() for exact instruction handling
+- * logic). This backtracking relies on recorded jmp_history and is able to
+- * traverse entire chain of parent states. This process ends only when all the
+- * necessary registers/slots and their transitive dependencies are marked as
+- * precise.
+- *
+- * One important and subtle aspect is that precise marks *do not matter* in
+- * the currently verified state (current state). It is important to understand
+- * why this is the case.
+- *
+- * First, note that current state is the state that is not yet "checkpointed",
+- * i.e., it is not yet put into env->explored_states, and it has no children
+- * states as well. It's ephemeral, and can end up either a) being discarded if
+- * compatible explored state is found at some point or BPF_EXIT instruction is
+- * reached or b) checkpointed and put into env->explored_states, branching out
+- * into one or more children states.
+- *
+- * In the former case, precise markings in current state are completely
+- * ignored by state comparison code (see regsafe() for details). Only
+- * checkpointed ("old") state precise markings are important, and if old
+- * state's register/slot is precise, regsafe() assumes current state's
+- * register/slot as precise and checks value ranges exactly and precisely. If
+- * states turn out to be compatible, current state's necessary precise
+- * markings and any required parent states' precise markings are enforced
+- * after the fact with propagate_precision() logic. But it's
+- * important to realize that in this case, even after marking current state
+- * registers/slots as precise, we immediately discard current state. So what
+- * actually matters is any of the precise markings propagated into current
+- * state's parent states, which are always checkpointed (due to b) case above).
+- * As such, for scenario a) it doesn't matter if current state has precise
+- * markings set or not.
+- *
+- * Now, for the scenario b), checkpointing and forking into child(ren)
+- * state(s). Note that before current state gets to checkpointing step, any
+- * processed instruction always assumes precise SCALAR register/slot
+- * knowledge: if precise value or range is useful to prune jump branch, BPF
+- * verifier takes this opportunity enthusiastically. Similarly, when
+- * register's value is used to calculate offset or memory address, exact
+- * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to
+- * what we mentioned above about state comparison ignoring precise markings
+- * during state comparison, BPF verifier ignores and also assumes precise
+- * markings *at will* during instruction verification process. But as verifier
+- * assumes precision, it also propagates any precision dependencies across
+- * parent states, which are not yet finalized, so can be further restricted
+- * based on new knowledge gained from restrictions enforced by their children
+- * states. This is so that once those parent states are finalized, i.e., when
+- * they have no more active children state, state comparison logic in
+- * is_state_visited() would enforce strict and precise SCALAR ranges, if
+- * required for correctness.
+- *
+- * To build a bit more intuition, note also that once a state is checkpointed,
+- * the path we took to get to that state is not important. This is crucial
+- * property for state pruning. When state is checkpointed and finalized at
+- * some instruction index, it can be correctly and safely used to "short
+- * circuit" any *compatible* state that reaches exactly the same instruction
+- * index. I.e., if we jumped to that instruction from a completely different
+- * code path than original finalized state was derived from, it doesn't
+- * matter, current state can be discarded because from that instruction
+- * forward having a compatible state will ensure we will safely reach the
+- * exit. States describe preconditions for further exploration, but completely
+- * forget the history of how we got here.
+- *
+- * This also means that even if we needed precise SCALAR range to get to
+- * finalized state, but from that point forward *that same* SCALAR register is
+- * never used in a precise context (i.e., its precise value is not needed for
+- * correctness), it's correct and safe to mark such register as "imprecise"
+- * (i.e., precise marking set to false). This is what we rely on when we do
+- * not set precise marking in current state. If no child state requires
+- * precision for any given SCALAR register, it's safe to dictate that it can
+- * be imprecise. If any child state does require this register to be precise,
+- * we'll mark it precise later retroactively during precise markings
+- * propagation from child state to parent states.
+- */
+ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno,
+                                 int spi)
+ {
+@@ -2149,10 +2066,6 @@ static int __mark_chain_precision(struct
+       if (!env->bpf_capable)
+               return 0;
+-      /* Do sanity checks against current state of register and/or stack
+-       * slot, but don't set precise flag in current state, as precision
+-       * tracking in the current state is unnecessary.
+-       */
+       func = st->frame[frame];
+       if (regno >= 0) {
+               reg = &func->regs[regno];
+@@ -2160,7 +2073,11 @@ static int __mark_chain_precision(struct
+                       WARN_ONCE(1, "backtracing misuse");
+                       return -EFAULT;
+               }
+-              new_marks = true;
++              if (!reg->precise)
++                      new_marks = true;
++              else
++                      reg_mask = 0;
++              reg->precise = true;
+       }
+       while (spi >= 0) {
+@@ -2173,7 +2090,11 @@ static int __mark_chain_precision(struct
+                       stack_mask = 0;
+                       break;
+               }
+-              new_marks = true;
++              if (!reg->precise)
++                      new_marks = true;
++              else
++                      stack_mask = 0;
++              reg->precise = true;
+               break;
+       }
+@@ -9358,7 +9279,7 @@ static bool regsafe(struct bpf_verifier_
+               if (env->explore_alu_limits)
+                       return false;
+               if (rcur->type == SCALAR_VALUE) {
+-                      if (!rold->precise)
++                      if (!rold->precise && !rcur->precise)
+                               return true;
+                       /* new val must satisfy old val knowledge */
+                       return range_within(rold, rcur) &&
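
Besides restoring the reg->precise = true assignments in
__mark_chain_precision(), the behavioral core of this revert is the
regsafe() hunk at the end: upstream pruned a scalar pair whenever the
checkpointed ("old") register was imprecise, while the restored 5.10 logic
only skips the exact range comparison when *both* the old and the current
register are imprecise. A simplified model of that scalar branch, with
stand-in types and omitting the tnum var_off containment check the kernel
also performs (range_within() follows the kernel's definition):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct scalar {
	int64_t smin, smax;
	uint64_t umin, umax;
	bool precise;
};

/* same shape as the kernel's range_within(): old bounds subsume new */
static bool range_within(const struct scalar *old, const struct scalar *cur)
{
	return old->smin <= cur->smin && old->smax >= cur->smax &&
	       old->umin <= cur->umin && old->umax >= cur->umax;
}

/* scalar branch of regsafe() as restored by this revert */
static bool scalar_safe(const struct scalar *rold, const struct scalar *rcur)
{
	if (!rold->precise && !rcur->precise)	/* upstream tested only rold */
		return true;
	/* a precise value must satisfy the old state's knowledge */
	return range_within(rold, rcur);
}

int main(void)
{
	struct scalar old = { 0, 100, 0, 100, .precise = false };
	struct scalar cur = { 0, 200, 0, 200, .precise = true };

	printf("safe=%d\n", scalar_safe(&old, &cur));	/* safe=0 */
	cur.precise = false;
	printf("safe=%d\n", scalar_safe(&old, &cur));	/* safe=1 */
	return 0;
}
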
diff --git a/queue-5.10/revert-selftests-bpf-make-test_align-selftest-more-robust.patch b/queue-5.10/revert-selftests-bpf-make-test_align-selftest-more-robust.patch
new file mode 100644 (file)
index 0000000..1e4825c
--- /dev/null
@@ -0,0 +1,116 @@
+From stable+bounces-155352-greg=kroah.com@vger.kernel.org Mon Jun 23 13:54:48 2025
+From: Aaron Lu <ziqianlu@bytedance.com>
+Date: Mon, 23 Jun 2025 19:54:00 +0800
+Subject: Revert "selftests/bpf: make test_align selftest more robust"
+To: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: stable@vger.kernel.org, Andrii Nakryiko <andrii@kernel.org>, Alexei Starovoitov <ast@kernel.org>, Pu Lehui <pulehui@huawei.com>, Luiz Capitulino <luizcap@amazon.com>, Wei Wei <weiwei.danny@bytedance.com>, Yuchen Zhang <zhangyuchen.lcr@bytedance.com>
+Message-ID: <20250623115403.299-2-ziqianlu@bytedance.com>
+
+From: Aaron Lu <ziqianlu@bytedance.com>
+
+This reverts commit 4af2d9ddb7e78f97c23f709827e5075c6d866e34 which is
+commit 4f999b767769b76378c3616c624afd6f4bb0d99f upstream.
+
+The backport of bpf precision tracking related changes has caused the
+bpf verifier to panic while loading certain bpf programs, so revert them.
+
+Link: https://lkml.kernel.org/r/20250605070921.GA3795@bytedance/
+Reported-by: Wei Wei <weiwei.danny@bytedance.com>
+Signed-off-by: Aaron Lu <ziqianlu@bytedance.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/bpf/prog_tests/align.c |   36 +++++++++----------------
+ 1 file changed, 13 insertions(+), 23 deletions(-)
+
+--- a/tools/testing/selftests/bpf/prog_tests/align.c
++++ b/tools/testing/selftests/bpf/prog_tests/align.c
+@@ -2,7 +2,7 @@
+ #include <test_progs.h>
+ #define MAX_INSNS     512
+-#define MAX_MATCHES   24
++#define MAX_MATCHES   16
+ struct bpf_reg_match {
+       unsigned int line;
+@@ -267,7 +267,6 @@ static struct bpf_align_test tests[] = {
+                        */
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+-                      BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
+@@ -281,7 +280,6 @@ static struct bpf_align_test tests[] = {
+                       BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+-                      BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+                       BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
+                       BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
+                       BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
+@@ -313,52 +311,44 @@ static struct bpf_align_test tests[] = {
+                       {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* Variable offset is added to R5 packet pointer,
+-                       * resulting in auxiliary alignment of 4. To avoid BPF
+-                       * verifier's precision backtracking logging
+-                       * interfering we also have a no-op R4 = R5
+-                       * instruction to validate R5 state. We also check
+-                       * that R4 is what it should be in such case.
++                       * resulting in auxiliary alignment of 4.
+                        */
+-                      {19, "R4_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+-                      {19, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
++                      {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* Constant offset is added to R5, resulting in
+                        * reg->off of 14.
+                        */
+-                      {20, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
++                      {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off
+                        * (14) which is 16.  Then the variable offset is 4-byte
+                        * aligned, so the total offset is 4-byte aligned and
+                        * meets the load's requirements.
+                        */
+-                      {24, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+-                      {24, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
++                      {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
++                      {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* Constant offset is added to R5 packet pointer,
+                        * resulting in reg->off value of 14.
+                        */
+-                      {27, "R5_w=pkt(id=0,off=14,r=8"},
++                      {26, "R5_w=pkt(id=0,off=14,r=8"},
+                       /* Variable offset is added to R5, resulting in a
+-                       * variable offset of (4n). See comment for insn #19
+-                       * for R4 = R5 trick.
++                       * variable offset of (4n).
+                        */
+-                      {29, "R4_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+-                      {29, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
++                      {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* Constant is added to R5 again, setting reg->off to 18. */
+-                      {30, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
++                      {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
+                       /* And once more we add a variable; resulting var_off
+                        * is still (4n), fixed offset is not changed.
+                        * Also, we create a new reg->id.
+                        */
+-                      {32, "R4_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+-                      {32, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
++                      {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"},
+                       /* At the time the word size load is performed from R5,
+                        * its total fixed offset is NET_IP_ALIGN + reg->off (18)
+                        * which is 20.  Then the variable offset is (4n), so
+                        * the total offset is 4-byte aligned and meets the
+                        * load's requirements.
+                        */
+-                      {35, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+-                      {35, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
++                      {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
++                      {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"},
+               },
+       },
+       {
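
The selftest revert above simply tracks the verifier reverts: without the
precision-backtracking log output, the extra "R4 = R5" no-op instructions
are dropped again and every later instruction index shifts down, which is
why each {line, "R5=pkt(...)"} pair in the match table moves. Conceptually
the table pins an expected register-state substring to an instruction
index in the verifier log; a hedged sketch of that matching idea (the real
harness in tools/testing/selftests/bpf/prog_tests/align.c is more
involved):

#include <stdio.h>
#include <string.h>

struct bpf_reg_match {
	unsigned int line;	/* instruction index in the verifier log */
	const char *match;	/* expected register-state substring */
};

/* return 1 if this log line is for insn `line` and shows the state */
static int check_match(const char *log_line, const struct bpf_reg_match *m)
{
	unsigned int insn;

	if (sscanf(log_line, "%u:", &insn) != 1 || insn != m->line)
		return 0;
	return strstr(log_line, m->match) != NULL;
}

int main(void)
{
	/* one entry from the reverted table above; the log line is a
	 * hand-written example, not captured verifier output */
	struct bpf_reg_match m = { 26, "R5_w=pkt(id=0,off=14,r=8" };
	const char *log = "26: R5_w=pkt(id=0,off=14,r=8,imm=0)";

	printf("matched=%d\n", check_match(log, &m));	/* matched=1 */
	return 0;
}
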
diff --git a/queue-5.10/series b/queue-5.10/series
index afe9c3e8a16263da95d0e3d344c14706bf8991c5..a7228593871623a23e3f1d41ce758fce483d513a 100644 (file)
--- a/queue-5.10/series
@@ -347,3 +347,7 @@ mm-huge_memory-fix-dereferencing-invalid-pmd-migration-entry.patch
 hwmon-occ-fix-p10-vrm-temp-sensors.patch
 rtc-test-fix-invalid-format-specifier.patch
 s390-pci-fix-__pcilg_mio_inuser-inline-assembly.patch
+revert-selftests-bpf-make-test_align-selftest-more-robust.patch
+revert-bpf-aggressively-forget-precise-markings-during-state-checkpointing.patch
+revert-bpf-stop-setting-precise-in-current-state.patch
+revert-bpf-allow-precision-tracking-for-programs-with-subprogs.patch